hip_filename (stringlengths 5 to 84) | hip_content (stringlengths 79 to 9.69M) | cuda_filename (stringlengths 4 to 83) | cuda_content (stringlengths 19 to 9.69M)
---|---|---|---|
f23424ee043b8478e0515accb186553cd63ffe3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <hipcub/hipcub.hpp>
#include <typeinfo>
#include <multigrid_helper.cuh>
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct RestrictArg {
Out out;
const In in;
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
RestrictArg(Out &out, const In &in, const Rotator &V,
const int *fine_to_coarse, const int *coarse_to_fine) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine), spin_map()
{ }
RestrictArg(const RestrictArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map()
{ }
};
/**
Rotates from the fine-color basis into the coarse-color basis.
*/
template <typename Float, int fineSpin, int fineColor, int coarseColor, int coarse_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateCoarseColor(complex<Float> out[fineSpin*coarse_colors_per_thread],
const FineColor &in, const Rotator &V, int parity, int x_cb, int coarse_color_block) {
for (int s=0; s<fineSpin; s++)
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
out[s*coarse_colors_per_thread+coarse_color_local] = 0.0;
}
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int i = coarse_color_block + coarse_color_local;
for (int s=0; s<fineSpin; s++) {
for (int j=0; j<fineColor; j++) {
out[s*coarse_colors_per_thread + coarse_color_local] += conj(V(parity, x_cb, s, j, i)) * in(parity, x_cb, s, j);
}
}
}
}
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread, typename Arg>
void Restrict(Arg arg) {
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<2; parity++) {
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>(tmp, arg.in, arg.V, parity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
/**
struct which acts as a wrapper to a vector of data.
*/
template <typename scalar, int n>
struct vector_type {
scalar data[n];
__device__ __host__ inline scalar& operator[](int i) { return data[i]; }
__device__ __host__ inline const scalar& operator[](int i) const { return data[i]; }
__device__ __host__ inline static constexpr int size() { return n; }
__device__ __host__ vector_type() { for (int i=0; i<n; i++) data[i] = 0.0; }
};
/**
functor that defines how to do a multi-vector reduction
*/
template <typename T>
struct reduce {
__device__ __host__ inline T operator()(const T &a, const T &b) {
T sum;
for (int i=0; i<sum.size(); i++) sum[i] = a[i] + b[i];
return sum;
}
};
/**
Here, we ensure that each thread block maps exactly to a
geometric block. Each thread block corresponds to one geometric
block, with the number of threads equal to the number of fine-grid
points per aggregate, so each thread represents a fine-grid
point. The look-up table coarse_to_fine maps each thread to its
fine-grid point.
*/
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread,
typename Arg, int block_size>
__global__ void RestrictKernel(Arg arg) {
int x_coarse = blockIdx.x;
int parity_coarse = x_coarse >= arg.out.VolumeCB() ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
// obtain fine index from this look up table
// since both parities map to the same block, each thread block must do both parities
// threadIdx.x - fine checkerboard offset
// threadIdx.y - fine parity offset
// blockIdx.x - which coarse block are we working on
// assume that coarse_to_fine look up map is ordered as (coarse-block-id + fine-point-id)
// and that fine-point-id is parity ordered
int x_fine = arg.coarse_to_fine[ (blockIdx.x*blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x];
int parity = threadIdx.y;
int x_fine_cb = x_fine - parity*arg.in.VolumeCB();
int coarse_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * coarse_colors_per_thread;
if (coarse_color_block >= coarseColor) return;
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>(tmp, arg.in, arg.V, parity, x_fine_cb, coarse_color_block);
typedef vector_type<complex<Float>, coarseSpin*coarse_colors_per_thread> vector;
vector reduced;
// first let's coarsen spin locally
for (int s=0; s<fineSpin; s++) {
for (int v=0; v<coarse_colors_per_thread; v++) {
reduced[arg.spin_map(s)*coarse_colors_per_thread+v] += tmp[s*coarse_colors_per_thread+v];
}
}
// now let's coarsen geometry across threads
typedef hipcub::BlockReduce<vector, block_size, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 2> BlockReduce;
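// (editorial note) the trailing template argument 2 is the reduce's BLOCK_DIM_Y
// parameter, matching the two fine parities carried in blockDim.y, so the block
// reduction spans blockDim.x*2 threads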
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
if (threadIdx.x==0 && threadIdx.y == 0) {
for (int s=0; s<coarseSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int v = coarse_color_block + coarse_color_local;
arg.out(parity_coarse, x_coarse_cb, s, v) = reduced[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
template <typename Float, typename Arg, int fineSpin, int fineColor, int coarseSpin, int coarseColor,
int coarse_colors_per_thread>
class RestrictLaunch : public Tunable {
protected:
Arg &arg;
QudaFieldLocation location;
const int block_size;
char vol[TuneKey::volume_n];
long long flops() const { return 0; }
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.in.VolumeCB(); } // fine parity is the block y dimension
public:
RestrictLaunch(Arg &arg, const ColorSpinorField &coarse, const ColorSpinorField &fine,
const QudaFieldLocation location) : arg(arg), location(location),
block_size((arg.in.VolumeCB())/arg.out.Volume()) {
strcpy(vol, coarse.VolString());
strcat(vol, ",");
strcat(vol, fine.VolString());
strcpy(aux, coarse.AuxString());
strcat(aux, ",");
strcat(aux, fine.AuxString());
} // block size is checkerboard fine length / full coarse length
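// (editorial example, not from the original source) for a 4^4 site aggregate there
// are 256 fine points, i.e. 128 per checkerboard parity, so block_size evaluates to
// 128 and apply() below selects the block_size == 128 instantiation, launched with
// blockDim = (128, 2, z)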
virtual ~RestrictLaunch() { }
void apply(const hipStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.block.y = 2; // need factor of two for fine parity within the block
if (block_size == 8) {
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,8>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 16) {
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,16>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else if (block_size == 128) {
hipLaunchKernelGGL(( RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,128>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
errorQuda("Block size %d not instantiated", block_size);
}
}
}
// This block tuning tunes for the optimal amount of color
// splitting between blockDim.z and gridDim.z. However, enabling
// blockDim.z > 1 gives incorrect results due to cub reductions
// being unable to do independent sliced reductions along
// blockDim.z. So for now we only split between colors per thread
// and grid.z.
bool advanceBlockDim(TuneParam &param) const
{
// let's try to advance spin/block-color
while(param.block.z <= coarseColor/coarse_colors_per_thread) {
param.block.z++;
if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) {
param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = 1;
param.grid.z = coarseColor/coarse_colors_per_thread;
return false;
}
}
// only tune shared memory per thread (disable tuning for block.z for now)
bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param); } //|| advanceBlockDim(param); }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
void initTuneParam(TuneParam &param) const { defaultTuneParam(param); }
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const {
param.block = dim3(block_size, 1, 1);
param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1);
param.shared_bytes = 0;
param.block.z = 1;
param.grid.z = coarseColor / coarse_colors_per_thread;
}
long long bytes() const {
return arg.in.Bytes() + arg.out.Bytes() + arg.V.Bytes() + arg.in.Volume()*sizeof(int);
}
};
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
const int *fine_to_coarse, const int *coarse_to_fine) {
typedef FieldOrderCB<Float,fineSpin,fineColor,1,order> fineSpinor;
typedef FieldOrderCB<Float,coarseSpin,coarseColor,1,order> coarseSpinor;
typedef FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order> packedSpinor;
typedef RestrictArg<coarseSpinor,fineSpinor,packedSpinor,fineSpin,coarseSpin> Arg;
coarseSpinor Out(const_cast<ColorSpinorField&>(out));
fineSpinor In(const_cast<ColorSpinorField&>(in));
packedSpinor V(const_cast<ColorSpinorField&>(v));
// this seems like a reasonable value for both fine and coarse grids
constexpr int coarse_colors_per_thread = 2;
Arg arg(Out, In, V, fine_to_coarse, coarse_to_fine);
RestrictLaunch<Float, Arg, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(arg, out, in, Location(out, in, v));
restrictor.apply(0);
if (Location(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, int fineSpin, int fineColor, int coarseSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
// first check that the spin_map matches the spin_mapper
spin_mapper<fineSpin,coarseSpin> mapper;
for (int s=0; s<fineSpin; s++)
if (mapper(s) != spin_map[s]) errorQuda("Spin map does not match spin_mapper");
if (nVec == 2) {
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 8) {
Restrict<Float,fineSpin,fineColor,coarseSpin,8,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 12) {
Restrict<Float,fineSpin,fineColor,coarseSpin,12,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 16) {
Restrict<Float,fineSpin,fineColor,coarseSpin,16,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 20) {
Restrict<Float,fineSpin,fineColor,coarseSpin,20,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 48) {
Restrict<Float,fineSpin,fineColor,coarseSpin,48,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
}
template <typename Float, int fineSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin());
if (in.Ncolor() == 3) {
Restrict<Float,fineSpin,3, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 2) {
Restrict<Float,fineSpin,2, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 8) {
Restrict<Float,fineSpin,8, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 16) {
Restrict<Float,fineSpin,16, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 24) {
Restrict<Float,fineSpin,24, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 48) {
Restrict<Float,fineSpin,48, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported nColor %d", in.Ncolor());
}
}
template <typename Float, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (in.Nspin() == 4) {
Restrict<Float,4,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Nspin() == 2) {
Restrict<Float,2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
#if GPU_STAGGERED_DIRAC
} else if (in.Nspin() == 1) {
Restrict<Float,1,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
#endif
} else {
errorQuda("Unsupported nSpin %d", in.Nspin());
}
}
template <typename Float>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
Restrict<Float,QUDA_FLOAT2_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
Restrict<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported field type %d", out.FieldOrder());
}
}
#endif // GPU_MULTIGRID
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
#ifdef GPU_MULTIGRID
if (out.Precision() != in.Precision() || v.Precision() != in.Precision())
errorQuda("Precision mismatch out=%d in=%d v=%d", out.Precision(), in.Precision(), v.Precision());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported precision %d", out.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
} // namespace quda
| f23424ee043b8478e0515accb186553cd63ffe3b.cu | #include <color_spinor_field.h>
#include <color_spinor_field_order.h>
#include <tune_quda.h>
#include <cub/cub.cuh>
#include <typeinfo>
#include <multigrid_helper.cuh>
namespace quda {
#ifdef GPU_MULTIGRID
using namespace quda::colorspinor;
/**
Kernel argument struct
*/
template <typename Out, typename In, typename Rotator, int fineSpin, int coarseSpin>
struct RestrictArg {
Out out;
const In in;
const Rotator V;
const int *fine_to_coarse;
const int *coarse_to_fine;
const spin_mapper<fineSpin,coarseSpin> spin_map;
RestrictArg(Out &out, const In &in, const Rotator &V,
const int *fine_to_coarse, const int *coarse_to_fine) :
out(out), in(in), V(V), fine_to_coarse(fine_to_coarse), coarse_to_fine(coarse_to_fine), spin_map()
{ }
RestrictArg(const RestrictArg<Out,In,Rotator,fineSpin,coarseSpin> &arg) :
out(arg.out), in(arg.in), V(arg.V),
fine_to_coarse(arg.fine_to_coarse), coarse_to_fine(arg.coarse_to_fine), spin_map()
{ }
};
/**
Rotates from the fine-color basis into the coarse-color basis.
*/
template <typename Float, int fineSpin, int fineColor, int coarseColor, int coarse_colors_per_thread,
class FineColor, class Rotator>
__device__ __host__ inline void rotateCoarseColor(complex<Float> out[fineSpin*coarse_colors_per_thread],
const FineColor &in, const Rotator &V, int parity, int x_cb, int coarse_color_block) {
for (int s=0; s<fineSpin; s++)
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
out[s*coarse_colors_per_thread+coarse_color_local] = 0.0;
}
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int i = coarse_color_block + coarse_color_local;
for (int s=0; s<fineSpin; s++) {
for (int j=0; j<fineColor; j++) {
out[s*coarse_colors_per_thread + coarse_color_local] += conj(V(parity, x_cb, s, j, i)) * in(parity, x_cb, s, j);
}
}
}
}
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread, typename Arg>
void Restrict(Arg arg) {
for (int parity_coarse=0; parity_coarse<2; parity_coarse++)
for (int x_coarse_cb=0; x_coarse_cb<arg.out.VolumeCB(); x_coarse_cb++)
for (int s=0; s<coarseSpin; s++)
for (int c=0; c<coarseColor; c++)
arg.out(parity_coarse, x_coarse_cb, s, c) = 0.0;
// loop over fine degrees of freedom
for (int parity=0; parity<2; parity++) {
for (int x_cb=0; x_cb<arg.in.VolumeCB(); x_cb++) {
int x = parity*arg.in.VolumeCB() + x_cb;
int x_coarse = arg.fine_to_coarse[x];
int parity_coarse = (x_coarse >= arg.out.VolumeCB()) ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
for (int coarse_color_block=0; coarse_color_block<coarseColor; coarse_color_block+=coarse_colors_per_thread) {
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>(tmp, arg.in, arg.V, parity, x_cb, coarse_color_block);
for (int s=0; s<fineSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int c = coarse_color_block + coarse_color_local;
arg.out(parity_coarse,x_coarse_cb,arg.spin_map(s),c) += tmp[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
}
}
/**
struct which acts as a wrapper to a vector of data.
*/
template <typename scalar, int n>
struct vector_type {
scalar data[n];
__device__ __host__ inline scalar& operator[](int i) { return data[i]; }
__device__ __host__ inline const scalar& operator[](int i) const { return data[i]; }
__device__ __host__ inline static constexpr int size() { return n; }
__device__ __host__ vector_type() { for (int i=0; i<n; i++) data[i] = 0.0; }
};
/**
functor that defines how to do a multi-vector reduction
*/
template <typename T>
struct reduce {
__device__ __host__ inline T operator()(const T &a, const T &b) {
T sum;
for (int i=0; i<sum.size(); i++) sum[i] = a[i] + b[i];
return sum;
}
};
/**
Here, we ensure that each thread block maps exactly to a
geometric block. Each thread block corresponds to one geometric
block, with the number of threads equal to the number of fine-grid
points per aggregate, so each thread represents a fine-grid
point. The look-up table coarse_to_fine maps each thread to its
fine-grid point.
*/
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, int coarse_colors_per_thread,
typename Arg, int block_size>
__global__ void RestrictKernel(Arg arg) {
int x_coarse = blockIdx.x;
int parity_coarse = x_coarse >= arg.out.VolumeCB() ? 1 : 0;
int x_coarse_cb = x_coarse - parity_coarse*arg.out.VolumeCB();
// obtain fine index from this look up table
// since both parities map to the same block, each thread block must do both parities
// threadIdx.x - fine checkerboard offset
// threadIdx.y - fine parity offset
// blockIdx.x - which coarse block are we working on
// assume that coarse_to_fine look up map is ordered as (coarse-block-id + fine-point-id)
// and that fine-point-id is parity ordered
int x_fine = arg.coarse_to_fine[ (blockIdx.x*blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x];
int parity = threadIdx.y;
int x_fine_cb = x_fine - parity*arg.in.VolumeCB();
int coarse_color_block = (blockDim.z*blockIdx.z + threadIdx.z) * coarse_colors_per_thread;
if (coarse_color_block >= coarseColor) return;
complex<Float> tmp[fineSpin*coarse_colors_per_thread];
rotateCoarseColor<Float,fineSpin,fineColor,coarseColor,coarse_colors_per_thread>(tmp, arg.in, arg.V, parity, x_fine_cb, coarse_color_block);
typedef vector_type<complex<Float>, coarseSpin*coarse_colors_per_thread> vector;
vector reduced;
// first let's coarsen spin locally
for (int s=0; s<fineSpin; s++) {
for (int v=0; v<coarse_colors_per_thread; v++) {
reduced[arg.spin_map(s)*coarse_colors_per_thread+v] += tmp[s*coarse_colors_per_thread+v];
}
}
// now let's coarsen geometry across threads
typedef cub::BlockReduce<vector, block_size, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 2> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
reduce<vector> reducer; // reduce functor
// note this is not safe for blockDim.z > 1
reduced = BlockReduce(temp_storage).Reduce(reduced, reducer);
if (threadIdx.x==0 && threadIdx.y == 0) {
for (int s=0; s<coarseSpin; s++) {
for (int coarse_color_local=0; coarse_color_local<coarse_colors_per_thread; coarse_color_local++) {
int v = coarse_color_block + coarse_color_local;
arg.out(parity_coarse, x_coarse_cb, s, v) = reduced[s*coarse_colors_per_thread+coarse_color_local];
}
}
}
}
template <typename Float, typename Arg, int fineSpin, int fineColor, int coarseSpin, int coarseColor,
int coarse_colors_per_thread>
class RestrictLaunch : public Tunable {
protected:
Arg &arg;
QudaFieldLocation location;
const int block_size;
char vol[TuneKey::volume_n];
long long flops() const { return 0; }
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.in.VolumeCB(); } // fine parity is the block y dimension
public:
RestrictLaunch(Arg &arg, const ColorSpinorField &coarse, const ColorSpinorField &fine,
const QudaFieldLocation location) : arg(arg), location(location),
block_size((arg.in.VolumeCB())/arg.out.Volume()) {
strcpy(vol, coarse.VolString());
strcat(vol, ",");
strcat(vol, fine.VolString());
strcpy(aux, coarse.AuxString());
strcat(aux, ",");
strcat(aux, fine.AuxString());
} // block size is checkerboard fine length / full coarse length
virtual ~RestrictLaunch() { }
void apply(const cudaStream_t &stream) {
if (location == QUDA_CPU_FIELD_LOCATION) {
Restrict<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread>(arg);
} else {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
tp.block.y = 2; // need factor of two for fine parity within the block
if (block_size == 8) {
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,8>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 16) {
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,16>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else if (block_size == 128) {
RestrictKernel<Float,fineSpin,fineColor,coarseSpin,coarseColor,coarse_colors_per_thread,Arg,128>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else {
errorQuda("Block size %d not instantiated", block_size);
}
}
}
// This block tuning tunes for the optimal amount of color
// splitting between blockDim.z and gridDim.z. However, enabling
// blockDim.z > 1 gives incorrect results due to cub reductions
// being unable to do independent sliced reductions along
// blockDim.z. So for now we only split between colors per thread
// and grid.z.
bool advanceBlockDim(TuneParam &param) const
{
// let's try to advance spin/block-color
while(param.block.z <= coarseColor/coarse_colors_per_thread) {
param.block.z++;
if ( (coarseColor/coarse_colors_per_thread) % param.block.z == 0) {
param.grid.z = (coarseColor/coarse_colors_per_thread) / param.block.z;
break;
}
}
// we can advance spin/block-color since this is valid
if (param.block.z <= (coarseColor/coarse_colors_per_thread) ) { //
return true;
} else { // we have run off the end so let's reset
param.block.z = 1;
param.grid.z = coarseColor/coarse_colors_per_thread;
return false;
}
}
// only tune shared memory per thread (disable tuning for block.z for now)
bool advanceTuneParam(TuneParam &param) const { return advanceSharedBytes(param); } //|| advanceBlockDim(param); }
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
void initTuneParam(TuneParam &param) const { defaultTuneParam(param); }
/** sets default values for when tuning is disabled */
void defaultTuneParam(TuneParam &param) const {
param.block = dim3(block_size, 1, 1);
param.grid = dim3( (minThreads()+param.block.x-1) / param.block.x, 1, 1);
param.shared_bytes = 0;
param.block.z = 1;
param.grid.z = coarseColor / coarse_colors_per_thread;
}
long long bytes() const {
return arg.in.Bytes() + arg.out.Bytes() + arg.V.Bytes() + arg.in.Volume()*sizeof(int);
}
};
template <typename Float, int fineSpin, int fineColor, int coarseSpin, int coarseColor, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
const int *fine_to_coarse, const int *coarse_to_fine) {
typedef FieldOrderCB<Float,fineSpin,fineColor,1,order> fineSpinor;
typedef FieldOrderCB<Float,coarseSpin,coarseColor,1,order> coarseSpinor;
typedef FieldOrderCB<Float,fineSpin,fineColor,coarseColor,order> packedSpinor;
typedef RestrictArg<coarseSpinor,fineSpinor,packedSpinor,fineSpin,coarseSpin> Arg;
coarseSpinor Out(const_cast<ColorSpinorField&>(out));
fineSpinor In(const_cast<ColorSpinorField&>(in));
packedSpinor V(const_cast<ColorSpinorField&>(v));
// this seems like a reasonable value for both fine and coarse grids
constexpr int coarse_colors_per_thread = 2;
Arg arg(Out, In, V, fine_to_coarse, coarse_to_fine);
RestrictLaunch<Float, Arg, fineSpin, fineColor, coarseSpin, coarseColor, coarse_colors_per_thread> restrictor(arg, out, in, Location(out, in, v));
restrictor.apply(0);
if (Location(out, in, v) == QUDA_CUDA_FIELD_LOCATION) checkCudaError();
}
template <typename Float, int fineSpin, int fineColor, int coarseSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int nVec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
// first check that the spin_map matches the spin_mapper
spin_mapper<fineSpin,coarseSpin> mapper;
for (int s=0; s<fineSpin; s++)
if (mapper(s) != spin_map[s]) errorQuda("Spin map does not match spin_mapper");
if (nVec == 2) {
Restrict<Float,fineSpin,fineColor,coarseSpin,2,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 4) {
Restrict<Float,fineSpin,fineColor,coarseSpin,4,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 8) {
Restrict<Float,fineSpin,fineColor,coarseSpin,8,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 12) {
Restrict<Float,fineSpin,fineColor,coarseSpin,12,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 16) {
Restrict<Float,fineSpin,fineColor,coarseSpin,16,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 20) {
Restrict<Float,fineSpin,fineColor,coarseSpin,20,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 24) {
Restrict<Float,fineSpin,fineColor,coarseSpin,24,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else if (nVec == 48) {
Restrict<Float,fineSpin,fineColor,coarseSpin,48,order>(out, in, v, fine_to_coarse, coarse_to_fine);
} else {
errorQuda("Unsupported nVec %d", nVec);
}
}
template <typename Float, int fineSpin, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (out.Nspin() != 2) errorQuda("Unsupported nSpin %d", out.Nspin());
if (in.Ncolor() == 3) {
Restrict<Float,fineSpin,3, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 2) {
Restrict<Float,fineSpin,2, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 8) {
Restrict<Float,fineSpin,8, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 16) {
Restrict<Float,fineSpin,16, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 24) {
Restrict<Float,fineSpin,24, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Ncolor() == 48) {
Restrict<Float,fineSpin,48, 2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported nColor %d", in.Ncolor());
}
}
template <typename Float, QudaFieldOrder order>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (in.Nspin() == 4) {
Restrict<Float,4,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (in.Nspin() == 2) {
Restrict<Float,2,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
#if GPU_STAGGERED_DIRAC
} else if (in.Nspin() == 1) {
Restrict<Float,1,order>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
#endif
} else {
errorQuda("Unsupported nSpin %d", in.Nspin());
}
}
template <typename Float>
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
if (out.FieldOrder() != in.FieldOrder() || out.FieldOrder() != v.FieldOrder())
errorQuda("Field orders do not match (out=%d, in=%d, v=%d)",
out.FieldOrder(), in.FieldOrder(), v.FieldOrder());
if (out.FieldOrder() == QUDA_FLOAT2_FIELD_ORDER) {
Restrict<Float,QUDA_FLOAT2_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (out.FieldOrder() == QUDA_SPACE_SPIN_COLOR_FIELD_ORDER) {
Restrict<Float,QUDA_SPACE_SPIN_COLOR_FIELD_ORDER>
(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported field type %d", out.FieldOrder());
}
}
#endif // GPU_MULTIGRID
void Restrict(ColorSpinorField &out, const ColorSpinorField &in, const ColorSpinorField &v,
int Nvec, const int *fine_to_coarse, const int *coarse_to_fine, const int *spin_map) {
#ifdef GPU_MULTIGRID
if (out.Precision() != in.Precision() || v.Precision() != in.Precision())
errorQuda("Precision mismatch out=%d in=%d v=%d", out.Precision(), in.Precision(), v.Precision());
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
Restrict<double>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
Restrict<float>(out, in, v, Nvec, fine_to_coarse, coarse_to_fine, spin_map);
} else {
errorQuda("Unsupported precision %d", out.Precision());
}
#else
errorQuda("Multigrid has not been built");
#endif
}
} // namespace quda
|
2b3d3a065efa6d568edeb46cf19fbf5e3ad546ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <chrono>
#define block_size_x 256
#define num_blocks 1024
//a naive summation in C
float sum_floats(float *in_array, int n) {
float sum = 0.0;
for (int i=0; i<n; i++) {
sum += in_array[i];
}
return sum;
}
//Kahan summation to avoid floating-point precision errors
float sum_floats_kahan(float *in_array, int n) {
float sum = 0.0;
float c = 0.0;
for (int i=0; i<n; i++) {
float v = in_array[i] - c;
float t = sum + v;
c = (t - sum) - v;
sum = t;
}
return sum;
}
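//editorial note (magnitudes are an assumption based on main() below): summing 5e7
//values of size ~0.05 naively gives a total near 2.5e6, where a float's spacing is
//about 0.25, so most low-order bits of each addend are dropped; Kahan's compensation
//term c captures those lost bits and re-adds them on the next iteration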
//CUDA kernel for parallel reduction
extern "C" __global__ void reduce_kernel(float *out_array, float *in_array, int n) {
int ti = threadIdx.x;
int x = blockIdx.x * block_size_x + threadIdx.x;
int step_size = gridDim.x * block_size_x;
float sum = 0.0f;
//cooperatively (with all threads in all thread blocks) iterate over input array
for (int i=x; i<n; i+=step_size) {
sum += in_array[i];
}
//at this point we have reduced the number of values to be summed from n to
//the total number of threads in all thread blocks combined
//the goal is now to reduce the values within each thread block to a single
//value per thread block; for this we will need shared memory
//declare shared memory array, how much shared memory do we need?
//__shared__ float ...;
//make every thread store its thread-local sum to the array in shared memory
//... = sum;
//now let's call syncthreads() to make sure all threads have finished
//storing their local sums to shared memory
__syncthreads();
//now this interesting looking loop will do the following:
//it iterates over the block_size_x with the following values for s:
//if block_size_x is 256, 's' will be powers of 2 from 128, 64, 32, down to 1.
//these decreasing offsets can be used to reduce the number
//of values within the thread block in only a few steps.
#pragma unroll
for (unsigned int s=block_size_x/2; s>0; s/=2) {
//you are to write the code inside this loop such that
//threads will add the sums of other threads that are 's' away
//do this iteratively such that together the threads compute the
//sum of all thread-local sums
//use shared memory to access the values of other threads
//and store the new value in shared memory to be used in the next round
//be careful that values that should be read are
//not overwritten before they are read
//make sure to call __syncthreads() when needed
}
//write back one value per thread block
if (ti == 0) {
//out_array[blockIdx.x] = ; //store the per-thread block reduced value to global memory
}
}
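//the kernel below is an editorial addition, not part of the original exercise file:
//it is one possible completed version of the shared-memory tree reduction that the
//comments above describe, included only as a hedged reference sketch
extern "C" __global__ void reduce_kernel_completed_example(float *out_array, float *in_array, int n) {
int ti = threadIdx.x;
int x = blockIdx.x * block_size_x + threadIdx.x;
int step_size = gridDim.x * block_size_x;
float sum = 0.0f;
//grid-stride loop, identical to the skeleton above
for (int i=x; i<n; i+=step_size) {
sum += in_array[i];
}
//one float of shared memory per thread in the block
__shared__ float sh_sum[block_size_x];
sh_sum[ti] = sum;
__syncthreads();
//tree reduction: in each round the first s threads add the value s positions away
for (unsigned int s=block_size_x/2; s>0; s/=2) {
if (ti < s) {
sh_sum[ti] += sh_sum[ti + s];
}
__syncthreads();
}
//thread 0 writes the per-block result back to global memory
if (ti == 0) {
out_array[blockIdx.x] = sh_sum[0];
}
}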
int main() {
int n = (int)5e7; //problem size
float time;
hipError_t err;
//allocate arrays and fill them
float *in_array = (float *) malloc(n * sizeof(float));
float *out_array = (float *) malloc(num_blocks * sizeof(float));
for (int i=0; i<n; i++) {
in_array[i] = (rand() % 10000) / 100000.0;
}
memset(out_array, 0, num_blocks * sizeof(float));
//measure the CPU function
auto start = std::chrono::high_resolution_clock::now();
float sum = sum_floats(in_array, n);
auto stop = std::chrono::high_resolution_clock::now();
time = (float)std::chrono::duration_cast<std::chrono::microseconds>(stop-start).count()/1000.0;
printf("sum_floats took %.3f ms\n", time);
//allocate GPU memory
float *d_in; float *d_out;
err = hipMalloc((void **)&d_in, n*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMalloc: %s\n", hipGetErrorString( err ));
err = hipMalloc((void **)&d_out, num_blocks*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMalloc: %s\n", hipGetErrorString( err ));
//copy the input data to the GPU
err = hipMemcpy(d_in, in_array, n*sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess) fprintf(stderr, "Error in hipMemcpy host to device: %s\n", hipGetErrorString( err ));
//zero the output array
err = hipMemset(d_out, 0, num_blocks*sizeof(float));
if (err != hipSuccess) fprintf(stderr, "Error in hipMemset: %s\n", hipGetErrorString( err ));
//setup the grid and thread blocks
dim3 grid(num_blocks, 1);
dim3 grid2(1, 1);
dim3 threads(block_size_x, 1, 1);
//measure the GPU function
hipDeviceSynchronize();
start = std::chrono::high_resolution_clock::now();
hipLaunchKernelGGL(( reduce_kernel), dim3(grid), dim3(threads), 0, 0, d_out, d_in, n);
hipLaunchKernelGGL(( reduce_kernel), dim3(grid2), dim3(threads), 0, 0, d_out, d_out, num_blocks); //call the kernel again with only 1 thread block
hipDeviceSynchronize();
stop = std::chrono::high_resolution_clock::now();
time = (float)std::chrono::duration_cast<std::chrono::microseconds>(stop-start).count()/1000.0;
printf("reduce_kernel took %.3f ms\n", time);
//check to see if all went well
err = hipGetLastError();
if (err != hipSuccess) fprintf(stderr, "Error during kernel launch: %s\n", hipGetErrorString( err ));
//copy the result back to host memory
err = hipMemcpy(out_array, d_out, 1*sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess) fprintf(stderr, "Error in hipMemcpy device to host: %s\n", hipGetErrorString( err ));
//compute a reliable reference answer on the host
float sum2 = sum_floats_kahan(in_array, n);
//check the result
float diff = abs(*out_array - sum2);
printf("cpu: %f, corrected: %f\n", sum, sum2);
printf("gpu: %f\n", *out_array);
if (diff < 1.0) {
printf("TEST PASSED!\n");
} else {
printf("TEST FAILED!\n");
}
//clean up
hipFree(d_in);
hipFree(d_out);
free(in_array);
free(out_array);
return 0;
}
| 2b3d3a065efa6d568edeb46cf19fbf5e3ad546ed.cu | #include <stdio.h>
#include <chrono>
#define block_size_x 256
#define num_blocks 1024
//a naive summation in C
float sum_floats(float *in_array, int n) {
float sum = 0.0;
for (int i=0; i<n; i++) {
sum += in_array[i];
}
return sum;
}
//Kahan summation to avoid floating-point precision errors
float sum_floats_kahan(float *in_array, int n) {
float sum = 0.0;
float c = 0.0;
for (int i=0; i<n; i++) {
float v = in_array[i] - c;
float t = sum + v;
c = (t - sum) - v;
sum = t;
}
return sum;
}
//CUDA kernel for parallel reduction
extern "C" __global__ void reduce_kernel(float *out_array, float *in_array, int n) {
int ti = threadIdx.x;
int x = blockIdx.x * block_size_x + threadIdx.x;
int step_size = gridDim.x * block_size_x;
float sum = 0.0f;
//cooperatively (with all threads in all thread blocks) iterate over input array
for (int i=x; i<n; i+=step_size) {
sum += in_array[i];
}
//at this point we have reduced the number of values to be summed from n to
//the total number of threads in all thread blocks combined
//the goal is now to reduce the values within each thread block to a single
//value per thread block; for this we will need shared memory
//declare shared memory array, how much shared memory do we need?
//__shared__ float ...;
//make every thread store its thread-local sum to the array in shared memory
//... = sum;
//now let's call syncthreads() to make sure all threads have finished
//storing their local sums to shared memory
__syncthreads();
//now this interesting looking loop will do the following:
//it iterates over the block_size_x with the following values for s:
//if block_size_x is 256, 's' will be powers of 2 from 128, 64, 32, down to 1.
//these decreasing offsets can be used to reduce the number
//of values within the thread block in only a few steps.
#pragma unroll
for (unsigned int s=block_size_x/2; s>0; s/=2) {
//you are to write the code inside this loop such that
//threads will add the sums of other threads that are 's' away
//do this iteratively such that together the threads compute the
//sum of all thread-local sums
//use shared memory to access the values of other threads
//and store the new value in shared memory to be used in the next round
//be careful that values that should be read are
//not overwritten before they are read
//make sure to call __syncthreads() when needed
}
//write back one value per thread block
if (ti == 0) {
//out_array[blockIdx.x] = ; //store the per-thread block reduced value to global memory
}
}
int main() {
int n = (int)5e7; //problem size
float time;
cudaError_t err;
//allocate arrays and fill them
float *in_array = (float *) malloc(n * sizeof(float));
float *out_array = (float *) malloc(num_blocks * sizeof(float));
for (int i=0; i<n; i++) {
in_array[i] = (rand() % 10000) / 100000.0;
}
memset(out_array, 0, num_blocks * sizeof(float));
//measure the CPU function
auto start = std::chrono::high_resolution_clock::now();
float sum = sum_floats(in_array, n);
auto stop = std::chrono::high_resolution_clock::now();
time = (float)std::chrono::duration_cast<std::chrono::microseconds>(stop-start).count()/1000.0;
printf("sum_floats took %.3f ms\n", time);
//allocate GPU memory
float *d_in; float *d_out;
err = cudaMalloc((void **)&d_in, n*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString( err ));
err = cudaMalloc((void **)&d_out, num_blocks*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString( err ));
//copy the input data to the GPU
err = cudaMemcpy(d_in, in_array, n*sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemcpy host to device: %s\n", cudaGetErrorString( err ));
//zero the output array
err = cudaMemset(d_out, 0, num_blocks*sizeof(float));
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemset: %s\n", cudaGetErrorString( err ));
//setup the grid and thread blocks
dim3 grid(num_blocks, 1);
dim3 grid2(1, 1);
dim3 threads(block_size_x, 1, 1);
//measure the GPU function
cudaDeviceSynchronize();
start = std::chrono::high_resolution_clock::now();
reduce_kernel<<<grid, threads>>>(d_out, d_in, n);
reduce_kernel<<<grid2, threads>>>(d_out, d_out, num_blocks); //call the kernel again with only 1 thread block
cudaDeviceSynchronize();
stop = std::chrono::high_resolution_clock::now();
time = (float)std::chrono::duration_cast<std::chrono::microseconds>(stop-start).count()/1000.0;
printf("reduce_kernel took %.3f ms\n", time);
//check to see if all went well
err = cudaGetLastError();
if (err != cudaSuccess) fprintf(stderr, "Error during kernel launch: %s\n", cudaGetErrorString( err ));
//copy the result back to host memory
err = cudaMemcpy(out_array, d_out, 1*sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess) fprintf(stderr, "Error in cudaMemcpy device to host: %s\n", cudaGetErrorString( err ));
//compute a reliable reference answer on the host
float sum2 = sum_floats_kahan(in_array, n);
//check the result
float diff = abs(*out_array - sum2);
printf("cpu: %f, corrected: %f\n", sum, sum2);
printf("gpu: %f\n", *out_array);
if (diff < 1.0) {
printf("TEST PASSED!\n");
} else {
printf("TEST FAILED!\n");
}
//clean up
cudaFree(d_in);
cudaFree(d_out);
free(in_array);
free(out_array);
return 0;
}
|
f17e5c7bff01876f803d7d0a5942b1310177c73b.hip | // !!! This is a file automatically generated by hipify!!!
#include "DeviceProcPrimitives.h"
#include "DeviceParallel.h"
#include "DeviceSum.cuh"
#include "DeviceFunctors.cuh"
#include <algorithm>
using namespace qgate_cuda;
using qgate::QstateIdx;
using qgate::QstateSize;
using qgate::Qone;
using qgate::Qtwo;
template<class real>
DeviceProcPrimitives<real>::DeviceProcPrimitives(CUDADevice &device) : device_(device), deviceSum_(device) {
}
template<class real>
void DeviceProcPrimitives<real>::set(DevicePtrs &d_qStatesPtrs,
const void *pv, QstateIdx offset, qgate::QstateSize size) {
DeviceComplex *d_buf = d_qStatesPtrs.getPtr(offset);
device_.makeCurrent();
throwOnError(hipMemcpyAsync(d_buf, pv, size, hipMemcpyDefault));
}
template<class real>
void DeviceProcPrimitives<real>::fillZero(DevicePtrs &d_qStatesPtrs,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceComplex *d_buf = d_qStatesPtrs.getPtr(begin);
QstateSize size = end - begin;
device_.makeCurrent();
throwOnError(hipMemsetAsync(d_buf, 0, sizeof(DeviceComplex) * size));
}
template<class real>
void DeviceProcPrimitives<real>::calcProb_launch(const DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bit = Qone << lane;
QstateIdx bitmask_hi = ~((bit << 1) - 1);
QstateIdx bitmask_lo = bit - 1;
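/* editorial note: the expression ((idx << 1) & bitmask_hi) | (idx & bitmask_lo) used in
the lambda below inserts a 0 bit at position 'lane', so the sum of abs2() runs over all
basis states with that qubit in |0>, i.e. it accumulates the probability of outcome 0 */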
device_.makeCurrent();
deviceSum_.launch(begin, end, [=] __device__(QstateIdx idx) {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
return abs2<real>()(d_qStatesPtrs[idx_lo]);
});
}
template<class real>
real DeviceProcPrimitives<real>::calcProb_sync() {
device_.makeCurrent();
return deviceSum_.sync();
}
template<class real>
void DeviceProcPrimitives<real>::measure_set0(DevicePtrs &d_qStatesPtrs, int lane, real prob,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
real norm = real(1.) / std::sqrt(prob);
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] *= norm;
d_qStatesPtrs[idx_hi] = real(0.);
});
}
template<class real>
void DeviceProcPrimitives<real>::measure_set1(DevicePtrs &d_qStatesPtrs, int lane, real prob,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
real norm = real(1.) / std::sqrt(real(1.) - prob);
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] = real(0.);
d_qStatesPtrs[idx_hi] *= norm;
});
}
template<class real>
void DeviceProcPrimitives<real>::applyReset(DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
/* Assuming reset can be applied after measurement.
* Ref: https://quantumcomputing.stackexchange.com/questions/3908/possibility-of-a-reset-quantum-gate */
device_.makeCurrent();
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] = d_qStatesPtrs[idx_hi];
d_qStatesPtrs[idx_hi] = real(0.);
});
}
template<class real>
void DeviceProcPrimitives<real>::applyUnaryGate(const DeviceMatrix2x2C<real> &mat,
DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceMatrix2x2C<real> dmat(mat);
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
typedef DeviceComplexType<real> DeviceComplex;
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
const DeviceComplex &qs0 = d_qStatesPtrs[idx_lo];
const DeviceComplex &qs1 = d_qStatesPtrs[idx_hi];
DeviceComplex qsout0 = dmat(0, 0) * qs0 + dmat(0, 1) * qs1;
DeviceComplex qsout1 = dmat(1, 0) * qs0 + dmat(1, 1) * qs1;
d_qStatesPtrs[idx_lo] = qsout0;
d_qStatesPtrs[idx_hi] = qsout1;
});
}
template<class real> void DeviceProcPrimitives<real>::
applyControlGate(const DeviceMatrix2x2C<real> &mat,
DevicePtrs &d_qStatesPtrs, const qgate::QstateIdxTable256 *d_bitPermTables,
qgate::QstateIdx controlBits, qgate::QstateIdx targetBit,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceMatrix2x2C<real> dmat(mat);
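/* editorial note: the 48-bit thread index is split into 6 bytes; each byte indexes a
precomputed 256-entry permutation table and the partial results are OR'd together,
rebuilding a full state index that leaves the control and target bit positions clear,
which are then filled in below by OR-ing controlBits and targetBit */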
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx permuted = 0;
for (int iTable = 0; iTable < 6; ++iTable) {
int iByte = (idx >> (8 * iTable)) & 0xff;
permuted |= d_bitPermTables[iTable][iByte];
}
QstateIdx idx_0 = permuted | controlBits;
QstateIdx idx_1 = idx_0 | targetBit;
const DeviceComplex &qs0 = d_qStatesPtrs[idx_0];
const DeviceComplex &qs1 = d_qStatesPtrs[idx_1];
DeviceComplex qsout0 = dmat(0, 0) * qs0 + dmat(0, 1) * qs1;
DeviceComplex qsout1 = dmat(1, 0) * qs0 + dmat(1, 1) * qs1;
d_qStatesPtrs[idx_0] = qsout0;
d_qStatesPtrs[idx_1] = qsout1;
});
}
template class DeviceProcPrimitives<float>;
template class DeviceProcPrimitives<double>;
| f17e5c7bff01876f803d7d0a5942b1310177c73b.cu | #include "DeviceProcPrimitives.h"
#include "DeviceParallel.h"
#include "DeviceSum.cuh"
#include "DeviceFunctors.cuh"
#include <algorithm>
using namespace qgate_cuda;
using qgate::QstateIdx;
using qgate::QstateSize;
using qgate::Qone;
using qgate::Qtwo;
template<class real>
DeviceProcPrimitives<real>::DeviceProcPrimitives(CUDADevice &device) : device_(device), deviceSum_(device) {
}
template<class real>
void DeviceProcPrimitives<real>::set(DevicePtrs &d_qStatesPtrs,
const void *pv, QstateIdx offset, qgate::QstateSize size) {
DeviceComplex *d_buf = d_qStatesPtrs.getPtr(offset);
device_.makeCurrent();
throwOnError(cudaMemcpyAsync(d_buf, pv, size, cudaMemcpyDefault));
}
template<class real>
void DeviceProcPrimitives<real>::fillZero(DevicePtrs &d_qStatesPtrs,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceComplex *d_buf = d_qStatesPtrs.getPtr(begin);
QstateSize size = end - begin;
device_.makeCurrent();
throwOnError(cudaMemsetAsync(d_buf, 0, sizeof(DeviceComplex) * size));
}
template<class real>
void DeviceProcPrimitives<real>::calcProb_launch(const DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bit = Qone << lane;
QstateIdx bitmask_hi = ~((bit << 1) - 1);
QstateIdx bitmask_lo = bit - 1;
device_.makeCurrent();
deviceSum_.launch(begin, end, [=] __device__(QstateIdx idx) {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
return abs2<real>()(d_qStatesPtrs[idx_lo]);
});
}
template<class real>
real DeviceProcPrimitives<real>::calcProb_sync() {
device_.makeCurrent();
return deviceSum_.sync();
}
template<class real>
void DeviceProcPrimitives<real>::measure_set0(DevicePtrs &d_qStatesPtrs, int lane, real prob,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
real norm = real(1.) / std::sqrt(prob);
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] *= norm;
d_qStatesPtrs[idx_hi] = real(0.);
});
}
template<class real>
void DeviceProcPrimitives<real>::measure_set1(DevicePtrs &d_qStatesPtrs, int lane, real prob,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
real norm = real(1.) / std::sqrt(real(1.) - prob);
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] = real(0.);
d_qStatesPtrs[idx_hi] *= norm;
});
}
template<class real>
void DeviceProcPrimitives<real>::applyReset(DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
/* Assuming reset can be applied after measurement.
* Ref: https://quantumcomputing.stackexchange.com/questions/3908/possibility-of-a-reset-quantum-gate */
device_.makeCurrent();
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
d_qStatesPtrs[idx_lo] = d_qStatesPtrs[idx_hi];
d_qStatesPtrs[idx_hi] = real(0.);
});
}
template<class real>
void DeviceProcPrimitives<real>::applyUnaryGate(const DeviceMatrix2x2C<real> &mat,
DevicePtrs &d_qStatesPtrs, int lane,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceMatrix2x2C<real> dmat(mat);
QstateIdx bitmask_lane = Qone << lane;
QstateIdx bitmask_hi = ~((Qtwo << lane) - 1);
QstateIdx bitmask_lo = (Qone << lane) - 1;
device_.makeCurrent();
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
typedef DeviceComplexType<real> DeviceComplex;
QstateIdx idx_lo = ((idx << 1) & bitmask_hi) | (idx & bitmask_lo);
QstateIdx idx_hi = idx_lo | bitmask_lane;
const DeviceComplex &qs0 = d_qStatesPtrs[idx_lo];
const DeviceComplex &qs1 = d_qStatesPtrs[idx_hi];
DeviceComplex qsout0 = dmat(0, 0) * qs0 + dmat(0, 1) * qs1;
DeviceComplex qsout1 = dmat(1, 0) * qs0 + dmat(1, 1) * qs1;
d_qStatesPtrs[idx_lo] = qsout0;
d_qStatesPtrs[idx_hi] = qsout1;
});
}
template<class real> void DeviceProcPrimitives<real>::
applyControlGate(const DeviceMatrix2x2C<real> &mat,
DevicePtrs &d_qStatesPtrs, const qgate::QstateIdxTable256 *d_bitPermTables,
qgate::QstateIdx controlBits, qgate::QstateIdx targetBit,
qgate::QstateIdx begin, qgate::QstateIdx end) {
DeviceMatrix2x2C<real> dmat(mat);
transform(begin, end,
[=]__device__(QstateIdx idx) mutable {
QstateIdx permuted = 0;
for (int iTable = 0; iTable < 6; ++iTable) {
int iByte = (idx >> (8 * iTable)) & 0xff;
permuted |= d_bitPermTables[iTable][iByte];
}
QstateIdx idx_0 = permuted | controlBits;
QstateIdx idx_1 = idx_0 | targetBit;
const DeviceComplex &qs0 = d_qStatesPtrs[idx_0];
const DeviceComplex &qs1 = d_qStatesPtrs[idx_1];
DeviceComplex qsout0 = dmat(0, 0) * qs0 + dmat(0, 1) * qs1;
DeviceComplex qsout1 = dmat(1, 0) * qs0 + dmat(1, 1) * qs1;
d_qStatesPtrs[idx_0] = qsout0;
d_qStatesPtrs[idx_1] = qsout1;
});
}
template class DeviceProcPrimitives<float>;
template class DeviceProcPrimitives<double>;
|
a7161665b0f2fd70523490166a29c0a4ace4a6ff.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
int main(int argc,char **argv){
// set up device
int dev = 0;
hipSetDevice(dev);
// memory size
unsigned int isize = 1<<22;
unsigned int nbytes = isize * sizeof(float);
// get device information
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
printf("%s starting at ",argv[0]);
printf("device %d: %s memory size %d nbyte %5.2f MB\n",dev,deviceProp.name,
isize,nbytes/(1024.0f*1024.0f));
// allocate pinned host memory
float *h_a;
hipHostMalloc((float**)&h_a,nbytes);
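// (editorial note) pinned (page-locked) host memory can be DMA'd directly to and from
// the device without staging through a pageable buffer, which is why it is used here
// instead of a plain malloc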
// allocate the device memory
// Note the different calling convention of malloc vs. hipMalloc
float *d_a;
hipMalloc((float **)&d_a,nbytes);
// initialize the host memory
for(unsigned int i = 0; i < isize; i++) h_a[i] = .5f;
// transfer data from the host to the device
hipMemcpy(d_a,h_a,nbytes,hipMemcpyHostToDevice);
// transfer data from device to the host
hipMemcpy(h_a,d_a,nbytes,hipMemcpyDeviceToHost);
// free
hipFree(d_a);
hipHostFree(h_a);
// reset
hipDeviceReset();
return EXIT_SUCCESS;
}
| a7161665b0f2fd70523490166a29c0a4ace4a6ff.cu | #include <cuda_runtime.h>
#include <stdio.h>
int main(int argc,char **argv){
// set up device
int dev = 0;
cudaSetDevice(dev);
// memory size
unsigned int isize = 1<<22;
unsigned int nbytes = isize * sizeof(float);
// get device information
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s starting at ",argv[0]);
printf("device %d: %s memory size %d nbyte %5.2f MB\n",dev,deviceProp.name,
isize,nbytes/(1024.0f*1024.0f));
// allocate pinned host memory
float *h_a;
cudaMallocHost((float**)&h_a,nbytes);
// allocate the device memory
// Note the different calling convention of malloc vs. cudaMalloc
float *d_a;
cudaMalloc((float **)&d_a,nbytes);
// initialize the host memory
for(unsigned int i = 0; i < isize; i++) h_a[i] = .5f;
// transfer data from the host to the device
cudaMemcpy(d_a,h_a,nbytes,cudaMemcpyHostToDevice);
// transfer data from device to the host
cudaMemcpy(h_a,d_a,nbytes,cudaMemcpyDeviceToHost);
// free
cudaFree(d_a);
cudaFreeHost(h_a);
// reset
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
840d4083b569e2efc9fa80a1096e2481fe98d829.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include "gputimer.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;
using namespace lodepng;
constexpr auto MAX_NUMBER_THREADS = 1024;
hipError_t imageRectificationWithCuda(int numOfThreads, char* inputImageName, char* outputImageName);
hipError_t imagePoolingWithCuda(int numOfThreads, char* inputImageName, char* outputImageName);
__global__ void imgRectificationKernel(unsigned char* inMatrix, int size, int numOfThreads)
{
for (int i = 0; i < size / numOfThreads; i++) {
int k = (numOfThreads * i + threadIdx.x) + (blockIdx.x * numOfThreads);
if (inMatrix[k] < 127) {
inMatrix[k] = 127;
}
else {
inMatrix[k] = inMatrix[k];
}
}
}
__global__ void arrayMaxPerQuarterPixelKernel(unsigned char* inArray, unsigned char* outArray, int sizeofQuarterPixels, int numOfThreads, int width)
{
for (int i = 0; i < ((sizeofQuarterPixels / 4) / numOfThreads); i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
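// k is the top-left pixel of the 2x2 input block that pools down to output pixel j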
int k = 2 * j + width * (j / (width / 2));
if (inArray[k] > inArray[k + 1]) {
outArray[j] = inArray[k];
}
else {
outArray[j] = inArray[k + 1];
}
if (inArray[k + width] > outArray[j]) {
outArray[j] = inArray[k + width];
}
if (inArray[k + width + 1] > outArray[j]) {
outArray[j] = inArray[k + width + 1];
}
}
}
__global__ void pixelsSplitIntoQuarters(unsigned char* rgbaArray, unsigned char* rArray, unsigned char* gArray, unsigned char* bArray, unsigned char* aArray,
int sizeofQuarterPixels, int numOfThreads)
{
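// de-interleave the packed RGBA bytes into four planar channel arrays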
for (int i = 0; i < (sizeofQuarterPixels) / numOfThreads; i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
int k = j * 4;
rArray[j] = rgbaArray[k];
gArray[j] = rgbaArray[k + 1];
bArray[j] = rgbaArray[k + 2];
aArray[j] = rgbaArray[k + 3];
}
}
__global__ void pixelsMerge(unsigned char* outrArray, unsigned char* outgArray, unsigned char* outbArray, unsigned char* outaArray, unsigned char* outfinalArray,
int sizeofQuarterPixels, int numOfThreads) {
for (int i = 0; i < ((sizeofQuarterPixels/4) / numOfThreads); i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
int k = 4 * j;
outfinalArray[k] = outrArray[j];
outfinalArray[k + 1] = outgArray[j];
outfinalArray[k + 2] = outbArray[j];
outfinalArray[k + 3] = outaArray[j];
}
}
int main(int argc, char* argv[])
{
char* inputImgName = nullptr;
char* outImgName = nullptr;
int numOfThreads = 0;
if (argc != 5 || argv[1] == NULL || argv[2] == NULL || argv[3] == NULL || argv[4] == NULL ||
argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") {
cout << "Assignment1.exe <Command> <name of input png> <name of output png> < # threads>" << endl;
return 0;
}
else {
if (argv[2] != NULL) {
inputImgName = argv[2];
}
if (argv[3] != NULL) {
outImgName = argv[3];
}
if (argv[4] != NULL) {
numOfThreads = stoi(argv[4]);
}
}
if (argv[1] != NULL && !strcmp(argv[1],"rectify")) {
cout << "Rectifing" << endl;
hipError_t status = imageRectificationWithCuda(numOfThreads, inputImgName, outImgName);
}
if (argv[1] != NULL && !strcmp(argv[1], "pool")) {
cout << "Pooling" << endl;
hipError_t status = imagePoolingWithCuda(numOfThreads, inputImgName, outImgName);
}
std::cout << "Name of Input Image File: " << inputImgName << std::endl;
std::cout << "Name of Output Image File: " << outImgName << std::endl;
std::cout << "Name of Output Image File: " << numOfThreads << std::endl;
/*inputImageVec.clear();
outputImageVec.clear();
free(&inputImageVec);
free(&outputImageVec);*/
return 0;
}
hipError_t imageRectificationWithCuda(int numOfThreads, char* inputImageName, char* outputImageName)
{
hipError_t cudaStatus = hipError_t::cudaErrorDeviceUninitialized;
GpuTimer gpuTimer; // Struct for timing the GPU
unsigned char * inputImage = nullptr;
unsigned width, height = 0;
int error = lodepng_decode32_file(&inputImage, &width, &height, inputImageName);
if (error != 0) {
cout << "You F**ed up decoding the image" << endl;
cudaStatus = hipError_t::hipErrorAssert;
goto Error;
}
int sizeOfMat = width * height * 4;
unsigned char* dev_inMat;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMallocManaged((void**)&dev_inMat, sizeOfMat * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
for (int i = 0; i < sizeOfMat; i++) {
dev_inMat[i] = inputImage[i];
}
//Code for running the timer
//timer = myCPUTimer()
//int numOfThreadsPerBlock = 1024;
// Launch kernel on the GPU with one thread for each element.
int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks);
/*************************************** Parallel Part of Execution **********************************************/
gpuTimer.Start();
imgRectificationKernel <<<numBlocks, threadsPerBlock>> > (dev_inMat, sizeOfMat, threadsPerBlock);
gpuTimer.Stop();
/******************************************************************************************************************/
printf("-- Number of Threads: %d -- Execution Time (ms): %g \n", numOfThreads, gpuTimer.Elapsed());
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "imgRectificationKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching imgRectificationKernel!\n", cudaStatus);
goto Error;
}
error = lodepng_encode32_file(outputImageName, dev_inMat, width, height);
if (error != 0) {
cout << "You f**ed up encoding the image" << endl;
cudaStatus = hipError_t::hipErrorAssert;
goto Error;
}
free(inputImage);
Error:
hipFree(dev_inMat);
return cudaStatus;
}
hipError_t imagePoolingWithCuda(int numOfThreads, char* inputImageName, char* outputImageName)
{
hipError_t cudaStatus = hipError_t::cudaErrorDeviceUninitialized;
GpuTimer gpuTimer; // Struct for timing the GPU
unsigned char* inputImage = nullptr;
unsigned width, height = 0;
int error = lodepng_decode32_file(&inputImage, &width, &height, inputImageName);
if (error != 0) {
cout << "You F**ed up decoding the image" << endl;
cudaStatus = hipError_t::hipErrorAssert;
goto Error;
}
int sizeOfArray = width * height * 4;
unsigned char *dev_RGBAArray, *dev_RArray, *dev_GArray, *dev_BArray, *dev_AArray, *dev_outRArray, *dev_outGArray, *dev_outBArray, *dev_outAArray, *dev_outArray;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_RGBAArray, sizeOfArray * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
for (int i = 0; i < sizeOfArray; i++) {
dev_RGBAArray[i] = inputImage[i];
}
// To make our life easier, we're going to split the RGBA values into separate arrays - let's start by mallocing them
cudaStatus = hipMallocManaged((void**)& dev_RArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_GArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_BArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_AArray, (sizeOfArray / 4) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_outRArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_outGArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_outBArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_outAArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMallocManaged((void**)& dev_outArray, (sizeOfArray / 4) * sizeof(unsigned char));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks);
/*************************************** Parallel Part of Execution **********************************************/
gpuTimer.Start();
pixelsSplitIntoQuarters << <numBlocks, threadsPerBlock >> > (dev_RGBAArray, dev_RArray, dev_GArray, dev_BArray, dev_AArray, sizeOfArray/4, threadsPerBlock);
//int numOfThreadsPerBlock = 1024;
// Launch kernel on the GPU with one thread for each element.
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_RArray, dev_outRArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_GArray, dev_outGArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_BArray, dev_outBArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_AArray, dev_outAArray, sizeOfArray/4, threadsPerBlock, width);
pixelsMerge <<<numBlocks, threadsPerBlock >> > (dev_outRArray, dev_outGArray, dev_outBArray, dev_outAArray, dev_outArray, sizeOfArray/4, threadsPerBlock);
gpuTimer.Stop();
/*****************************************************************************************************************/
printf("-- Number of Threads: %d -- Execution Time (ms): %g \n", numOfThreads, gpuTimer.Elapsed());
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "imgPoolingKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching imgPoolingKernel!\n", cudaStatus);
goto Error;
}
error = lodepng_encode32_file(outputImageName, dev_outArray, width/2, height/2);
if (error != 0) {
cout << "You f**ed up encoding the image" << endl;
cudaStatus = hipError_t::hipErrorAssert;
goto Error;
}
free(inputImage);
Error:
// BE FREE MY LOVELIES
hipFree(dev_RGBAArray);
hipFree(dev_RArray);
hipFree(dev_GArray);
hipFree(dev_BArray);
hipFree(dev_AArray);
hipFree(dev_outRArray);
hipFree(dev_outGArray);
hipFree(dev_outBArray);
hipFree(dev_outAArray);
hipFree(dev_outArray);
return cudaStatus;
}
| 840d4083b569e2efc9fa80a1096e2481fe98d829.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "lodepng.h"
#include "gputimer.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;
using namespace lodepng;
constexpr auto MAX_NUMBER_THREADS = 1024;
cudaError_t imageRectificationWithCuda(int numOfThreads, char* inputImageName, char* outputImageName);
cudaError_t imagePoolingWithCuda(int numOfThreads, char* inputImageName, char* outputImageName);
__global__ void imgRectificationKernel(unsigned char* inMatrix, int size, int numOfThreads)
{
for (int i = 0; i < size / numOfThreads; i++) {
int k = (numOfThreads * i + threadIdx.x) + (blockIdx.x * numOfThreads);
if (inMatrix[k] < 127) {
inMatrix[k] = 127;
}
else {
inMatrix[k] = inMatrix[k];
}
}
}
__global__ void arrayMaxPerQuarterPixelKernel(unsigned char* inArray, unsigned char* outArray, int sizeofQuarterPixels, int numOfThreads, int width)
{
for (int i = 0; i < ((sizeofQuarterPixels / 4) / numOfThreads); i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
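// k is the top-left pixel of the 2x2 input block that pools down to output pixel j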
int k = 2 * j + width * (j / (width / 2));
if (inArray[k] > inArray[k + 1]) {
outArray[j] = inArray[k];
}
else {
outArray[j] = inArray[k + 1];
}
if (inArray[k + width] > outArray[j]) {
outArray[j] = inArray[k + width];
}
if (inArray[k + width + 1] > outArray[j]) {
outArray[j] = inArray[k + width + 1];
}
}
}
__global__ void pixelsSplitIntoQuarters(unsigned char* rgbaArray, unsigned char* rArray, unsigned char* gArray, unsigned char* bArray, unsigned char* aArray,
int sizeofQuarterPixels, int numOfThreads)
{
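// de-interleave the packed RGBA bytes into four planar channel arrays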
for (int i = 0; i < (sizeofQuarterPixels) / numOfThreads; i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
int k = j * 4;
rArray[j] = rgbaArray[k];
gArray[j] = rgbaArray[k + 1];
bArray[j] = rgbaArray[k + 2];
aArray[j] = rgbaArray[k + 3];
}
}
__global__ void pixelsMerge(unsigned char* outrArray, unsigned char* outgArray, unsigned char* outbArray, unsigned char* outaArray, unsigned char* outfinalArray,
int sizeofQuarterPixels, int numOfThreads) {
for (int i = 0; i < ((sizeofQuarterPixels/4) / numOfThreads); i++) {
int j = (threadIdx.x + numOfThreads * i) + (blockIdx.x * numOfThreads);
int k = 4 * j;
outfinalArray[k] = outrArray[j];
outfinalArray[k + 1] = outgArray[j];
outfinalArray[k + 2] = outbArray[j];
outfinalArray[k + 3] = outaArray[j];
}
}
int main(int argc, char* argv[])
{
char* inputImgName = nullptr;
char* outImgName = nullptr;
int numOfThreads = 0;
if (argc != 5 || argv[1] == NULL || argv[2] == NULL || argv[3] == NULL || argv[4] == NULL ||
argv[1] == "-h" || argv[1] == "--help" || argv[1] == "--h") {
cout << "Assignment1.exe <Command> <name of input png> <name of output png> < # threads>" << endl;
return 0;
}
else {
if (argv[2] != NULL) {
inputImgName = argv[2];
}
if (argv[3] != NULL) {
outImgName = argv[3];
}
if (argv[4] != NULL) {
numOfThreads = stoi(argv[4]);
}
}
if (argv[1] != NULL && !strcmp(argv[1],"rectify")) {
cout << "Rectifing" << endl;
cudaError_t status = imageRectificationWithCuda(numOfThreads, inputImgName, outImgName);
}
if (argv[1] != NULL && !strcmp(argv[1], "pool")) {
cout << "Pooling" << endl;
cudaError_t status = imagePoolingWithCuda(numOfThreads, inputImgName, outImgName);
}
std::cout << "Name of Input Image File: " << inputImgName << std::endl;
std::cout << "Name of Output Image File: " << outImgName << std::endl;
std::cout << "Name of Output Image File: " << numOfThreads << std::endl;
/*inputImageVec.clear();
outputImageVec.clear();
free(&inputImageVec);
free(&outputImageVec);*/
return 0;
}
cudaError_t imageRectificationWithCuda(int numOfThreads, char* inputImageName, char* outputImageName)
{
cudaError_t cudaStatus = cudaError_t::cudaErrorDeviceUninitialized;
GpuTimer gpuTimer; // Struct for timing the GPU
unsigned char * inputImage = nullptr;
unsigned width, height = 0;
int error = lodepng_decode32_file(&inputImage, &width, &height, inputImageName);
if (error != 0) {
cout << "You F**ed up decoding the image" << endl;
cudaStatus = cudaError_t::cudaErrorAssert;
goto Error;
}
int sizeOfMat = width * height * 4;
unsigned char* dev_inMat;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)&dev_inMat, sizeOfMat * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
for (int i = 0; i < sizeOfMat; i++) {
dev_inMat[i] = inputImage[i];
}
//Code for running the timer
//timer = myCPUTimer()
//int numOfThreadsPerBlock = 1024;
// Launch kernel on the GPU with one thread for each element.
int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks);
/*************************************** Parallel Part of Execution **********************************************/
gpuTimer.Start();
imgRectificationKernel <<<numBlocks, threadsPerBlock>> > (dev_inMat, sizeOfMat, threadsPerBlock);
gpuTimer.Stop();
/******************************************************************************************************************/
printf("-- Number of Threads: %d -- Execution Time (ms): %g \n", numOfThreads, gpuTimer.Elapsed());
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "imgRectificationKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching imgRectificationKernel!\n", cudaStatus);
goto Error;
}
error = lodepng_encode32_file(outputImageName, dev_inMat, width, height);
if (error != 0) {
cout << "You f**ed up encoding the image" << endl;
cudaStatus = cudaError_t::cudaErrorAssert;
goto Error;
}
free(inputImage);
Error:
cudaFree(dev_inMat);
return cudaStatus;
}
cudaError_t imagePoolingWithCuda(int numOfThreads, char* inputImageName, char* outputImageName)
{
cudaError_t cudaStatus = cudaError_t::cudaErrorDeviceUninitialized;
GpuTimer gpuTimer; // Struct for timing the GPU
unsigned char* inputImage = nullptr;
unsigned width, height = 0;
int error = lodepng_decode32_file(&inputImage, &width, &height, inputImageName);
if (error != 0) {
cout << "You F**ed up decoding the image" << endl;
cudaStatus = cudaError_t::cudaErrorAssert;
goto Error;
}
int sizeOfArray = width * height * 4;
unsigned char *dev_RGBAArray, *dev_RArray, *dev_GArray, *dev_BArray, *dev_AArray, *dev_outRArray, *dev_outGArray, *dev_outBArray, *dev_outAArray, *dev_outArray;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_RGBAArray, sizeOfArray * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
for (int i = 0; i < sizeOfArray; i++) {
dev_RGBAArray[i] = inputImage[i];
}
// To make our life easier, we're going to split the RGBA values into separate arrays - let's start by mallocing them
cudaStatus = cudaMallocManaged((void**)& dev_RArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_GArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_BArray, (sizeOfArray /4) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_AArray, (sizeOfArray / 4) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_outRArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_outGArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_outBArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_outAArray, (sizeOfArray / 16) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMallocManaged((void**)& dev_outArray, (sizeOfArray / 4) * sizeof(unsigned char));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
int numBlocks = ((numOfThreads + (MAX_NUMBER_THREADS - 1)) / MAX_NUMBER_THREADS);
int threadsPerBlock = ((numOfThreads + (numBlocks - 1)) / numBlocks);
/*************************************** Parallel Part of Execution **********************************************/
gpuTimer.Start();
pixelsSplitIntoQuarters << <numBlocks, threadsPerBlock >> > (dev_RGBAArray, dev_RArray, dev_GArray, dev_BArray, dev_AArray, sizeOfArray/4, threadsPerBlock);
//int numOfThreadsPerBlock = 1024;
// Launch kernel on the GPU with one thread for each element.
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_RArray, dev_outRArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_GArray, dev_outGArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_BArray, dev_outBArray, sizeOfArray/4, threadsPerBlock, width);
arrayMaxPerQuarterPixelKernel <<<numBlocks, threadsPerBlock >> > (dev_AArray, dev_outAArray, sizeOfArray/4, threadsPerBlock, width);
pixelsMerge <<<numBlocks, threadsPerBlock >> > (dev_outRArray, dev_outGArray, dev_outBArray, dev_outAArray, dev_outArray, sizeOfArray/4, threadsPerBlock);
gpuTimer.Stop();
/*****************************************************************************************************************/
printf("-- Number of Threads: %d -- Execution Time (ms): %g \n", numOfThreads, gpuTimer.Elapsed());
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "imgPoolingKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching imgPoolingKernel!\n", cudaStatus);
goto Error;
}
error = lodepng_encode32_file(outputImageName, dev_outArray, width/2, height/2);
if (error != 0) {
cout << "You f**ed up encoding the image" << endl;
cudaStatus = cudaError_t::cudaErrorAssert;
goto Error;
}
free(inputImage);
Error:
// BE FREE MY LOVELIES
cudaFree(dev_RGBAArray);
cudaFree(dev_RArray);
cudaFree(dev_GArray);
cudaFree(dev_BArray);
cudaFree(dev_AArray);
cudaFree(dev_outRArray);
cudaFree(dev_outGArray);
cudaFree(dev_outBArray);
cudaFree(dev_outAArray);
cudaFree(dev_outArray);
return cudaStatus;
}
|
bae07d3eb46082dc3e67477b89a951c79f6f88a4.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace {
/*
* This implementation of the regularized incomplete gamma functions and
* their helper functions are derived from the implementation of SciPy's
* gammainc, Cephes's igam and igamc, and Boost's Lanczos approximations.
* See NOTICE for the licenses.
*/
// regularized lower & upper incomplete gamma
template <typename scalar_t>
__host__ __device__ scalar_t ratevl(scalar_t x, const scalar_t num[], int64_t M,
const scalar_t denom[], int64_t N) {
// evaluating rational function, i.e., the ratio of two polynomials
// the coefficients for numerator are given by `num` while coeffs for
// denominator are given by `denom`
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int64_t i, dir;
accscalar_t y, num_ans, denom_ans;
accscalar_t absx = ::fabs(x);
const accscalar_t *p;
if (absx > 1) {
/* Evaluate as a polynomial in 1/x. */
dir = -1;
p = num + M;
y = 1 / x;
}
else {
dir = 1;
p = num;
y = x;
}
/* Evaluate the numerator */
num_ans = *p;
p += dir;
for (i = 1; i <= M; i++) {
num_ans = num_ans * y + *p;
p += dir;
}
/* Evaluate the denominator */
if (absx > 1) {
p = denom + N;
}
else {
p = denom;
}
denom_ans = *p;
p += dir;
for (i = 1; i <= N; i++) {
denom_ans = denom_ans * y + *p;
p += dir;
}
if (absx > 1) {
i = N - M;
return ::pow(x, static_cast<accscalar_t>(i)) * num_ans / denom_ans;
}
else {
return num_ans / denom_ans;
}
}
template <typename scalar_t>
__host__ __device__ scalar_t lanczos_sum_expg_scaled(scalar_t x) {
// lanczos approximation
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t lanczos_sum_expg_scaled_num[13] = {
0.006061842346248906525783753964555936883222,
0.5098416655656676188125178644804694509993,
19.51992788247617482847860966235652136208,
449.9445569063168119446858607650988409623,
6955.999602515376140356310115515198987526,
75999.29304014542649875303443598909137092,
601859.6171681098786670226533699352302507,
3481712.15498064590882071018964774556468,
14605578.08768506808414169982791359218571,
43338889.32467613834773723740590533316085,
86363131.28813859145546927288977868422342,
103794043.1163445451906271053616070238554,
56906521.91347156388090791033559122686859
};
static const accscalar_t lanczos_sum_expg_scaled_denom[13] = {
1.,
66.,
1925.,
32670.,
357423.,
2637558.,
13339535.,
45995730.,
105258076.,
150917976.,
120543840.,
39916800.,
0
};
return ratevl(static_cast<accscalar_t>(x), lanczos_sum_expg_scaled_num,
sizeof(lanczos_sum_expg_scaled_num) / sizeof(lanczos_sum_expg_scaled_num[0]) - 1,
lanczos_sum_expg_scaled_denom,
sizeof(lanczos_sum_expg_scaled_denom) / sizeof(lanczos_sum_expg_scaled_denom[0]) - 1);
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_fac(scalar_t a, scalar_t x) {
// compute x^a * exp(-x) / gamma(a)
// corrected from (15) and (16) in [igam2] by replacing exp(x - a) with
// exp(a - x).
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t ax, fac, res, num, numfac;
static const accscalar_t MAXLOG = std::is_same<accscalar_t,double>::value ?
7.09782712893383996843E2 : 88.72283905206835;
static const accscalar_t EXP1 = 2.718281828459045;
static const accscalar_t lanczos_g = 6.024680040776729583740234375;
if (::fabs(a - x) > 0.4 * ::fabs(a)) {
ax = a * ::log(x) - x - ::lgamma(a);
if (ax < -MAXLOG) {
return 0.0;
}
return ::exp(ax);
}
fac = a + lanczos_g - 0.5;
res = ::sqrt(fac / EXP1) / lanczos_sum_expg_scaled(a);
if ((a < 200) && (x < 200)) {
res *= ::exp(a - x) * ::pow(x / fac, a);
}
else {
num = x - a - lanczos_g + 0.5;
numfac = num / fac;
res *= ::exp(a * (::log1p(numfac) - numfac) + x * (0.5 - lanczos_g) / fac);
}
return res;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_series(scalar_t a, scalar_t x) {
// Compute igam using DLMF 8.11.4. [igam1]
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static const int MAXITER = 2000;
int i;
accscalar_t ans, ax, c, r;
ax = _igam_helper_fac(a, x);
if (ax == 0.0) {
return 0.0;
}
/* power series */
r = a;
c = 1.0;
ans = 1.0;
for (i = 0; i < MAXITER; i++) {
r += 1.0;
c *= x / r;
ans += c;
if (c <= MACHEP * ans) {
break;
}
}
return (ans * ax / a);
}
template <typename scalar_t>
__host__ __device__ scalar_t _igamc_helper_series(scalar_t a, scalar_t x) {
// Compute igamc using DLMF 8.7.3 [igam1]. This is related to the series in
// _igam_helper_series but extra care is taken to avoid cancellation.
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int n;
accscalar_t fac = 1;
accscalar_t sum = 0;
accscalar_t term, logx;
static const int MAXITER = 2000;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
for (n = 1; n < MAXITER; n++) {
fac *= -x / n;
term = fac / (a + n);
sum += term;
if (::fabs(term) <= MACHEP * ::fabs(sum)) {
break;
}
}
logx = ::log(x);
term = -::expm1(a * logx - ::lgamma(1+a));
return term - ::exp(a * logx - ::lgamma(a)) * sum;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_asymptotic_series(scalar_t a, scalar_t x, bool igam) {
// Compute igam/igamc using DLMF 8.12.3/8.12.4 [igam1]
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t d[25][25] =
{{-3.3333333333333333e-1, 8.3333333333333333e-2, -1.4814814814814815e-2, 1.1574074074074074e-3, 3.527336860670194e-4, -1.7875514403292181e-4, 3.9192631785224378e-5, -2.1854485106799922e-6, -1.85406221071516e-6, 8.296711340953086e-7, -1.7665952736826079e-7, 6.7078535434014986e-9, 1.0261809784240308e-8, -4.3820360184533532e-9, 9.1476995822367902e-10, -2.551419399494625e-11, -5.8307721325504251e-11, 2.4361948020667416e-11, -5.0276692801141756e-12, 1.1004392031956135e-13, 3.3717632624009854e-13, -1.3923887224181621e-13, 2.8534893807047443e-14, -5.1391118342425726e-16, -1.9752288294349443e-15},
{-1.8518518518518519e-3, -3.4722222222222222e-3, 2.6455026455026455e-3, -9.9022633744855967e-4, 2.0576131687242798e-4, -4.0187757201646091e-7, -1.8098550334489978e-5, 7.6491609160811101e-6, -1.6120900894563446e-6, 4.6471278028074343e-9, 1.378633446915721e-7, -5.752545603517705e-8, 1.1951628599778147e-8, -1.7543241719747648e-11, -1.0091543710600413e-9, 4.1627929918425826e-10, -8.5639070264929806e-11, 6.0672151016047586e-14, 7.1624989648114854e-12, -2.9331866437714371e-12, 5.9966963656836887e-13, -2.1671786527323314e-16, -4.9783399723692616e-14, 2.0291628823713425e-14, -4.13125571381061e-15},
{4.1335978835978836e-3, -2.6813271604938272e-3, 7.7160493827160494e-4, 2.0093878600823045e-6, -1.0736653226365161e-4, 5.2923448829120125e-5, -1.2760635188618728e-5, 3.4235787340961381e-8, 1.3721957309062933e-6, -6.298992138380055e-7, 1.4280614206064242e-7, -2.0477098421990866e-10, -1.4092529910867521e-8, 6.228974084922022e-9, -1.3670488396617113e-9, 9.4283561590146782e-13, 1.2872252400089318e-10, -5.5645956134363321e-11, 1.1975935546366981e-11, -4.1689782251838635e-15, -1.0940640427884594e-12, 4.6622399463901357e-13, -9.905105763906906e-14, 1.8931876768373515e-17, 8.8592218725911273e-15},
{6.4943415637860082e-4, 2.2947209362139918e-4, -4.6918949439525571e-4, 2.6772063206283885e-4, -7.5618016718839764e-5, -2.3965051138672967e-7, 1.1082654115347302e-5, -5.6749528269915966e-6, 1.4230900732435884e-6, -2.7861080291528142e-11, -1.6958404091930277e-7, 8.0994649053880824e-8, -1.9111168485973654e-8, 2.3928620439808118e-12, 2.0620131815488798e-9, -9.4604966618551322e-10, 2.1541049775774908e-10, -1.388823336813903e-14, -2.1894761681963939e-11, 9.7909989511716851e-12, -2.1782191880180962e-12, 6.2088195734079014e-17, 2.126978363279737e-13, -9.3446887915174333e-14, 2.0453671226782849e-14},
{-8.618882909167117e-4, 7.8403922172006663e-4, -2.9907248030319018e-4, -1.4638452578843418e-6, 6.6414982154651222e-5, -3.9683650471794347e-5, 1.1375726970678419e-5, 2.5074972262375328e-10, -1.6954149536558306e-6, 8.9075075322053097e-7, -2.2929348340008049e-7, 2.956794137544049e-11, 2.8865829742708784e-8, -1.4189739437803219e-8, 3.4463580499464897e-9, -2.3024517174528067e-13, -3.9409233028046405e-10, 1.8602338968504502e-10, -4.356323005056618e-11, 1.2786001016296231e-15, 4.6792750266579195e-12, -2.1492464706134829e-12, 4.9088156148096522e-13, -6.3385914848915603e-18, -5.0453320690800944e-14},
{-3.3679855336635815e-4, -6.9728137583658578e-5, 2.7727532449593921e-4, -1.9932570516188848e-4, 6.7977804779372078e-5, 1.419062920643967e-7, -1.3594048189768693e-5, 8.0184702563342015e-6, -2.2914811765080952e-6, -3.252473551298454e-10, 3.4652846491085265e-7, -1.8447187191171343e-7, 4.8240967037894181e-8, -1.7989466721743515e-14, -6.3061945000135234e-9, 3.1624176287745679e-9, -7.8409242536974293e-10, 5.1926791652540407e-15, 9.3589442423067836e-11, -4.5134262161632782e-11, 1.0799129993116827e-11, -3.661886712685252e-17, -1.210902069055155e-12, 5.6807435849905643e-13, -1.3249659916340829e-13},
{5.3130793646399222e-4, -5.9216643735369388e-4, 2.7087820967180448e-4, 7.9023532326603279e-7, -8.1539693675619688e-5, 5.6116827531062497e-5, -1.8329116582843376e-5, -3.0796134506033048e-9, 3.4651553688036091e-6, -2.0291327396058604e-6, 5.7887928631490037e-7, 2.338630673826657e-13, -8.8286007463304835e-8, 4.7435958880408128e-8, -1.2545415020710382e-8, 8.6496488580102925e-14, 1.6846058979264063e-9, -8.5754928235775947e-10, 2.1598224929232125e-10, -7.6132305204761539e-16, -2.6639822008536144e-11, 1.3065700536611057e-11, -3.1799163902367977e-12, 4.7109761213674315e-18, 3.6902800842763467e-13},
{3.4436760689237767e-4, 5.1717909082605922e-5, -3.3493161081142236e-4, 2.812695154763237e-4, -1.0976582244684731e-4, -1.2741009095484485e-7, 2.7744451511563644e-5, -1.8263488805711333e-5, 5.7876949497350524e-6, 4.9387589339362704e-10, -1.0595367014026043e-6, 6.1667143761104075e-7, -1.7562973359060462e-7, -1.2974473287015439e-12, 2.695423606288966e-8, -1.4578352908731271e-8, 3.887645959386175e-9, -3.8810022510194121e-17, -5.3279941738772867e-10, 2.7437977643314845e-10, -6.9957960920705679e-11, 2.5899863874868481e-17, 8.8566890996696381e-12, -4.403168815871311e-12, 1.0865561947091654e-12},
{-6.5262391859530942e-4, 8.3949872067208728e-4, -4.3829709854172101e-4, -6.969091458420552e-7, 1.6644846642067548e-4, -1.2783517679769219e-4, 4.6299532636913043e-5, 4.5579098679227077e-9, -1.0595271125805195e-5, 6.7833429048651666e-6, -2.1075476666258804e-6, -1.7213731432817145e-11, 3.7735877416110979e-7, -2.1867506700122867e-7, 6.2202288040189269e-8, 6.5977038267330006e-16, -9.5903864974256858e-9, 5.2132144922808078e-9, -1.3991589583935709e-9, 5.382058999060575e-16, 1.9484714275467745e-10, -1.0127287556389682e-10, 2.6077347197254926e-11, -5.0904186999932993e-18, -3.3721464474854592e-12},
{-5.9676129019274625e-4, -7.2048954160200106e-5, 6.7823088376673284e-4, -6.4014752602627585e-4, 2.7750107634328704e-4, 1.8197008380465151e-7, -8.4795071170685032e-5, 6.105192082501531e-5, -2.1073920183404862e-5, -8.8585890141255994e-10, 4.5284535953805377e-6, -2.8427815022504408e-6, 8.7082341778646412e-7, 3.6886101871706965e-12, -1.5344695190702061e-7, 8.862466778790695e-8, -2.5184812301826817e-8, -1.0225912098215092e-14, 3.8969470758154777e-9, -2.1267304792235635e-9, 5.7370135528051385e-10, -1.887749850169741e-19, -8.0931538694657866e-11, 4.2382723283449199e-11, -1.1002224534207726e-11},
{1.3324454494800656e-3, -1.9144384985654775e-3, 1.1089369134596637e-3, 9.932404122642299e-7, -5.0874501293093199e-4, 4.2735056665392884e-4, -1.6858853767910799e-4, -8.1301893922784998e-9, 4.5284402370562147e-5, -3.127053674781734e-5, 1.044986828530338e-5, 4.8435226265680926e-11, -2.1482565873456258e-6, 1.329369701097492e-6, -4.0295693092101029e-7, -1.7567877666323291e-13, 7.0145043163668257e-8, -4.040787734999483e-8, 1.1474026743371963e-8, 3.9642746853563325e-18, -1.7804938269892714e-9, 9.7480262548731646e-10, -2.6405338676507616e-10, 5.794875163403742e-18, 3.7647749553543836e-11},
{1.579727660730835e-3, 1.6251626278391582e-4, -2.0633421035543276e-3, 2.1389686185689098e-3, -1.0108559391263003e-3, -3.9912705529919201e-7, 3.6235025084764691e-4, -2.8143901463712154e-4, 1.0449513336495887e-4, 2.1211418491830297e-9, -2.5779417251947842e-5, 1.7281818956040463e-5, -5.6413773872904282e-6, -1.1024320105776174e-11, 1.1223224418895175e-6, -6.8693396379526735e-7, 2.0653236975414887e-7, 4.6714772409838506e-14, -3.5609886164949055e-8, 2.0470855345905963e-8, -5.8091738633283358e-9, -1.332821287582869e-16, 9.0354604391335133e-10, -4.9598782517330834e-10, 1.3481607129399749e-10},
{-4.0725121195140166e-3, 6.4033628338080698e-3, -4.0410161081676618e-3, -2.183732802866233e-6, 2.1740441801254639e-3, -1.9700440518418892e-3, 8.3595469747962458e-4, 1.9445447567109655e-8, -2.5779387120421696e-4, 1.9009987368139304e-4, -6.7696499937438965e-5, -1.4440629666426572e-10, 1.5712512518742269e-5, -1.0304008744776893e-5, 3.304517767401387e-6, 7.9829760242325709e-13, -6.4097794149313004e-7, 3.8894624761300056e-7, -1.1618347644948869e-7, -2.816808630596451e-15, 1.9878012911297093e-8, -1.1407719956357511e-8, 3.2355857064185555e-9, 4.1759468293455945e-20, -5.0423112718105824e-10},
{-5.9475779383993003e-3, -5.4016476789260452e-4, 8.7910413550767898e-3, -9.8576315587856125e-3, 5.0134695031021538e-3, 1.2807521786221875e-6, -2.0626019342754683e-3, 1.7109128573523058e-3, -6.7695312714133799e-4, -6.9011545676562133e-9, 1.8855128143995902e-4, -1.3395215663491969e-4, 4.6263183033528039e-5, 4.0034230613321351e-11, -1.0255652921494033e-5, 6.612086372797651e-6, -2.0913022027253008e-6, -2.0951775649603837e-13, 3.9756029041993247e-7, -2.3956211978815887e-7, 7.1182883382145864e-8, 8.925574873053455e-16, -1.2101547235064676e-8, 6.9350618248334386e-9, -1.9661464453856102e-9},
{1.7402027787522711e-2, -2.9527880945699121e-2, 2.0045875571402799e-2, 7.0289515966903407e-6, -1.2375421071343148e-2, 1.1976293444235254e-2, -5.4156038466518525e-3, -6.3290893396418616e-8, 1.8855118129005065e-3, -1.473473274825001e-3, 5.5515810097708387e-4, 5.2406834412550662e-10, -1.4357913535784836e-4, 9.9181293224943297e-5, -3.3460834749478311e-5, -3.5755837291098993e-12, 7.1560851960630076e-6, -4.5516802628155526e-6, 1.4236576649271475e-6, 1.8803149082089664e-14, -2.6623403898929211e-7, 1.5950642189595716e-7, -4.7187514673841102e-8, -6.5107872958755177e-17, 7.9795091026746235e-9},
{3.0249124160905891e-2, 2.4817436002649977e-3, -4.9939134373457022e-2, 5.9915643009307869e-2, -3.2483207601623391e-2, -5.7212968652103441e-6, 1.5085251778569354e-2, -1.3261324005088445e-2, 5.5515262632426148e-3, 3.0263182257030016e-8, -1.7229548406756723e-3, 1.2893570099929637e-3, -4.6845138348319876e-4, -1.830259937893045e-10, 1.1449739014822654e-4, -7.7378565221244477e-5, 2.5625836246985201e-5, 1.0766165333192814e-12, -5.3246809282422621e-6, 3.349634863064464e-6, -1.0381253128684018e-6, -5.608909920621128e-15, 1.9150821930676591e-7, -1.1418365800203486e-7, 3.3654425209171788e-8},
{-9.9051020880159045e-2, 1.7954011706123486e-1, -1.2989606383463778e-1, -3.1478872752284357e-5, 9.0510635276848131e-2, -9.2828824411184397e-2, 4.4412112839877808e-2, 2.7779236316835888e-7, -1.7229543805449697e-2, 1.4182925050891573e-2, -5.6214161633747336e-3, -2.39598509186381e-9, 1.6029634366079908e-3, -1.1606784674435773e-3, 4.1001337768153873e-4, 1.8365800754090661e-11, -9.5844256563655903e-5, 6.3643062337764708e-5, -2.076250624489065e-5, -1.1806020912804483e-13, 4.2131808239120649e-6, -2.6262241337012467e-6, 8.0770620494930662e-7, 6.0125912123632725e-16, -1.4729737374018841e-7},
{-1.9994542198219728e-1, -1.5056113040026424e-2, 3.6470239469348489e-1, -4.6435192311733545e-1, 2.6640934719197893e-1, 3.4038266027147191e-5, -1.3784338709329624e-1, 1.276467178337056e-1, -5.6213828755200985e-2, -1.753150885483011e-7, 1.9235592956768113e-2, -1.5088821281095315e-2, 5.7401854451350123e-3, 1.0622382710310225e-9, -1.5335082692563998e-3, 1.0819320643228214e-3, -3.7372510193945659e-4, -6.6170909729031985e-12, 8.4263617380909628e-5, -5.5150706827483479e-5, 1.7769536448348069e-5, 3.8827923210205533e-14, -3.53513697488768e-6, 2.1865832130045269e-6, -6.6812849447625594e-7},
{7.2438608504029431e-1, -1.3918010932653375, 1.0654143352413968, 1.876173868950258e-4, -8.2705501176152696e-1, 8.9352433347828414e-1, -4.4971003995291339e-1, -1.6107401567546652e-6, 1.9235590165271091e-1, -1.6597702160042609e-1, 6.8882222681814333e-2, 1.3910091724608687e-8, -2.146911561508663e-2, 1.6228980898865892e-2, -5.9796016172584256e-3, -1.1287469112826745e-10, 1.5167451119784857e-3, -1.0478634293553899e-3, 3.5539072889126421e-4, 8.1704322111801517e-13, -7.7773013442452395e-5, 5.0291413897007722e-5, -1.6035083867000518e-5, 1.2469354315487605e-14, 3.1369106244517615e-6},
{1.6668949727276811, 1.165462765994632e-1, -3.3288393225018906, 4.4692325482864037, -2.6977693045875807, -2.600667859891061e-4, 1.5389017615694539, -1.4937962361134612, 6.8881964633233148e-1, 1.3077482004552385e-6, -2.5762963325596288e-1, 2.1097676102125449e-1, -8.3714408359219882e-2, -7.7920428881354753e-9, 2.4267923064833599e-2, -1.7813678334552311e-2, 6.3970330388900056e-3, 4.9430807090480523e-11, -1.5554602758465635e-3, 1.0561196919903214e-3, -3.5277184460472902e-4, 9.3002334645022459e-14, 7.5285855026557172e-5, -4.8186515569156351e-5, 1.5227271505597605e-5},
{-6.6188298861372935, 1.3397985455142589e+1, -1.0789350606845146e+1, -1.4352254537875018e-3, 9.2333694596189809, -1.0456552819547769e+1, 5.5105526029033471, 1.2024439690716742e-5, -2.5762961164755816, 2.3207442745387179, -1.0045728797216284, -1.0207833290021914e-7, 3.3975092171169466e-1, -2.6720517450757468e-1, 1.0235252851562706e-1, 8.4329730484871625e-10, -2.7998284958442595e-2, 2.0066274144976813e-2, -7.0554368915086242e-3, 1.9402238183698188e-12, 1.6562888105449611e-3, -1.1082898580743683e-3, 3.654545161310169e-4, -5.1290032026971794e-11, -7.6340103696869031e-5},
{-1.7112706061976095e+1, -1.1208044642899116, 3.7131966511885444e+1, -5.2298271025348962e+1, 3.3058589696624618e+1, 2.4791298976200222e-3, -2.061089403411526e+1, 2.088672775145582e+1, -1.0045703956517752e+1, -1.2238783449063012e-5, 4.0770134274221141, -3.473667358470195, 1.4329352617312006, 7.1359914411879712e-8, -4.4797257159115612e-1, 3.4112666080644461e-1, -1.2699786326594923e-1, -2.8953677269081528e-10, 3.3125776278259863e-2, -2.3274087021036101e-2, 8.0399993503648882e-3, -1.177805216235265e-9, -1.8321624891071668e-3, 1.2108282933588665e-3, -3.9479941246822517e-4},
{7.389033153567425e+1, -1.5680141270402273e+2, 1.322177542759164e+2, 1.3692876877324546e-2, -1.2366496885920151e+2, 1.4620689391062729e+2, -8.0365587724865346e+1, -1.1259851148881298e-4, 4.0770132196179938e+1, -3.8210340013273034e+1, 1.719522294277362e+1, 9.3519707955168356e-7, -6.2716159907747034, 5.1168999071852637, -2.0319658112299095, -4.9507215582761543e-9, 5.9626397294332597e-1, -4.4220765337238094e-1, 1.6079998700166273e-1, -2.4733786203223402e-8, -4.0307574759979762e-2, 2.7849050747097869e-2, -9.4751858992054221e-3, 6.419922235909132e-6, 2.1250180774699461e-3},
{2.1216837098382522e+2, 1.3107863022633868e+1, -4.9698285932871748e+2, 7.3121595266969204e+2, -4.8213821720890847e+2, -2.8817248692894889e-2, 3.2616720302947102e+2, -3.4389340280087117e+2, 1.7195193870816232e+2, 1.4038077378096158e-4, -7.52594195897599e+1, 6.651969984520934e+1, -2.8447519748152462e+1, -7.613702615875391e-7, 9.5402237105304373, -7.5175301113311376, 2.8943997568871961, -4.6612194999538201e-7, -8.0615149598794088e-1, 5.8483006570631029e-1, -2.0845408972964956e-1, 1.4765818959305817e-4, 5.1000433863753019e-2, -3.3066252141883665e-2, 1.5109265210467774e-2},
{-9.8959643098322368e+2, 2.1925555360905233e+3, -1.9283586782723356e+3, -1.5925738122215253e-1, 1.9569985945919857e+3, -2.4072514765081556e+3, 1.3756149959336496e+3, 1.2920735237496668e-3, -7.525941715948055e+2, 7.3171668742208716e+2, -3.4137023466220065e+2, -9.9857390260608043e-6, 1.3356313181291573e+2, -1.1276295161252794e+2, 4.6310396098204458e+1, -7.9237387133614756e-6, -1.4510726927018646e+1, 1.1111771248100563e+1, -4.1690817945270892, 3.1008219800117808e-3, 1.1220095449981468, -7.6052379926149916e-1, 3.6262236505085254e-1, 2.216867741940747e-1, 4.8683443692930507e-1}};
int k, n, sgn;
int maxpow = 0;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
accscalar_t lambda = x / a;
accscalar_t sigma = (x - a) / a;
accscalar_t eta, res, ck, ckterm, term, absterm;
accscalar_t absoldterm = INFINITY;
accscalar_t etapow[25] = {1};
accscalar_t sum = 0;
accscalar_t afac = 1;
if (igam) {
sgn = -1;
}
else {
sgn = 1;
}
if (lambda > 1) {
eta = ::sqrt(-2 * (::log1p(sigma) - sigma));
}
else if (lambda < 1) {
eta = -::sqrt(-2 * (::log1p(sigma) - sigma));
}
else {
eta = 0;
}
res = 0.5 * ::erfc(sgn * eta * ::sqrt(a / 2));
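// res now holds the leading erfc term of the expansion; the loop below adds the
// correction series built from the d[k][n] coefficients (powers of eta, scaled by 1/a^k)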
for (k = 0; k < 25; k++) {
ck = d[k][0];
for (n = 1; n < 25; n++) {
if (n > maxpow) {
etapow[n] = eta * etapow[n-1];
maxpow += 1;
}
ckterm = d[k][n]*etapow[n];
ck += ckterm;
if (::fabs(ckterm) < MACHEP * ::fabs(ck)) {
break;
}
}
term = ck * afac;
absterm = ::fabs(term);
if (absterm > absoldterm) {
break;
}
sum += term;
if (absterm < MACHEP * ::fabs(sum)) {
break;
}
absoldterm = absterm;
afac /= a;
}
res += sgn * ::exp(-0.5 * a * eta * eta) * sum / ::sqrt(2 * 3.1415926535 * a);
return res;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igamc_helper_continued_fraction(scalar_t a, scalar_t x) {
// Compute igamc using DLMF 8.9.2. [igam1]
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int i;
accscalar_t ans, ax, c, yc, r, t, y, z;
accscalar_t pk, pkm1, pkm2, qk, qkm1, qkm2;
static const int MAXITER = 2000;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static const accscalar_t BIG = std::is_same<accscalar_t,double>::value ?
4.503599627370496e15 : 16777216.;
static const accscalar_t BIGINV = std::is_same<accscalar_t,double>::value ?
2.22044604925031308085e-16 : 5.9604644775390625E-8;
ax = _igam_helper_fac(a, x);
if (ax == 0.0) {
return 0.0;
}
/* continued fraction */
y = 1.0 - a;
z = x + y + 1.0;
c = 0.0;
pkm2 = 1.0;
qkm2 = x;
pkm1 = x + 1.0;
qkm1 = z * x;
ans = pkm1 / qkm1;
for (i = 0; i < MAXITER; i++) {
c += 1.0;
y += 1.0;
z += 2.0;
yc = y * c;
pk = pkm1 * z - pkm2 * yc;
qk = qkm1 * z - qkm2 * yc;
if (qk != 0) {
r = pk / qk;
t = ::fabs((ans - r) / r);
ans = r;
}
else {
t = 1.0;
}
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (::fabs(pk) > BIG) {
pkm2 *= BIGINV;
pkm1 *= BIGINV;
qkm2 *= BIGINV;
qkm1 *= BIGINV;
}
if (t <= MACHEP) {
break;
}
}
return ans * ax;
}
template <typename scalar_t>
__noinline__ __host__ __device__ scalar_t calc_igammac(scalar_t a, scalar_t x) {
/* the calculation of the regularized upper incomplete gamma function
* is done differently based on the values of a and x:
* - if x and/or a is at the boundary of defined region, then assign the
* result at the boundary
* - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
* Large Parameter (see DLMF 8.12.4 [igam1])
* - if x > 1.1 and x < a, using the subtraction from the regularized lower
* incomplete gamma
* - otherwise, calculate the series from [igam2] eq (5)
*/
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t absxma_a;
static const accscalar_t SMALL = 20.0;
static const accscalar_t LARGE = 200.0;
static const accscalar_t SMALLRATIO = 0.3;
static const accscalar_t LARGERATIO = 4.5;
if ((x < 0) || (a < 0)) {
// out of defined-region of the function
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
else if (a == 0) {
if (x > 0) {
return 0.0;
}
else {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
}
else if (x == 0) {
return 1.0;
}
else if (::isinf(static_cast<accscalar_t>(a))) {
if (::isinf(static_cast<accscalar_t>(x))) {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
return 1.0;
}
else if (::isinf(static_cast<accscalar_t>(x))) {
return 0.0;
}
absxma_a = ::fabs(x - a) / a;
if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
return _igam_helper_asymptotic_series(a, x, 0);
}
else if ((a > LARGE) && (absxma_a < LARGERATIO / ::sqrt(a))) {
return _igam_helper_asymptotic_series(a, x, 0);
}
if (x > 1.1) {
if (x < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_continued_fraction(a, x);
}
}
else if (x <= 0.5) {
if (-0.4 / ::log(x) < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_series(a, x);
}
}
else {
if (x * 1.1 < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_series(a, x);
}
}
}
// NOTE: this __noinline__ is important -- otherwise, observed compile times significantly
// increase. The same kernel seems to get recompiled multiple times via gpu_kernel_with_scalars,
// multiple dtypes, etc.
template <typename scalar_t>
__noinline__ __host__ __device__ scalar_t calc_igamma(scalar_t a, scalar_t x) {
/* the calculation of the regularized lower incomplete gamma function
* is done differently based on the values of a and x:
* - if x and/or a is at the boundary of defined region, then assign the
* result at the boundary
* - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
* Large Parameter (see DLMF 8.12.3 [igam1])
* - if x > 1 and x > a, using the subtraction from the regularized upper
* incomplete gamma
* - otherwise, calculate the series from [igam2] eq (4)
*/
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t absxma_a;
static const accscalar_t SMALL = 20.0;
static const accscalar_t LARGE = 200.0;
static const accscalar_t SMALLRATIO = 0.3;
static const accscalar_t LARGERATIO = 4.5;
// boundary values following SciPy
if ((x < 0) || (a < 0)) {
// out of defined-region of the function
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
else if (a == 0) {
if (x > 0) {
return 1.0;
}
else {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
}
else if (x == 0) {
return 0.0; // zero integration limit
}
else if (::isinf(static_cast<accscalar_t>(a))) {
if (::isinf(static_cast<accscalar_t>(x))) {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
return 0.0;
}
else if (::isinf(static_cast<accscalar_t>(x))) {
return 1.0;
}
/* Asymptotic regime where a ~ x. */
absxma_a = ::fabs(x - a) / a;
if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
return _igam_helper_asymptotic_series(a, x, 1);
}
else if ((a > LARGE) && (absxma_a < LARGERATIO / ::sqrt(a))) {
return _igam_helper_asymptotic_series(a, x, 1);
}
if ((x > 1.0) && (x > a)) {
return 1.0 - calc_igammac(a, x);
}
return _igam_helper_series(a, x);
}
}
// end of regularized lower & upper incomplete gamma
namespace at { namespace native {
void igamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "igamma_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return calc_igamma(a, b);
});
});
}
void igammac_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "igammac_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return calc_igammac(a, b);
});
});
}
REGISTER_DISPATCH(igamma_stub, &igamma_kernel_cuda);
REGISTER_DISPATCH(igammac_stub, &igammac_kernel_cuda);
// DO NOT ADD ANY NEW KERNELS HERE
// CUDA compilation times grow quickly. It's perfectly acceptable to have a file per kernel.
}} // namespace at::native
| bae07d3eb46082dc3e67477b89a951c79f6f88a4.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace {
/*
* This implementation of the regularized incomplete gamma functions and
* their helper functions are derived from the implementation of SciPy's
* gammainc, Cephes's igam and igamc, and Boost's Lanczos approximations.
* See NOTICE for the licenses.
*/
// regularized lower & upper incomplete gamma
template <typename scalar_t>
__host__ __device__ scalar_t ratevl(scalar_t x, const scalar_t num[], int64_t M,
const scalar_t denom[], int64_t N) {
// evaluating rational function, i.e., the ratio of two polynomials
// the coefficients for numerator are given by `num` while coeffs for
// denominator are given by `denom`
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int64_t i, dir;
accscalar_t y, num_ans, denom_ans;
accscalar_t absx = ::fabs(x);
const accscalar_t *p;
if (absx > 1) {
/* Evaluate as a polynomial in 1/x. */
dir = -1;
p = num + M;
y = 1 / x;
}
else {
dir = 1;
p = num;
y = x;
}
/* Evaluate the numerator */
num_ans = *p;
p += dir;
for (i = 1; i <= M; i++) {
num_ans = num_ans * y + *p;
p += dir;
}
/* Evaluate the denominator */
if (absx > 1) {
p = denom + N;
}
else {
p = denom;
}
denom_ans = *p;
p += dir;
for (i = 1; i <= N; i++) {
denom_ans = denom_ans * y + *p;
p += dir;
}
if (absx > 1) {
i = N - M;
return ::pow(x, static_cast<accscalar_t>(i)) * num_ans / denom_ans;
}
else {
return num_ans / denom_ans;
}
}
template <typename scalar_t>
__host__ __device__ scalar_t lanczos_sum_expg_scaled(scalar_t x) {
// lanczos approximation
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t lanczos_sum_expg_scaled_num[13] = {
0.006061842346248906525783753964555936883222,
0.5098416655656676188125178644804694509993,
19.51992788247617482847860966235652136208,
449.9445569063168119446858607650988409623,
6955.999602515376140356310115515198987526,
75999.29304014542649875303443598909137092,
601859.6171681098786670226533699352302507,
3481712.15498064590882071018964774556468,
14605578.08768506808414169982791359218571,
43338889.32467613834773723740590533316085,
86363131.28813859145546927288977868422342,
103794043.1163445451906271053616070238554,
56906521.91347156388090791033559122686859
};
static const accscalar_t lanczos_sum_expg_scaled_denom[13] = {
1.,
66.,
1925.,
32670.,
357423.,
2637558.,
13339535.,
45995730.,
105258076.,
150917976.,
120543840.,
39916800.,
0
};
return ratevl(static_cast<accscalar_t>(x), lanczos_sum_expg_scaled_num,
sizeof(lanczos_sum_expg_scaled_num) / sizeof(lanczos_sum_expg_scaled_num[0]) - 1,
lanczos_sum_expg_scaled_denom,
sizeof(lanczos_sum_expg_scaled_denom) / sizeof(lanczos_sum_expg_scaled_denom[0]) - 1);
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_fac(scalar_t a, scalar_t x) {
// compute x^a * exp(-x) / gamma(a)
// corrected from (15) and (16) in [igam2] by replacing exp(x - a) with
// exp(a - x).
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t ax, fac, res, num, numfac;
static const accscalar_t MAXLOG = std::is_same<accscalar_t,double>::value ?
7.09782712893383996843E2 : 88.72283905206835;
static const accscalar_t EXP1 = 2.718281828459045;
static const accscalar_t lanczos_g = 6.024680040776729583740234375;
if (::fabs(a - x) > 0.4 * ::fabs(a)) {
ax = a * ::log(x) - x - ::lgamma(a);
if (ax < -MAXLOG) {
return 0.0;
}
return ::exp(ax);
}
fac = a + lanczos_g - 0.5;
res = ::sqrt(fac / EXP1) / lanczos_sum_expg_scaled(a);
if ((a < 200) && (x < 200)) {
res *= ::exp(a - x) * ::pow(x / fac, a);
}
else {
num = x - a - lanczos_g + 0.5;
numfac = num / fac;
res *= ::exp(a * (::log1p(numfac) - numfac) + x * (0.5 - lanczos_g) / fac);
}
return res;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_series(scalar_t a, scalar_t x) {
// Compute igam using DLMF 8.11.4. [igam1]
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static const int MAXITER = 2000;
int i;
accscalar_t ans, ax, c, r;
ax = _igam_helper_fac(a, x);
if (ax == 0.0) {
return 0.0;
}
/* power series */
r = a;
c = 1.0;
ans = 1.0;
for (i = 0; i < MAXITER; i++) {
r += 1.0;
c *= x / r;
ans += c;
if (c <= MACHEP * ans) {
break;
}
}
return (ans * ax / a);
}
template <typename scalar_t>
__host__ __device__ scalar_t _igamc_helper_series(scalar_t a, scalar_t x) {
// Compute igamc using DLMF 8.7.3 [igam1]. This is related to the series in
// _igam_helper_series but extra care is taken to avoid cancellation.
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int n;
accscalar_t fac = 1;
accscalar_t sum = 0;
accscalar_t term, logx;
static const int MAXITER = 2000;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
for (n = 1; n < MAXITER; n++) {
fac *= -x / n;
term = fac / (a + n);
sum += term;
if (::fabs(term) <= MACHEP * ::fabs(sum)) {
break;
}
}
logx = ::log(x);
term = -::expm1(a * logx - ::lgamma(1+a));
return term - ::exp(a * logx - ::lgamma(a)) * sum;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igam_helper_asymptotic_series(scalar_t a, scalar_t x, bool igam) {
// Compute igam/igamc using DLMF 8.12.3/8.12.4 [igam1]
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
static const accscalar_t d[25][25] =
{{-3.3333333333333333e-1, 8.3333333333333333e-2, -1.4814814814814815e-2, 1.1574074074074074e-3, 3.527336860670194e-4, -1.7875514403292181e-4, 3.9192631785224378e-5, -2.1854485106799922e-6, -1.85406221071516e-6, 8.296711340953086e-7, -1.7665952736826079e-7, 6.7078535434014986e-9, 1.0261809784240308e-8, -4.3820360184533532e-9, 9.1476995822367902e-10, -2.551419399494625e-11, -5.8307721325504251e-11, 2.4361948020667416e-11, -5.0276692801141756e-12, 1.1004392031956135e-13, 3.3717632624009854e-13, -1.3923887224181621e-13, 2.8534893807047443e-14, -5.1391118342425726e-16, -1.9752288294349443e-15},
{-1.8518518518518519e-3, -3.4722222222222222e-3, 2.6455026455026455e-3, -9.9022633744855967e-4, 2.0576131687242798e-4, -4.0187757201646091e-7, -1.8098550334489978e-5, 7.6491609160811101e-6, -1.6120900894563446e-6, 4.6471278028074343e-9, 1.378633446915721e-7, -5.752545603517705e-8, 1.1951628599778147e-8, -1.7543241719747648e-11, -1.0091543710600413e-9, 4.1627929918425826e-10, -8.5639070264929806e-11, 6.0672151016047586e-14, 7.1624989648114854e-12, -2.9331866437714371e-12, 5.9966963656836887e-13, -2.1671786527323314e-16, -4.9783399723692616e-14, 2.0291628823713425e-14, -4.13125571381061e-15},
{4.1335978835978836e-3, -2.6813271604938272e-3, 7.7160493827160494e-4, 2.0093878600823045e-6, -1.0736653226365161e-4, 5.2923448829120125e-5, -1.2760635188618728e-5, 3.4235787340961381e-8, 1.3721957309062933e-6, -6.298992138380055e-7, 1.4280614206064242e-7, -2.0477098421990866e-10, -1.4092529910867521e-8, 6.228974084922022e-9, -1.3670488396617113e-9, 9.4283561590146782e-13, 1.2872252400089318e-10, -5.5645956134363321e-11, 1.1975935546366981e-11, -4.1689782251838635e-15, -1.0940640427884594e-12, 4.6622399463901357e-13, -9.905105763906906e-14, 1.8931876768373515e-17, 8.8592218725911273e-15},
{6.4943415637860082e-4, 2.2947209362139918e-4, -4.6918949439525571e-4, 2.6772063206283885e-4, -7.5618016718839764e-5, -2.3965051138672967e-7, 1.1082654115347302e-5, -5.6749528269915966e-6, 1.4230900732435884e-6, -2.7861080291528142e-11, -1.6958404091930277e-7, 8.0994649053880824e-8, -1.9111168485973654e-8, 2.3928620439808118e-12, 2.0620131815488798e-9, -9.4604966618551322e-10, 2.1541049775774908e-10, -1.388823336813903e-14, -2.1894761681963939e-11, 9.7909989511716851e-12, -2.1782191880180962e-12, 6.2088195734079014e-17, 2.126978363279737e-13, -9.3446887915174333e-14, 2.0453671226782849e-14},
{-8.618882909167117e-4, 7.8403922172006663e-4, -2.9907248030319018e-4, -1.4638452578843418e-6, 6.6414982154651222e-5, -3.9683650471794347e-5, 1.1375726970678419e-5, 2.5074972262375328e-10, -1.6954149536558306e-6, 8.9075075322053097e-7, -2.2929348340008049e-7, 2.956794137544049e-11, 2.8865829742708784e-8, -1.4189739437803219e-8, 3.4463580499464897e-9, -2.3024517174528067e-13, -3.9409233028046405e-10, 1.8602338968504502e-10, -4.356323005056618e-11, 1.2786001016296231e-15, 4.6792750266579195e-12, -2.1492464706134829e-12, 4.9088156148096522e-13, -6.3385914848915603e-18, -5.0453320690800944e-14},
{-3.3679855336635815e-4, -6.9728137583658578e-5, 2.7727532449593921e-4, -1.9932570516188848e-4, 6.7977804779372078e-5, 1.419062920643967e-7, -1.3594048189768693e-5, 8.0184702563342015e-6, -2.2914811765080952e-6, -3.252473551298454e-10, 3.4652846491085265e-7, -1.8447187191171343e-7, 4.8240967037894181e-8, -1.7989466721743515e-14, -6.3061945000135234e-9, 3.1624176287745679e-9, -7.8409242536974293e-10, 5.1926791652540407e-15, 9.3589442423067836e-11, -4.5134262161632782e-11, 1.0799129993116827e-11, -3.661886712685252e-17, -1.210902069055155e-12, 5.6807435849905643e-13, -1.3249659916340829e-13},
{5.3130793646399222e-4, -5.9216643735369388e-4, 2.7087820967180448e-4, 7.9023532326603279e-7, -8.1539693675619688e-5, 5.6116827531062497e-5, -1.8329116582843376e-5, -3.0796134506033048e-9, 3.4651553688036091e-6, -2.0291327396058604e-6, 5.7887928631490037e-7, 2.338630673826657e-13, -8.8286007463304835e-8, 4.7435958880408128e-8, -1.2545415020710382e-8, 8.6496488580102925e-14, 1.6846058979264063e-9, -8.5754928235775947e-10, 2.1598224929232125e-10, -7.6132305204761539e-16, -2.6639822008536144e-11, 1.3065700536611057e-11, -3.1799163902367977e-12, 4.7109761213674315e-18, 3.6902800842763467e-13},
{3.4436760689237767e-4, 5.1717909082605922e-5, -3.3493161081142236e-4, 2.812695154763237e-4, -1.0976582244684731e-4, -1.2741009095484485e-7, 2.7744451511563644e-5, -1.8263488805711333e-5, 5.7876949497350524e-6, 4.9387589339362704e-10, -1.0595367014026043e-6, 6.1667143761104075e-7, -1.7562973359060462e-7, -1.2974473287015439e-12, 2.695423606288966e-8, -1.4578352908731271e-8, 3.887645959386175e-9, -3.8810022510194121e-17, -5.3279941738772867e-10, 2.7437977643314845e-10, -6.9957960920705679e-11, 2.5899863874868481e-17, 8.8566890996696381e-12, -4.403168815871311e-12, 1.0865561947091654e-12},
{-6.5262391859530942e-4, 8.3949872067208728e-4, -4.3829709854172101e-4, -6.969091458420552e-7, 1.6644846642067548e-4, -1.2783517679769219e-4, 4.6299532636913043e-5, 4.5579098679227077e-9, -1.0595271125805195e-5, 6.7833429048651666e-6, -2.1075476666258804e-6, -1.7213731432817145e-11, 3.7735877416110979e-7, -2.1867506700122867e-7, 6.2202288040189269e-8, 6.5977038267330006e-16, -9.5903864974256858e-9, 5.2132144922808078e-9, -1.3991589583935709e-9, 5.382058999060575e-16, 1.9484714275467745e-10, -1.0127287556389682e-10, 2.6077347197254926e-11, -5.0904186999932993e-18, -3.3721464474854592e-12},
{-5.9676129019274625e-4, -7.2048954160200106e-5, 6.7823088376673284e-4, -6.4014752602627585e-4, 2.7750107634328704e-4, 1.8197008380465151e-7, -8.4795071170685032e-5, 6.105192082501531e-5, -2.1073920183404862e-5, -8.8585890141255994e-10, 4.5284535953805377e-6, -2.8427815022504408e-6, 8.7082341778646412e-7, 3.6886101871706965e-12, -1.5344695190702061e-7, 8.862466778790695e-8, -2.5184812301826817e-8, -1.0225912098215092e-14, 3.8969470758154777e-9, -2.1267304792235635e-9, 5.7370135528051385e-10, -1.887749850169741e-19, -8.0931538694657866e-11, 4.2382723283449199e-11, -1.1002224534207726e-11},
{1.3324454494800656e-3, -1.9144384985654775e-3, 1.1089369134596637e-3, 9.932404122642299e-7, -5.0874501293093199e-4, 4.2735056665392884e-4, -1.6858853767910799e-4, -8.1301893922784998e-9, 4.5284402370562147e-5, -3.127053674781734e-5, 1.044986828530338e-5, 4.8435226265680926e-11, -2.1482565873456258e-6, 1.329369701097492e-6, -4.0295693092101029e-7, -1.7567877666323291e-13, 7.0145043163668257e-8, -4.040787734999483e-8, 1.1474026743371963e-8, 3.9642746853563325e-18, -1.7804938269892714e-9, 9.7480262548731646e-10, -2.6405338676507616e-10, 5.794875163403742e-18, 3.7647749553543836e-11},
{1.579727660730835e-3, 1.6251626278391582e-4, -2.0633421035543276e-3, 2.1389686185689098e-3, -1.0108559391263003e-3, -3.9912705529919201e-7, 3.6235025084764691e-4, -2.8143901463712154e-4, 1.0449513336495887e-4, 2.1211418491830297e-9, -2.5779417251947842e-5, 1.7281818956040463e-5, -5.6413773872904282e-6, -1.1024320105776174e-11, 1.1223224418895175e-6, -6.8693396379526735e-7, 2.0653236975414887e-7, 4.6714772409838506e-14, -3.5609886164949055e-8, 2.0470855345905963e-8, -5.8091738633283358e-9, -1.332821287582869e-16, 9.0354604391335133e-10, -4.9598782517330834e-10, 1.3481607129399749e-10},
{-4.0725121195140166e-3, 6.4033628338080698e-3, -4.0410161081676618e-3, -2.183732802866233e-6, 2.1740441801254639e-3, -1.9700440518418892e-3, 8.3595469747962458e-4, 1.9445447567109655e-8, -2.5779387120421696e-4, 1.9009987368139304e-4, -6.7696499937438965e-5, -1.4440629666426572e-10, 1.5712512518742269e-5, -1.0304008744776893e-5, 3.304517767401387e-6, 7.9829760242325709e-13, -6.4097794149313004e-7, 3.8894624761300056e-7, -1.1618347644948869e-7, -2.816808630596451e-15, 1.9878012911297093e-8, -1.1407719956357511e-8, 3.2355857064185555e-9, 4.1759468293455945e-20, -5.0423112718105824e-10},
{-5.9475779383993003e-3, -5.4016476789260452e-4, 8.7910413550767898e-3, -9.8576315587856125e-3, 5.0134695031021538e-3, 1.2807521786221875e-6, -2.0626019342754683e-3, 1.7109128573523058e-3, -6.7695312714133799e-4, -6.9011545676562133e-9, 1.8855128143995902e-4, -1.3395215663491969e-4, 4.6263183033528039e-5, 4.0034230613321351e-11, -1.0255652921494033e-5, 6.612086372797651e-6, -2.0913022027253008e-6, -2.0951775649603837e-13, 3.9756029041993247e-7, -2.3956211978815887e-7, 7.1182883382145864e-8, 8.925574873053455e-16, -1.2101547235064676e-8, 6.9350618248334386e-9, -1.9661464453856102e-9},
{1.7402027787522711e-2, -2.9527880945699121e-2, 2.0045875571402799e-2, 7.0289515966903407e-6, -1.2375421071343148e-2, 1.1976293444235254e-2, -5.4156038466518525e-3, -6.3290893396418616e-8, 1.8855118129005065e-3, -1.473473274825001e-3, 5.5515810097708387e-4, 5.2406834412550662e-10, -1.4357913535784836e-4, 9.9181293224943297e-5, -3.3460834749478311e-5, -3.5755837291098993e-12, 7.1560851960630076e-6, -4.5516802628155526e-6, 1.4236576649271475e-6, 1.8803149082089664e-14, -2.6623403898929211e-7, 1.5950642189595716e-7, -4.7187514673841102e-8, -6.5107872958755177e-17, 7.9795091026746235e-9},
{3.0249124160905891e-2, 2.4817436002649977e-3, -4.9939134373457022e-2, 5.9915643009307869e-2, -3.2483207601623391e-2, -5.7212968652103441e-6, 1.5085251778569354e-2, -1.3261324005088445e-2, 5.5515262632426148e-3, 3.0263182257030016e-8, -1.7229548406756723e-3, 1.2893570099929637e-3, -4.6845138348319876e-4, -1.830259937893045e-10, 1.1449739014822654e-4, -7.7378565221244477e-5, 2.5625836246985201e-5, 1.0766165333192814e-12, -5.3246809282422621e-6, 3.349634863064464e-6, -1.0381253128684018e-6, -5.608909920621128e-15, 1.9150821930676591e-7, -1.1418365800203486e-7, 3.3654425209171788e-8},
{-9.9051020880159045e-2, 1.7954011706123486e-1, -1.2989606383463778e-1, -3.1478872752284357e-5, 9.0510635276848131e-2, -9.2828824411184397e-2, 4.4412112839877808e-2, 2.7779236316835888e-7, -1.7229543805449697e-2, 1.4182925050891573e-2, -5.6214161633747336e-3, -2.39598509186381e-9, 1.6029634366079908e-3, -1.1606784674435773e-3, 4.1001337768153873e-4, 1.8365800754090661e-11, -9.5844256563655903e-5, 6.3643062337764708e-5, -2.076250624489065e-5, -1.1806020912804483e-13, 4.2131808239120649e-6, -2.6262241337012467e-6, 8.0770620494930662e-7, 6.0125912123632725e-16, -1.4729737374018841e-7},
{-1.9994542198219728e-1, -1.5056113040026424e-2, 3.6470239469348489e-1, -4.6435192311733545e-1, 2.6640934719197893e-1, 3.4038266027147191e-5, -1.3784338709329624e-1, 1.276467178337056e-1, -5.6213828755200985e-2, -1.753150885483011e-7, 1.9235592956768113e-2, -1.5088821281095315e-2, 5.7401854451350123e-3, 1.0622382710310225e-9, -1.5335082692563998e-3, 1.0819320643228214e-3, -3.7372510193945659e-4, -6.6170909729031985e-12, 8.4263617380909628e-5, -5.5150706827483479e-5, 1.7769536448348069e-5, 3.8827923210205533e-14, -3.53513697488768e-6, 2.1865832130045269e-6, -6.6812849447625594e-7},
{7.2438608504029431e-1, -1.3918010932653375, 1.0654143352413968, 1.876173868950258e-4, -8.2705501176152696e-1, 8.9352433347828414e-1, -4.4971003995291339e-1, -1.6107401567546652e-6, 1.9235590165271091e-1, -1.6597702160042609e-1, 6.8882222681814333e-2, 1.3910091724608687e-8, -2.146911561508663e-2, 1.6228980898865892e-2, -5.9796016172584256e-3, -1.1287469112826745e-10, 1.5167451119784857e-3, -1.0478634293553899e-3, 3.5539072889126421e-4, 8.1704322111801517e-13, -7.7773013442452395e-5, 5.0291413897007722e-5, -1.6035083867000518e-5, 1.2469354315487605e-14, 3.1369106244517615e-6},
{1.6668949727276811, 1.165462765994632e-1, -3.3288393225018906, 4.4692325482864037, -2.6977693045875807, -2.600667859891061e-4, 1.5389017615694539, -1.4937962361134612, 6.8881964633233148e-1, 1.3077482004552385e-6, -2.5762963325596288e-1, 2.1097676102125449e-1, -8.3714408359219882e-2, -7.7920428881354753e-9, 2.4267923064833599e-2, -1.7813678334552311e-2, 6.3970330388900056e-3, 4.9430807090480523e-11, -1.5554602758465635e-3, 1.0561196919903214e-3, -3.5277184460472902e-4, 9.3002334645022459e-14, 7.5285855026557172e-5, -4.8186515569156351e-5, 1.5227271505597605e-5},
{-6.6188298861372935, 1.3397985455142589e+1, -1.0789350606845146e+1, -1.4352254537875018e-3, 9.2333694596189809, -1.0456552819547769e+1, 5.5105526029033471, 1.2024439690716742e-5, -2.5762961164755816, 2.3207442745387179, -1.0045728797216284, -1.0207833290021914e-7, 3.3975092171169466e-1, -2.6720517450757468e-1, 1.0235252851562706e-1, 8.4329730484871625e-10, -2.7998284958442595e-2, 2.0066274144976813e-2, -7.0554368915086242e-3, 1.9402238183698188e-12, 1.6562888105449611e-3, -1.1082898580743683e-3, 3.654545161310169e-4, -5.1290032026971794e-11, -7.6340103696869031e-5},
{-1.7112706061976095e+1, -1.1208044642899116, 3.7131966511885444e+1, -5.2298271025348962e+1, 3.3058589696624618e+1, 2.4791298976200222e-3, -2.061089403411526e+1, 2.088672775145582e+1, -1.0045703956517752e+1, -1.2238783449063012e-5, 4.0770134274221141, -3.473667358470195, 1.4329352617312006, 7.1359914411879712e-8, -4.4797257159115612e-1, 3.4112666080644461e-1, -1.2699786326594923e-1, -2.8953677269081528e-10, 3.3125776278259863e-2, -2.3274087021036101e-2, 8.0399993503648882e-3, -1.177805216235265e-9, -1.8321624891071668e-3, 1.2108282933588665e-3, -3.9479941246822517e-4},
{7.389033153567425e+1, -1.5680141270402273e+2, 1.322177542759164e+2, 1.3692876877324546e-2, -1.2366496885920151e+2, 1.4620689391062729e+2, -8.0365587724865346e+1, -1.1259851148881298e-4, 4.0770132196179938e+1, -3.8210340013273034e+1, 1.719522294277362e+1, 9.3519707955168356e-7, -6.2716159907747034, 5.1168999071852637, -2.0319658112299095, -4.9507215582761543e-9, 5.9626397294332597e-1, -4.4220765337238094e-1, 1.6079998700166273e-1, -2.4733786203223402e-8, -4.0307574759979762e-2, 2.7849050747097869e-2, -9.4751858992054221e-3, 6.419922235909132e-6, 2.1250180774699461e-3},
{2.1216837098382522e+2, 1.3107863022633868e+1, -4.9698285932871748e+2, 7.3121595266969204e+2, -4.8213821720890847e+2, -2.8817248692894889e-2, 3.2616720302947102e+2, -3.4389340280087117e+2, 1.7195193870816232e+2, 1.4038077378096158e-4, -7.52594195897599e+1, 6.651969984520934e+1, -2.8447519748152462e+1, -7.613702615875391e-7, 9.5402237105304373, -7.5175301113311376, 2.8943997568871961, -4.6612194999538201e-7, -8.0615149598794088e-1, 5.8483006570631029e-1, -2.0845408972964956e-1, 1.4765818959305817e-4, 5.1000433863753019e-2, -3.3066252141883665e-2, 1.5109265210467774e-2},
{-9.8959643098322368e+2, 2.1925555360905233e+3, -1.9283586782723356e+3, -1.5925738122215253e-1, 1.9569985945919857e+3, -2.4072514765081556e+3, 1.3756149959336496e+3, 1.2920735237496668e-3, -7.525941715948055e+2, 7.3171668742208716e+2, -3.4137023466220065e+2, -9.9857390260608043e-6, 1.3356313181291573e+2, -1.1276295161252794e+2, 4.6310396098204458e+1, -7.9237387133614756e-6, -1.4510726927018646e+1, 1.1111771248100563e+1, -4.1690817945270892, 3.1008219800117808e-3, 1.1220095449981468, -7.6052379926149916e-1, 3.6262236505085254e-1, 2.216867741940747e-1, 4.8683443692930507e-1}};
int k, n, sgn;
int maxpow = 0;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
accscalar_t lambda = x / a;
accscalar_t sigma = (x - a) / a;
accscalar_t eta, res, ck, ckterm, term, absterm;
accscalar_t absoldterm = INFINITY;
accscalar_t etapow[25] = {1};
accscalar_t sum = 0;
accscalar_t afac = 1;
if (igam) {
sgn = -1;
}
else {
sgn = 1;
}
if (lambda > 1) {
eta = ::sqrt(-2 * (::log1p(sigma) - sigma));
}
else if (lambda < 1) {
eta = -::sqrt(-2 * (::log1p(sigma) - sigma));
}
else {
eta = 0;
}
res = 0.5 * ::erfc(sgn * eta * ::sqrt(a / 2));
for (k = 0; k < 25; k++) {
ck = d[k][0];
for (n = 1; n < 25; n++) {
if (n > maxpow) {
etapow[n] = eta * etapow[n-1];
maxpow += 1;
}
ckterm = d[k][n]*etapow[n];
ck += ckterm;
if (std::fabs(ckterm) < MACHEP * std::fabs(ck)) {
break;
}
}
term = ck * afac;
absterm = std::fabs(term);
if (absterm > absoldterm) {
break;
}
sum += term;
if (absterm < MACHEP * std::fabs(sum)) {
break;
}
absoldterm = absterm;
afac /= a;
}
res += sgn * ::exp(-0.5 * a * eta * eta) * sum / ::sqrt(2 * 3.1415926535 * a);
return res;
}
template <typename scalar_t>
__host__ __device__ scalar_t _igamc_helper_continued_fraction(scalar_t a, scalar_t x) {
// Compute igamc using DLMF 8.9.2. [igam1]
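  // The continued fraction for Q(a, x) / (x^a * exp(-x) / Gamma(a)) is evaluated with forward
  // recurrences: pk/qk is the current convergent, (pkm1, qkm1) and (pkm2, qkm2) the two
  // previous numerator/denominator pairs. Both are rescaled by BIGINV whenever |pk| exceeds
  // BIG to avoid overflow, and iteration stops once successive convergents agree to MACHEP.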
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
int i;
accscalar_t ans, ax, c, yc, r, t, y, z;
accscalar_t pk, pkm1, pkm2, qk, qkm1, qkm2;
static const int MAXITER = 2000;
static const accscalar_t MACHEP = std::is_same<accscalar_t, double>::value ?
1.11022302462515654042E-16 : 5.9604644775390625E-8;
static const accscalar_t BIG = std::is_same<accscalar_t,double>::value ?
4.503599627370496e15 : 16777216.;
static const accscalar_t BIGINV = std::is_same<accscalar_t,double>::value ?
2.22044604925031308085e-16 : 5.9604644775390625E-8;
ax = _igam_helper_fac(a, x);
if (ax == 0.0) {
return 0.0;
}
/* continued fraction */
y = 1.0 - a;
z = x + y + 1.0;
c = 0.0;
pkm2 = 1.0;
qkm2 = x;
pkm1 = x + 1.0;
qkm1 = z * x;
ans = pkm1 / qkm1;
for (i = 0; i < MAXITER; i++) {
c += 1.0;
y += 1.0;
z += 2.0;
yc = y * c;
pk = pkm1 * z - pkm2 * yc;
qk = qkm1 * z - qkm2 * yc;
if (qk != 0) {
r = pk / qk;
t = ::fabs((ans - r) / r);
ans = r;
}
else {
t = 1.0;
}
pkm2 = pkm1;
pkm1 = pk;
qkm2 = qkm1;
qkm1 = qk;
if (::fabs(pk) > BIG) {
pkm2 *= BIGINV;
pkm1 *= BIGINV;
qkm2 *= BIGINV;
qkm1 *= BIGINV;
}
if (t <= MACHEP) {
break;
}
}
return ans * ax;
}
template <typename scalar_t>
__noinline__ __host__ __device__ scalar_t calc_igammac(scalar_t a, scalar_t x) {
/* the calculation of the regularized upper incomplete gamma function
* is done differently based on the values of a and x:
* - if x and/or a is at the boundary of defined region, then assign the
* result at the boundary
* - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
* Large Parameter (see DLMF 8.12.4 [igam1])
 * - if x > 1.1 and x < a, using the subtraction from the regularized lower
* incomplete gamma
* - otherwise, calculate the series from [igam2] eq (5)
*/
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t absxma_a;
static const accscalar_t SMALL = 20.0;
static const accscalar_t LARGE = 200.0;
static const accscalar_t SMALLRATIO = 0.3;
static const accscalar_t LARGERATIO = 4.5;
if ((x < 0) || (a < 0)) {
// out of defined-region of the function
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
else if (a == 0) {
if (x > 0) {
return 0.0;
}
else {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
}
else if (x == 0) {
return 1.0;
}
else if (::isinf(static_cast<accscalar_t>(a))) {
if (::isinf(static_cast<accscalar_t>(x))) {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
return 1.0;
}
else if (::isinf(static_cast<accscalar_t>(x))) {
return 0.0;
}
absxma_a = ::fabs(x - a) / a;
if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
return _igam_helper_asymptotic_series(a, x, 0);
}
else if ((a > LARGE) && (absxma_a < LARGERATIO / ::sqrt(a))) {
return _igam_helper_asymptotic_series(a, x, 0);
}
if (x > 1.1) {
if (x < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_continued_fraction(a, x);
}
}
else if (x <= 0.5) {
if (-0.4 / ::log(x) < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_series(a, x);
}
}
else {
if (x * 1.1 < a) {
return 1.0 - _igam_helper_series(a, x);
}
else {
return _igamc_helper_series(a, x);
}
}
}
// NOTE: this __noinline__ is important -- otherwise, observed compile times significantly
// increase. The same kernel seems to get recompiled multiple times via gpu_kernel_with_scalars,
// multiple dtypes, etc.
template <typename scalar_t>
__noinline__ __host__ __device__ scalar_t calc_igamma(scalar_t a, scalar_t x) {
/* the calculation of the regularized lower incomplete gamma function
* is done differently based on the values of a and x:
* - if x and/or a is at the boundary of defined region, then assign the
* result at the boundary
* - if a is large and a ~ x, then using Uniform Asymptotic Expansions for
* Large Parameter (see DLMF 8.12.3 [igam1])
 * - if x > 1 and x > a, using the subtraction from the regularized upper
* incomplete gamma
* - otherwise, calculate the series from [igam2] eq (4)
*/
using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>;
accscalar_t absxma_a;
static const accscalar_t SMALL = 20.0;
static const accscalar_t LARGE = 200.0;
static const accscalar_t SMALLRATIO = 0.3;
static const accscalar_t LARGERATIO = 4.5;
// boundary values following SciPy
if ((x < 0) || (a < 0)) {
// out of defined-region of the function
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
else if (a == 0) {
if (x > 0) {
return 1.0;
}
else {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
}
else if (x == 0) {
return 0.0; // zero integration limit
}
else if (::isinf(static_cast<accscalar_t>(a))) {
if (::isinf(static_cast<accscalar_t>(x))) {
return std::numeric_limits<accscalar_t>::quiet_NaN();
}
return 0.0;
}
else if (::isinf(static_cast<accscalar_t>(x))) {
return 1.0;
}
/* Asymptotic regime where a ~ x. */
absxma_a = ::fabs(x - a) / a;
if ((a > SMALL) && (a < LARGE) && (absxma_a < SMALLRATIO)) {
return _igam_helper_asymptotic_series(a, x, 1);
}
else if ((a > LARGE) && (absxma_a < LARGERATIO / ::sqrt(a))) {
return _igam_helper_asymptotic_series(a, x, 1);
}
if ((x > 1.0) && (x > a)) {
return 1.0 - calc_igammac(a, x);
}
return _igam_helper_series(a, x);
}
}
// end of regularized lower & upper incomplete gamma
namespace at { namespace native {
void igamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "igamma_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return calc_igamma(a, b);
});
});
}
void igammac_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "igammac_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return calc_igammac(a, b);
});
});
}
REGISTER_DISPATCH(igamma_stub, &igamma_kernel_cuda);
REGISTER_DISPATCH(igammac_stub, &igammac_kernel_cuda);
// DO NOT ADD ANY NEW KERNELS HERE
// CUDA compilation times grow quickly. It's perfectly acceptable to have a file per kernel.
}} // namespace at::native
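// A minimal, hedged usage sketch (not part of the original file): assuming the usual ATen
// codegen exposes at::igamma / at::igammac for the stubs registered above, the CUDA kernels
// in this file are reached simply by calling those ops on CUDA tensors, e.g.
//
//   // #include <ATen/ATen.h>
//   // auto a = at::rand({4}, at::kCUDA) * 5 + 0.5;   // shapes/values chosen arbitrarily
//   // auto x = at::rand({4}, at::kCUDA) * 5;
//   // auto p = at::igamma(a, x);    // regularized lower incomplete gamma P(a, x)
//   // auto q = at::igammac(a, x);   // regularized upper incomplete gamma Q(a, x)
//   // p + q should be ~1 elementwise, a quick sanity check of the two code paths.
//
// Kept entirely in comments so this file still defines no additional kernels.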
|
f0757c09cf816d3158ebe1d17281981a3574c8c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/mapping_by_similarity_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void mapping_forward_gpu_kernel(const int n_kernels, const Dtype* data_im1, const Dtype* data_im2,
const int channels, const int height, const int width,
const Dtype* data_sim, Dtype alpha,
Dtype* output_im1, Dtype* output_im2){
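  // For each pixel (h, w) with similarity beta read from data_sim, the two images are blended
  // channel-wise as
  //   out1 = (x1 + alpha*(1-beta)*x2) / (1 + alpha - alpha*beta)
  //   out2 = (x2 + alpha*(1-beta)*x1) / (1 + alpha - alpha*beta)
  // so pixels judged identical (beta = 1) pass through unchanged, while dissimilar pixels
  // (beta = 0) are pulled toward each other with mixing weight alpha.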
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* data_im1_ptr = data_im1;
data_im1_ptr += h * width + w;
const Dtype* data_im2_ptr = data_im2;
data_im2_ptr += h * width + w;
Dtype* output_im1_ptr = output_im1;
output_im1_ptr += h * width + w;
Dtype* output_im2_ptr = output_im2;
output_im2_ptr += h * width + w;
Dtype beta = *(data_sim + h * width + w);
const int channel_size = width * height;
for (int c = 0; c < channels; ++c) {
Dtype x1 = *(data_im1_ptr + c * channel_size);
Dtype x2 = *(data_im2_ptr + c * channel_size);
output_im1_ptr[c * channel_size] = (x1 + alpha*(1-beta) * x2)/(1 + alpha - alpha*beta);
output_im2_ptr[c * channel_size] = (x2 + alpha*(1-beta) * x1)/(1 + alpha - alpha*beta);
}
}
}
template <typename Dtype>
__global__ void image_backward_gpu_kernel(const int n_kernels, const Dtype* top_diff1, const Dtype* top_diff2,
const int channels, const int height, const int width,
const Dtype* data_sim, Dtype alpha,
Dtype* bottom_diff1, Dtype* bottom_diff2){
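  // Per-pixel, per-channel gradients of the forward blend w.r.t. the two input images:
  //   d(out1)/d(x1) = d(out2)/d(x2) = 1 / (1 + alpha - alpha*beta)
  //   d(out1)/d(x2) = d(out2)/d(x1) = alpha*(1-beta) / (1 + alpha - alpha*beta)
  // Each incoming top diff is propagated through both factors and accumulated (+=) into the
  // corresponding bottom diff, which is why the caller zeroes bottom_diff beforehand.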
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* top_diff1_ptr = top_diff1;
top_diff1_ptr += h * width + w;
const Dtype* top_diff2_ptr = top_diff2;
top_diff2_ptr += h * width + w;
Dtype* bottom_diff1_ptr = bottom_diff1;
bottom_diff1_ptr += h * width + w;
Dtype* bottom_diff2_ptr = bottom_diff2;
bottom_diff2_ptr += h * width + w;
const int channel_size = width * height;
Dtype beta = *(data_sim + h * width + w);
for (int c = 0; c < channels; ++c) {
Dtype d1_diff = top_diff1_ptr[c * channel_size];
Dtype d2_diff = top_diff2_ptr[c * channel_size];
Dtype factor = 1+alpha-alpha*beta;
bottom_diff1_ptr[c * channel_size] += 1/factor * d1_diff + alpha*(1-beta)/factor*d2_diff;
bottom_diff2_ptr[c * channel_size] += 1/factor * d2_diff + alpha*(1-beta)/factor*d1_diff;
}
}
}
template <typename Dtype>
__global__ void similarity_backward_gpu_kernel(const int n_kernels, const Dtype* bottom_data1, const Dtype* bottom_data2,
const int channels, const int height, const int width,
const Dtype* top_diff1, const Dtype* top_diff2,
const Dtype* data_sim,
Dtype alpha,
Dtype* diff_sim){
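  // Gradient of the forward blend w.r.t. the single-channel similarity map beta:
  //   d(out1)/d(beta) = alpha*(x1 - x2) / (1 + alpha - alpha*beta)^2
  //   d(out2)/d(beta) = alpha*(x2 - x1) / (1 + alpha - alpha*beta)^2
  // These are weighted by the incoming top diffs and summed over all channels into the one
  // diff_sim value for this pixel (hence the += and the caller zeroing sim_diff first).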
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* bottom_data1_ptr = bottom_data1;
bottom_data1_ptr += h * width + w;
const Dtype* bottom_data2_ptr = bottom_data2;
bottom_data2_ptr += h * width + w;
const Dtype* top_diff1_ptr = top_diff1;
top_diff1_ptr += h * width + w;
const Dtype* top_diff2_ptr = top_diff2;
top_diff2_ptr += h * width + w;
Dtype* diff_sim_ptr = diff_sim;
diff_sim_ptr += h * width + w;
const int channel_size = width * height;
Dtype beta = data_sim[h * width + w];
for (int c = 0; c < channels; ++c) {
Dtype x1 = bottom_data1_ptr[c * channel_size];
Dtype x2 = bottom_data2_ptr[c * channel_size];
Dtype factor = 1 + alpha - alpha * beta;
Dtype factor1 =(alpha*x1 - alpha*x2)/(factor*factor);
Dtype factor2 =(alpha*x2 - alpha*x1)/(factor*factor);
// Accumulate diffs
Dtype d1_diff = top_diff1_ptr[c * channel_size];
Dtype d2_diff = top_diff2_ptr[c * channel_size];
*diff_sim_ptr += factor1 * d1_diff + factor2 * d2_diff;
}
}
}
template <typename Dtype>
void MappingBySimilarityLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if(false){
this->Forward_cpu(bottom, top);
}
const Dtype *bottom_data = bottom[0]->gpu_data();
const Dtype *sim_data = bottom[1]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int channel_size = bottom[0]->count(2);
// We launch width * height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
const Dtype *s1 = bottom_data + 2*n * bottom[0]->count(1);
const Dtype *s2 = bottom_data + (2*n+1) * bottom[0]->count(1);
Dtype *d1 = top_data + 2 * n * top[0]->count(1);
Dtype *d2 = top_data + (2*n+1) * top[0]->count(1);
hipLaunchKernelGGL(( mapping_forward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, s1, s2, channels, height, width,
sim_data + n * bottom[1]->count(1),
//alpha
alpha_,
//Output
d1,d2);
}
}
template <typename Dtype>
void MappingBySimilarityLayer<Dtype>::Backward_gpu(const std::vector<caffe::Blob<Dtype> *> &top,
const std::vector<bool> &propagate_down,
const std::vector<caffe::Blob<Dtype> *> &bottom) {
if (propagate_down[0]) {
if(false){
this->Backward_cpu(top, propagate_down, bottom);
}
// gradient w.r.t. image. Note that we will accumulate diffs.
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype *top_diff = top[0]->gpu_diff();
const Dtype *sim_data = bottom[1]->gpu_data();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
// Clear grad
caffe_gpu_set(bottom[0]->count(0), Dtype(0.0), bottom_diff);
int channel_size = bottom[0]->count(2);
// We launch width*height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
Dtype *diff1 = bottom_diff + 2*n * bottom[0]->count(1) ;
Dtype *diff2 = bottom_diff + (2*n+1) * bottom[0]->count(1);
const Dtype *d1 = top_diff + 2*n * top[0]->count(1) ;
const Dtype *d2 = top_diff + (2*n+1) * top[0]->count(1);
hipLaunchKernelGGL(( image_backward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, d1, d2, channels, height, width,
sim_data + n * bottom[1]->count(1),
alpha_,
//Output
diff1,diff2);
}
}
if (propagate_down[1]) {
Dtype* sim_diff = bottom[1]->mutable_gpu_diff();
const Dtype *sim_data = bottom[1]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype *top_diff = top[0]->gpu_diff();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int channel_size = bottom[0]->count(2);
// Clear grads. Because we need to accumulate diffs.
// Note that the similarity map has only one channel.
caffe_gpu_set(bottom[1]->count(0), Dtype(0.0), sim_diff);
// We launch width*height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
const Dtype *s1 = bottom_data + 2*n * bottom[0]->count(1) ;
const Dtype *s2 = bottom_data + (2*n+1) * bottom[0]->count(1) ;
Dtype *diff = sim_diff + n * bottom[1]->count(1) ;
const Dtype *d1 = top_diff + 2*n * top[0]->count(1) ;
const Dtype *d2 = top_diff + (2*n+1) * top[0]->count(1);
hipLaunchKernelGGL(( similarity_backward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, s1, s2, channels, height, width,
d1, d2,
sim_data + n * bottom[1]->count(1),
alpha_,
//Output
diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MappingBySimilarityLayer);
} // namespace caffe
| f0757c09cf816d3158ebe1d17281981a3574c8c3.cu | #include <vector>
#include "caffe/layers/mapping_by_similarity_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void mapping_forward_gpu_kernel(const int n_kernels, const Dtype* data_im1, const Dtype* data_im2,
const int channels, const int height, const int width,
const Dtype* data_sim, Dtype alpha,
Dtype* output_im1, Dtype* output_im2){
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* data_im1_ptr = data_im1;
data_im1_ptr += h * width + w;
const Dtype* data_im2_ptr = data_im2;
data_im2_ptr += h * width + w;
Dtype* output_im1_ptr = output_im1;
output_im1_ptr += h * width + w;
Dtype* output_im2_ptr = output_im2;
output_im2_ptr += h * width + w;
Dtype beta = *(data_sim + h * width + w);
const int channel_size = width * height;
for (int c = 0; c < channels; ++c) {
Dtype x1 = *(data_im1_ptr + c * channel_size);
Dtype x2 = *(data_im2_ptr + c * channel_size);
output_im1_ptr[c * channel_size] = (x1 + alpha*(1-beta) * x2)/(1 + alpha - alpha*beta);
output_im2_ptr[c * channel_size] = (x2 + alpha*(1-beta) * x1)/(1 + alpha - alpha*beta);
}
}
}
template <typename Dtype>
__global__ void image_backward_gpu_kernel(const int n_kernels, const Dtype* top_diff1, const Dtype* top_diff2,
const int channels, const int height, const int width,
const Dtype* data_sim, Dtype alpha,
Dtype* bottom_diff1, Dtype* bottom_diff2){
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* top_diff1_ptr = top_diff1;
top_diff1_ptr += h * width + w;
const Dtype* top_diff2_ptr = top_diff2;
top_diff2_ptr += h * width + w;
Dtype* bottom_diff1_ptr = bottom_diff1;
bottom_diff1_ptr += h * width + w;
Dtype* bottom_diff2_ptr = bottom_diff2;
bottom_diff2_ptr += h * width + w;
const int channel_size = width * height;
Dtype beta = *(data_sim + h * width + w);
for (int c = 0; c < channels; ++c) {
Dtype d1_diff = top_diff1_ptr[c * channel_size];
Dtype d2_diff = top_diff2_ptr[c * channel_size];
Dtype factor = 1+alpha-alpha*beta;
bottom_diff1_ptr[c * channel_size] += 1/factor * d1_diff + alpha*(1-beta)/factor*d2_diff;
bottom_diff2_ptr[c * channel_size] += 1/factor * d2_diff + alpha*(1-beta)/factor*d1_diff;
}
}
}
template <typename Dtype>
__global__ void similarity_backward_gpu_kernel(const int n_kernels, const Dtype* bottom_data1, const Dtype* bottom_data2,
const int channels, const int height, const int width,
const Dtype* top_diff1, const Dtype* top_diff2,
const Dtype* data_sim,
Dtype alpha,
Dtype* diff_sim){
CUDA_KERNEL_LOOP(index, n_kernels) {
const int w = index % width;
const int h = index / width;
const Dtype* bottom_data1_ptr = bottom_data1;
bottom_data1_ptr += h * width + w;
const Dtype* bottom_data2_ptr = bottom_data2;
bottom_data2_ptr += h * width + w;
const Dtype* top_diff1_ptr = top_diff1;
top_diff1_ptr += h * width + w;
const Dtype* top_diff2_ptr = top_diff2;
top_diff2_ptr += h * width + w;
Dtype* diff_sim_ptr = diff_sim;
diff_sim_ptr += h * width + w;
const int channel_size = width * height;
Dtype beta = data_sim[h * width + w];
for (int c = 0; c < channels; ++c) {
Dtype x1 = bottom_data1_ptr[c * channel_size];
Dtype x2 = bottom_data2_ptr[c * channel_size];
Dtype factor = 1 + alpha - alpha * beta;
Dtype factor1 =(alpha*x1 - alpha*x2)/(factor*factor);
Dtype factor2 =(alpha*x2 - alpha*x1)/(factor*factor);
// Accumulate diffs
Dtype d1_diff = top_diff1_ptr[c * channel_size];
Dtype d2_diff = top_diff2_ptr[c * channel_size];
*diff_sim_ptr += factor1 * d1_diff + factor2 * d2_diff;
}
}
}
template <typename Dtype>
void MappingBySimilarityLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if(false){
this->Forward_cpu(bottom, top);
}
const Dtype *bottom_data = bottom[0]->gpu_data();
const Dtype *sim_data = bottom[1]->gpu_data();
Dtype *top_data = top[0]->mutable_gpu_data();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int channel_size = bottom[0]->count(2);
// We launch width * height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
const Dtype *s1 = bottom_data + 2*n * bottom[0]->count(1);
const Dtype *s2 = bottom_data + (2*n+1) * bottom[0]->count(1);
Dtype *d1 = top_data + 2 * n * top[0]->count(1);
Dtype *d2 = top_data + (2*n+1) * top[0]->count(1);
mapping_forward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, s1, s2, channels, height, width,
sim_data + n * bottom[1]->count(1),
//alpha
alpha_,
//Output
d1,d2);
}
}
template <typename Dtype>
void MappingBySimilarityLayer<Dtype>::Backward_gpu(const std::vector<caffe::Blob<Dtype> *> &top,
const std::vector<bool> &propagate_down,
const std::vector<caffe::Blob<Dtype> *> &bottom) {
if (propagate_down[0]) {
if(false){
this->Backward_cpu(top, propagate_down, bottom);
}
// gradient w.r.t. image. Note that we will accumulate diffs.
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype *top_diff = top[0]->gpu_diff();
const Dtype *sim_data = bottom[1]->gpu_data();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
// Clear grad
caffe_gpu_set(bottom[0]->count(0), Dtype(0.0), bottom_diff);
int channel_size = bottom[0]->count(2);
// We launch width*height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
Dtype *diff1 = bottom_diff + 2*n * bottom[0]->count(1) ;
Dtype *diff2 = bottom_diff + (2*n+1) * bottom[0]->count(1);
const Dtype *d1 = top_diff + 2*n * top[0]->count(1) ;
const Dtype *d2 = top_diff + (2*n+1) * top[0]->count(1);
image_backward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, d1, d2, channels, height, width,
sim_data + n * bottom[1]->count(1),
alpha_,
//Output
diff1,diff2);
}
}
if (propagate_down[1]) {
Dtype* sim_diff = bottom[1]->mutable_gpu_diff();
const Dtype *sim_data = bottom[1]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype *top_diff = top[0]->gpu_diff();
int n_images = bottom[0]->shape(0);
int channels = bottom[0]->shape(1);
int height = bottom[0]->shape(2);
int width = bottom[0]->shape(3);
int channel_size = bottom[0]->count(2);
// Clear grads. Because we need to accumulate diffs.
// Note that the similarity map has only one channel.
caffe_gpu_set(bottom[1]->count(0), Dtype(0.0), sim_diff);
// We launch width*height kernels.
const int num_kernels = width * height;
for (int n = 0; n < n_images/2; ++n) {
const Dtype *s1 = bottom_data + 2*n * bottom[0]->count(1) ;
const Dtype *s2 = bottom_data + (2*n+1) * bottom[0]->count(1) ;
Dtype *diff = sim_diff + n * bottom[1]->count(1) ;
const Dtype *d1 = top_diff + 2*n * top[0]->count(1) ;
const Dtype *d2 = top_diff + (2*n+1) * top[0]->count(1);
similarity_backward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, s1, s2, channels, height, width,
d1, d2,
sim_data + n * bottom[1]->count(1),
alpha_,
//Output
diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MappingBySimilarityLayer);
} // namespace caffe
|
fdf02b36d9bb0fef81c566ecde5f77f770b4292e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
extern __shared__ unsigned int private_histogram[];
//Initializing private histogram bins
int bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
private_histogram[threadIdx.x + bin_stride] = 0;
bin_stride += blockDim.x;
}
__syncthreads();
//Computation of private histogram
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
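  // Grid-stride loop: thread i processes elements i, i + stride, i + 2*stride, ..., so the
  // kernel remains correct for any num_elements, even if fewer threads are launched than
  // there are inputs. Updates go to the block's shared-memory histogram to keep atomics cheap.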
while(i < num_elements) {
atomicAdd(&private_histogram[input[i]], 1);
i += stride;
}
__syncthreads();
//Merging private histogram bins with global histogram bins
bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
bin_stride += blockDim.x;
}
__syncthreads();
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
// INSERT CODE HERE
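  // One thread per input element (rounded up to BLOCK_SIZE). The third launch parameter
  // reserves num_bins unsigned ints of dynamic shared memory per block for the private
  // histogram. The global bins array is only accumulated into, so it is assumed the caller
  // has zero-initialized it before this call.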
dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( fillBins), dim3(dimGrid), dim3(dimBlock), num_bins * sizeof(unsigned int), 0, input, bins, num_elements, num_bins);
}
| fdf02b36d9bb0fef81c566ecde5f77f770b4292e.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
#define BLOCK_SIZE 512
// INSERT KERNEL(S) HERE
__global__ void fillBins(unsigned int* input, unsigned int* bins, unsigned int num_elements, unsigned int num_bins) {
extern __shared__ unsigned int private_histogram[];
//Initializing private histogram bins
int bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
private_histogram[threadIdx.x + bin_stride] = 0;
bin_stride += blockDim.x;
}
__syncthreads();
//Computation of private histogram
int i = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while(i < num_elements) {
atomicAdd(&private_histogram[input[i]], 1);
i += stride;
}
__syncthreads();
//Merging private histogram bins with global histogram bins
bin_stride = 0;
while((threadIdx.x + bin_stride) < num_bins) {
atomicAdd(&bins[threadIdx.x + bin_stride], private_histogram[threadIdx.x + bin_stride]);
bin_stride += blockDim.x;
}
__syncthreads();
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
unsigned int num_bins) {
// INSERT CODE HERE
dim3 dimGrid((num_elements - 1)/BLOCK_SIZE + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
fillBins<<<dimGrid, dimBlock, num_bins * sizeof(unsigned int)>>>(input, bins, num_elements, num_bins);
}
|
31dc86f233426125035813e74a4904df5e8c7288.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Matrix Multiplication
*/
#include "matrix_multiplication.h"
int main (int argc, char **argv)
{
double rtclock();
double clkbegin, clkend, t;
bool is_same;
srand(time(NULL));
extern char *optarg;
extern int optind;
int character, error = 0;
char usage[] = "usage: %s [-N <size of matrix>]\n";
while ((character = getopt(argc, argv, "N:?")) != -1)
switch(character)
{
case 'N':
matrix_size = atoi(optarg);
break;
case '?':
error = 1;
break;
}
if (error)
{
printf(usage, argv[0]);
exit(error);
}
if (matrix_size < 1)
matrix_size = DEFAULT_MATRIX_SIZE;
printf("Matrix size is: %i x %i\n", matrix_size, matrix_size);
// define the matrices to multiply
int i, j, k;
  X = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Y = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zcpu = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zgpu = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zgpu_s = (int *)malloc(matrix_size * matrix_size * sizeof(int));
for (i = 0; i < matrix_size * matrix_size; i++)
{
X[i] = rand();
Y[i] = rand();
Zcpu[i] = 0;
Zgpu[i] = 0;
Zgpu_s[i] = 0;
}
// start the clock: CPU
clkbegin = rtclock();
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
for (k = 0; k < matrix_size; k++)
// multiply the matrix on the CPU
Zcpu[matrix_size * i + j] +=
X[matrix_size * i + k] * Y[matrix_size * k + j];
// end the clock: CPU
clkend = rtclock();
t = clkend-clkbegin;
printf("CPU matrix-multiplication finished.\n");
printf("CPU Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// start the clock: GPU non-shared-memory
clkbegin = rtclock();
matrix_multiplication(X, Y, Zgpu);
// end the clock: GPU non-shared-memory
clkend = rtclock();
t = clkend-clkbegin;
printf("\nGPU non-shared-memory matrix-multiplication finished.\n");
printf("GPU non-shared-memory Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// check if multiplication for GPU non-shared-memory matches CPU...
is_same = true;
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
if (Zcpu[matrix_size * i + j] != Zgpu[matrix_size * i + j])
is_same = false;
if (is_same)
printf("GPU non-shared-memory Matrix-Multiplication completed successfully.\n");
else
printf("GPU non-shared-memory Matrix-Multiplication failed.\n");
// start the clock: GPU shared-memory
clkbegin = rtclock();
  matrix_multiplication_shared(X, Y, Zgpu_s);
// end the clock: GPU shared-memory
clkend = rtclock();
t = clkend-clkbegin;
printf("\nGPU shared-memory matrix-multiplication finished.\n");
printf("GPU shared-memory Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// check if multiplication for GPU shared-memory matches CPU...
is_same = true;
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
if (Zcpu[matrix_size * i + j] != Zgpu_s[matrix_size * i + j])
is_same = false;
if (is_same)
printf("GPU shared-memory Matrix-Multiplication completed successfully.\n");
else
printf("GPU shared-memory Matrix-Multiplication failed.\n");
return 0;
}
/**
* Begin Non-Shared Memory Section
*
* This will take in two matrices (A, B) and produce C=A*B.
*/
void matrix_multiplication(int *A, int *B, int *C)
{
int mem_size = matrix_size * matrix_size * sizeof(int);
int *gpu_A, *gpu_B, *gpu_C;
hipError_t error_code;
// Load matrix A into GPU device memory
error_code = hipMalloc(&gpu_A, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix A failed: %s\n",hipGetErrorString(error_code));
error_code = hipMemcpy(gpu_A, A, mem_size, hipMemcpyHostToDevice);
if (error_code != hipSuccess)
printf("Copy matrix A to gpu device failed: %s\n",hipGetErrorString(error_code));
// Load matrix B into GPU device memory
error_code = hipMalloc(&gpu_B, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix B failed: %s\n",hipGetErrorString(error_code));
error_code = hipMemcpy(gpu_B, B, mem_size, hipMemcpyHostToDevice);
if (error_code != hipSuccess)
printf("Copy matrix B to gpu device failed: %s\n",hipGetErrorString(error_code));
// Allocate matrix C into GPU device memory
error_code = hipMalloc(&gpu_C, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix C failed: %s\n",hipGetErrorString(error_code));
// Invoke the CUDA kernel to actually multiply the matrices
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((matrix_size + dimBlock.x - 1) / dimBlock.x,
(matrix_size + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( matrix_multiplication_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_A, gpu_B, gpu_C, matrix_size);
error_code = hipDeviceSynchronize();
if (error_code != hipSuccess)
printf("Running CUDA kernel failed: %s\n", hipGetErrorString(error_code));
// Load matrix C from GPU device memory to CPU
error_code = hipMemcpy(C, gpu_C, mem_size, hipMemcpyDeviceToHost);
if (error_code != hipSuccess)
printf("Copy matrix C from gpu device failed: %s\n",hipGetErrorString(error_code));
// Free GPU device memory
hipFree(gpu_A);
hipFree(gpu_B);
hipFree(gpu_C);
}
/**
* KERNEL: non-shared memory matrix multiplication
*
* Each thread computes one element of matrix C by multiplying a single row from matrix A
* with a single column from matrix B and accumulating the results into the value c.
*/
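// Design note: every thread here streams a full row of A and a full column of B (2*N reads)
// straight from global memory; the shared-memory variant further below loads BLOCK_SIZE x
// BLOCK_SIZE tiles cooperatively, cutting each thread's global reads to roughly 2*N/BLOCK_SIZE.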
__global__ void matrix_multiplication_kernel(int *A, int *B, int *C, int N)
{
int c = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= N || col >= N)
return;
for (int i = 0; i < N; i++)
c += (A[row * N + i]) * (B[i * N + col]);
C[row * N + col] = c;
}
/**
* Begin Shared Memory Section
*
* This will take in two matrices (A, B) and produce C=A*B.
*/
void matrix_multiplication_shared(int *A, int *B, int *C)
{
int mem_size = matrix_size * matrix_size * sizeof(int);
int *gpu_A, *gpu_B, *gpu_C;
hipError_t error_code;
// Load matrix A into GPU device memory
error_code = hipMalloc(&gpu_A, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix A failed: %s\n",hipGetErrorString(error_code));
error_code = hipMemcpy(gpu_A, A, mem_size, hipMemcpyHostToDevice);
if (error_code != hipSuccess)
printf("Copy matrix A to gpu device failed: %s\n",hipGetErrorString(error_code));
// Load matrix B into GPU device memory
error_code = hipMalloc(&gpu_B, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix B failed: %s\n",hipGetErrorString(error_code));
error_code = hipMemcpy(gpu_B, B, mem_size, hipMemcpyHostToDevice);
if (error_code != hipSuccess)
printf("Copy matrix B to gpu device failed: %s\n",hipGetErrorString(error_code));
// Allocate matrix C into GPU device memory
error_code = hipMalloc(&gpu_C, mem_size);
if (error_code != hipSuccess)
printf("CUDA malloc matrix C failed: %s\n",hipGetErrorString(error_code));
// Invoke the CUDA kernel to actually multiply the matrices
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(matrix_size / dimBlock.x, matrix_size / dimBlock.y);
hipLaunchKernelGGL(( matrix_mult_shared_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_A, gpu_B, gpu_C, matrix_size);
error_code = hipDeviceSynchronize();
if (error_code != hipSuccess)
printf("Running CUDA kernel failed: %s\n", hipGetErrorString(error_code));
// Load matrix C from GPU device memory to CPU
error_code = hipMemcpy(C, gpu_C, mem_size, hipMemcpyDeviceToHost);
if (error_code != hipSuccess)
printf("Copy matrix C from gpu device failed: %s\n",hipGetErrorString(error_code));
// Free GPU device memory
hipFree(gpu_A);
hipFree(gpu_B);
hipFree(gpu_C);
}
/**
* KERNEL: shared-memory matrix multiplication
*
* Each thread computes one element of a sub-matrix of matrix C by multiplying a single
* row from a sub-matrix of matrix A with a single column from a sub-matrix of matrix B
 * and accumulating the results into a partial sum for the sub-matrix of matrix C. These
* values are then accumulated into the resulting matrix C.
*/
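// Note: this tiled kernel assumes matrix_size (N) is a multiple of BLOCK_SIZE; the launch in
// matrix_multiplication_shared() uses N / BLOCK_SIZE blocks per dimension and the kernel does
// no bounds checking, so other sizes would need padding or explicit edge handling.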
__global__ void matrix_mult_shared_kernel(int *A, int *B, int *C, int N)
{
// Block row and column
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int *C_sub = get_sub_matrix(C, N, block_row, block_col);
int c = 0;
// Thread row and column within C_sub
int row = threadIdx.y;
int col = threadIdx.x;
for (int i = 0; i < (N / BLOCK_SIZE); i++)
{
int *A_sub = get_sub_matrix(A, N, block_row, i);
int *B_sub = get_sub_matrix(B, N, i, block_col);
// Shared memory used to store A_sub and B_sub respectively
__shared__ int A_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int B_shared[BLOCK_SIZE][BLOCK_SIZE];
// Each thread loads one element of each sub-matrix to shared memory
A_shared[row][col] = get_matrix_element(A_sub, N, row, col);
B_shared[row][col] = get_matrix_element(B_sub, N, row, col);
// need to be in sync before continuing
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; j++)
c += A_shared[row][j] * B_shared[j][col];
// need to be in sync before continuing
__syncthreads();
}
set_matrix_element(C_sub, N, row, col, c);
}
// Get the BLOCK_SIZE * BLOCK_SIZE sub-matrix of a given matrix,
// needed for shared memory implementation
__device__ int* get_sub_matrix(int *matrix, int N, int row, int col)
{
int *matrix_sub;
matrix_sub = &matrix[N * BLOCK_SIZE * row + BLOCK_SIZE * col];
return matrix_sub;
}
// Get a single matrix element, needed for shared memory implementation
__device__ int get_matrix_element(int* A, int N, int row, int col)
{
return A[row * N + col];
}
// Set a single matrix element, needed for shared memory implementation
__device__ void set_matrix_element(int *A, int N, int row, int col, int value)
{
A[row * N + col] = value;
}
| 31dc86f233426125035813e74a4904df5e8c7288.cu | /**
* Matrix Multiplication
*/
#include "matrix_multiplication.h"
int main (int argc, char **argv)
{
double rtclock();
double clkbegin, clkend, t;
bool is_same;
srand(time(NULL));
extern char *optarg;
extern int optind;
int character, error = 0;
char usage[] = "usage: %s [-N <size of matrix>]\n";
while ((character = getopt(argc, argv, "N:?")) != -1)
switch(character)
{
case 'N':
matrix_size = atoi(optarg);
break;
case '?':
error = 1;
break;
}
if (error)
{
printf(usage, argv[0]);
exit(error);
}
if (matrix_size < 1)
matrix_size = DEFAULT_MATRIX_SIZE;
printf("Matrix size is: %i x %i\n", matrix_size, matrix_size);
// define the matrices to multiply
int i, j, k;
  X = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Y = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zcpu = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zgpu = (int *)malloc(matrix_size * matrix_size * sizeof(int));
  Zgpu_s = (int *)malloc(matrix_size * matrix_size * sizeof(int));
for (i = 0; i < matrix_size * matrix_size; i++)
{
X[i] = rand();
Y[i] = rand();
Zcpu[i] = 0;
Zgpu[i] = 0;
Zgpu_s[i] = 0;
}
// start the clock: CPU
clkbegin = rtclock();
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
for (k = 0; k < matrix_size; k++)
// multiply the matrix on the CPU
Zcpu[matrix_size * i + j] +=
X[matrix_size * i + k] * Y[matrix_size * k + j];
// end the clock: CPU
clkend = rtclock();
t = clkend-clkbegin;
printf("CPU matrix-multiplication finished.\n");
printf("CPU Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// start the clock: GPU non-shared-memory
clkbegin = rtclock();
matrix_multiplication(X, Y, Zgpu);
// end the clock: GPU non-shared-memory
clkend = rtclock();
t = clkend-clkbegin;
printf("\nGPU non-shared-memory matrix-multiplication finished.\n");
printf("GPU non-shared-memory Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// check if multiplication for GPU non-shared-memory matches CPU...
is_same = true;
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
if (Zcpu[matrix_size * i + j] != Zgpu[matrix_size * i + j])
is_same = false;
if (is_same)
printf("GPU non-shared-memory Matrix-Multiplication completed successfully.\n");
else
printf("GPU non-shared-memory Matrix-Multiplication failed.\n");
// start the clock: GPU shared-memory
clkbegin = rtclock();
  matrix_multiplication_shared(X, Y, Zgpu_s);
// end the clock: GPU shared-memory
clkend = rtclock();
t = clkend-clkbegin;
printf("\nGPU shared-memory matrix-multiplication finished.\n");
printf("GPU shared-memory Matrix-Multiplication: %.1f MFLOPS; Time = %.3f sec;\n",
4.0*matrix_size*matrix_size*matrix_size/t/1000000,t);
// check if multiplication for GPU shared-memory matches CPU...
is_same = true;
for (i = 0; i < matrix_size; i++)
for (j = 0; j < matrix_size; j++)
if (Zcpu[matrix_size * i + j] != Zgpu_s[matrix_size * i + j])
is_same = false;
if (is_same)
printf("GPU shared-memory Matrix-Multiplication completed successfully.\n");
else
printf("GPU shared-memory Matrix-Multiplication failed.\n");
return 0;
}
/**
* Begin Non-Shared Memory Section
*
* This will take in two matrices (A, B) and produce C=A*B.
*/
void matrix_multiplication(int *A, int *B, int *C)
{
int mem_size = matrix_size * matrix_size * sizeof(int);
int *gpu_A, *gpu_B, *gpu_C;
cudaError_t error_code;
// Load matrix A into GPU device memory
error_code = cudaMalloc(&gpu_A, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix A failed: %s\n",cudaGetErrorString(error_code));
error_code = cudaMemcpy(gpu_A, A, mem_size, cudaMemcpyHostToDevice);
if (error_code != cudaSuccess)
printf("Copy matrix A to gpu device failed: %s\n",cudaGetErrorString(error_code));
// Load matrix B into GPU device memory
error_code = cudaMalloc(&gpu_B, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix B failed: %s\n",cudaGetErrorString(error_code));
error_code = cudaMemcpy(gpu_B, B, mem_size, cudaMemcpyHostToDevice);
if (error_code != cudaSuccess)
printf("Copy matrix B to gpu device failed: %s\n",cudaGetErrorString(error_code));
// Allocate matrix C into GPU device memory
error_code = cudaMalloc(&gpu_C, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix C failed: %s\n",cudaGetErrorString(error_code));
// Invoke the CUDA kernel to actually multiply the matrices
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((matrix_size + dimBlock.x - 1) / dimBlock.x,
(matrix_size + dimBlock.y - 1) / dimBlock.y);
matrix_multiplication_kernel<<<dimGrid, dimBlock>>>(gpu_A, gpu_B, gpu_C, matrix_size);
error_code = cudaThreadSynchronize();
if (error_code != cudaSuccess)
printf("Running CUDA kernel failed: %s\n", cudaGetErrorString(error_code));
// Load matrix C from GPU device memory to CPU
error_code = cudaMemcpy(C, gpu_C, mem_size, cudaMemcpyDeviceToHost);
if (error_code != cudaSuccess)
printf("Copy matrix C from gpu device failed: %s\n",cudaGetErrorString(error_code));
// Free GPU device memory
cudaFree(gpu_A);
cudaFree(gpu_B);
cudaFree(gpu_C);
}
/**
* KERNEL: non-shared memory matrix multiplication
*
* Each thread computes one element of matrix C by multiplying a single row from matrix A
* with a single column from matrix B and accumulating the results into the value c.
*/
__global__ void matrix_multiplication_kernel(int *A, int *B, int *C, int N)
{
int c = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= N || col >= N)
return;
for (int i = 0; i < N; i++)
c += (A[row * N + i]) * (B[i * N + col]);
C[row * N + col] = c;
}
/**
* Begin Shared Memory Section
*
* This will take in two matrices (A, B) and produce C=A*B.
*/
void matrix_multiplication_shared(int *A, int *B, int *C)
{
int mem_size = matrix_size * matrix_size * sizeof(int);
int *gpu_A, *gpu_B, *gpu_C;
cudaError_t error_code;
// Load matrix A into GPU device memory
error_code = cudaMalloc(&gpu_A, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix A failed: %s\n",cudaGetErrorString(error_code));
error_code = cudaMemcpy(gpu_A, A, mem_size, cudaMemcpyHostToDevice);
if (error_code != cudaSuccess)
printf("Copy matrix A to gpu device failed: %s\n",cudaGetErrorString(error_code));
// Load matrix B into GPU device memory
error_code = cudaMalloc(&gpu_B, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix B failed: %s\n",cudaGetErrorString(error_code));
error_code = cudaMemcpy(gpu_B, B, mem_size, cudaMemcpyHostToDevice);
if (error_code != cudaSuccess)
printf("Copy matrix B to gpu device failed: %s\n",cudaGetErrorString(error_code));
// Allocate matrix C into GPU device memory
error_code = cudaMalloc(&gpu_C, mem_size);
if (error_code != cudaSuccess)
printf("CUDA malloc matrix C failed: %s\n",cudaGetErrorString(error_code));
// Invoke the CUDA kernel to actually multiply the matrices
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(matrix_size / dimBlock.x, matrix_size / dimBlock.y);
matrix_mult_shared_kernel<<<dimGrid, dimBlock>>>(gpu_A, gpu_B, gpu_C, matrix_size);
error_code = cudaThreadSynchronize();
if (error_code != cudaSuccess)
printf("Running CUDA kernel failed: %s\n", cudaGetErrorString(error_code));
// Load matrix C from GPU device memory to CPU
error_code = cudaMemcpy(C, gpu_C, mem_size, cudaMemcpyDeviceToHost);
if (error_code != cudaSuccess)
printf("Copy matrix C from gpu device failed: %s\n",cudaGetErrorString(error_code));
// Free GPU device memory
cudaFree(gpu_A);
cudaFree(gpu_B);
cudaFree(gpu_C);
}
/**
* KERNEL: shared-memory matrix multiplication
*
* Each thread computes one element of a sub-matrix of matrix C by multiplying a single
* row from a sub-matrix of matrix A with a single column from a sub-matrix of matrix B
 * and accumulating the results into a partial sum for that sub-matrix of C. Each partial
 * sum is then written to the corresponding element of the resulting matrix C.
*/
__global__ void matrix_mult_shared_kernel(int *A, int *B, int *C, int N)
{
// Block row and column
int block_row = blockIdx.y;
int block_col = blockIdx.x;
int *C_sub = get_sub_matrix(C, N, block_row, block_col);
int c = 0;
// Thread row and column within C_sub
int row = threadIdx.y;
int col = threadIdx.x;
for (int i = 0; i < (N / BLOCK_SIZE); i++)
{
int *A_sub = get_sub_matrix(A, N, block_row, i);
int *B_sub = get_sub_matrix(B, N, i, block_col);
// Shared memory used to store A_sub and B_sub respectively
__shared__ int A_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int B_shared[BLOCK_SIZE][BLOCK_SIZE];
// Each thread loads one element of each sub-matrix to shared memory
A_shared[row][col] = get_matrix_element(A_sub, N, row, col);
B_shared[row][col] = get_matrix_element(B_sub, N, row, col);
// need to be in sync before continuing
__syncthreads();
for (int j = 0; j < BLOCK_SIZE; j++)
c += A_shared[row][j] * B_shared[j][col];
// need to be in sync before continuing
__syncthreads();
}
set_matrix_element(C_sub, N, row, col, c);
}
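/*
* Note: the loop above runs N / BLOCK_SIZE times and matrix_multiplication_shared() launches a
* grid of (matrix_size / BLOCK_SIZE) blocks per dimension, so this kernel assumes N is a
* multiple of BLOCK_SIZE. For other sizes, one possible sketch is to guard the loads and the
* final store, e.g. A_shared[row][col] = (block_row * BLOCK_SIZE + row < N && i * BLOCK_SIZE + col < N)
* ? get_matrix_element(A_sub, N, row, col) : 0; and similarly for B_shared and set_matrix_element.
*/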
// Get the BLOCK_SIZE * BLOCK_SIZE sub-matrix of a given matrix,
// needed for shared memory implementation
__device__ int* get_sub_matrix(int *matrix, int N, int row, int col)
{
int *matrix_sub;
matrix_sub = &matrix[N * BLOCK_SIZE * row + BLOCK_SIZE * col];
return matrix_sub;
}
// Get a single matrix element, needed for shared memory implementation
__device__ int get_matrix_element(int* A, int N, int row, int col)
{
return A[row * N + col];
}
// Set a single matrix element, needed for shared memory implementation
__device__ void set_matrix_element(int *A, int N, int row, int col, int value)
{
A[row * N + col] = value;
}
|
a359fe10ad63f3a44b56613ee351f22ff0786223.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//device functions
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
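// Example: with blockDim.x = 256, the thread at blockIdx.x = 2, threadIdx.x = 5
// gets global index 2 * 256 + 5 = 517.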
__global__ void kernel_1D_1D()
{
printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_1D_1D());
} | a359fe10ad63f3a44b56613ee351f22ff0786223.cu | #include "includes.h"
//device functions
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
__global__ void kernel_1D_1D()
{
printf("Local thread ID: %i Global thread ID: %i\n", threadIdx.x, getGlobalIdx_1D_1D());
} |
8b162a6859622bcee5557933ad5b80ba66ec1643.hip | // !!! This is a file automatically generated by hipify!!!
// ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include "norm.cuh"
#include <iostream>
#include <hip/hip_runtime.h>
#include "helper.cuh"
//__global__
//void computeSqrtKernel(float *imgOut, int w, int h, int nc)
//{
// int x = threadIdx.x + blockDim.x * blockIdx.x;
// int y = threadIdx.y + blockDim.y * blockIdx.y;
// if (x >= w || y >= h || threadIdx.z != 0) return;
// int at = x + y * w;
// imgOut[at] = sqrt(imgOut[at]);
//}
//__global__
//void computeSumSquaresAcrossChannelsKernel(float *imgOut, const float *u, int w, int h, int nc)
//{
// int x = threadIdx.x + blockDim.x * blockIdx.x;
// int y = threadIdx.y + blockDim.y * blockIdx.y;
// int z = threadIdx.z;
// if (x >= w || y >= h || z >= nc) return;
// int at = x + y * w;
// imgOut[at] = 0;
//// __syncthreads();
// imgOut[at] += pow(u[at + w * h * z], 2);
//}
__global__
void computeNormIterateKernel(float *imgOut, const float *u, int w, int h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return;
int at = x + y * w;
imgOut[at] = 0;
for (int ch = 0; ch < nc; ch++) {
imgOut[at] += pow(u[at + w * h * ch], 2);
}
imgOut[at] = sqrt(imgOut[at]);
}
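// For each pixel (x, y) this computes the Euclidean norm across the nc channels,
// imgOut(x, y) = sqrt( sum_ch u(x, y, ch)^2 ), with each channel stored as a plane of w * h floats.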
void computeNormCuda(float *imgOut, const float *u, int w, int h, int nc)
{
// calculate block and grid size
dim3 block(32, 32, 1);
dim3 grid = computeGrid2D(block, w, h);
// run cuda kernel
hipLaunchKernelGGL(( computeNormIterateKernel), dim3(grid), dim3(block), 0, 0, imgOut, u, w, h, nc);
// computeSumSquaresAcrossChannelsKernel<<<grid, block>>>(imgOut, u, w, h, nc);
// computeSqrtKernel<<<grid, block>>>(imgOut, w, h, nc);
// check for errors
CUDA_CHECK;
}
| 8b162a6859622bcee5557933ad5b80ba66ec1643.cu | // ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include "norm.cuh"
#include <iostream>
#include <cuda_runtime.h>
#include "helper.cuh"
//__global__
//void computeSqrtKernel(float *imgOut, int w, int h, int nc)
//{
// int x = threadIdx.x + blockDim.x * blockIdx.x;
// int y = threadIdx.y + blockDim.y * blockIdx.y;
// if (x >= w || y >= h || threadIdx.z != 0) return;
// int at = x + y * w;
// imgOut[at] = sqrt(imgOut[at]);
//}
//__global__
//void computeSumSquaresAcrossChannelsKernel(float *imgOut, const float *u, int w, int h, int nc)
//{
// int x = threadIdx.x + blockDim.x * blockIdx.x;
// int y = threadIdx.y + blockDim.y * blockIdx.y;
// int z = threadIdx.z;
// if (x >= w || y >= h || z >= nc) return;
// int at = x + y * w;
// imgOut[at] = 0;
//// __syncthreads();
// imgOut[at] += pow(u[at + w * h * z], 2);
//}
__global__
void computeNormIterateKernel(float *imgOut, const float *u, int w, int h, int nc)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x >= w || y >= h) return;
int at = x + y * w;
imgOut[at] = 0;
for (int ch = 0; ch < nc; ch++) {
imgOut[at] += pow(u[at + w * h * ch], 2);
}
imgOut[at] = sqrt(imgOut[at]);
}
void computeNormCuda(float *imgOut, const float *u, int w, int h, int nc)
{
// calculate block and grid size
dim3 block(32, 32, 1);
dim3 grid = computeGrid2D(block, w, h);
// run cuda kernel
computeNormIterateKernel<<<grid, block>>>(imgOut, u, w, h, nc);
// computeSumSquaresAcrossChannelsKernel<<<grid, block>>>(imgOut, u, w, h, nc);
// computeSqrtKernel<<<grid, block>>>(imgOut, w, h, nc);
// check for errors
CUDA_CHECK;
}
|
25c6e6e9a571912ab55acb907e09c1756f80d699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <mpi.h>
#define NUM_THREADS 512
#define min(a, b) (((a) < (b)) ? (a) : (b))
__device__ void run_mdf(float *univ, int w, int size, int id, float *new_univ) {
// Neighbor positions
unsigned int x = id % w;
unsigned int y = id - x;
unsigned int x_l = x - 1;
unsigned int x_r = x + 1;
unsigned int y_u = y - w;
unsigned int y_d = y + w;
new_univ[x + y] = (0.25 * (univ[x_r + y] + univ[x_l + y] + univ[x + y_u] + univ[x + y_d] - (4 * univ[y + x]))) + univ[y + x];
}
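// This is the explicit finite-difference update
// u_new(x, y) = u(x, y) + 0.25 * (u(x+1, y) + u(x-1, y) + u(x, y-1) + u(x, y+1) - 4 * u(x, y)),
// i.e. one relaxation step of the 2D heat/Laplace stencil on a row-major grid of width w.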
__global__ void middle_kernel(float *univ, int h, int w, int p_id, float *new_univ) {
int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int size = h * w;
//printf("%d %d %d -> %d\n", blockIdx.x, threadIdx.x, blockDim.x, id);
if (p_id == 0) {
if (id < (size / 2) - w) { // if this is not a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
} else if (p_id == 1) {
if ((id >= (size / 2) + w) && (id <= size)) { // if this is not a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
}
}
__global__ void border_kernel(float *univ, int h, int w, int p_id, float *new_univ) {
int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int size = h * w;
//printf("%d %d %d -> %d\n", blockIdx.x, threadIdx.x, blockDim.x, id);
if (p_id == 0) {
if ((id >= (size / 2) - w) && (id < size / 2)) { // if this IS a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
} else if (p_id == 1) {
if ((id >= size / 2) && (id < (size) / 2 + w)) { // if this IS a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
}
}
void print_array(int arr[], int w, int size) {
printf("\n");
for (int i = 0; i < size; i++)
{
printf("%s", (arr[i] == 1 ? "0" : " "));
//printf("%d", arr[i]);
if ((i + 1) % w == 0) {
printf("\n");
}
}
printf("\n");
}
void create_universe(float *univ, int w, int h) {
for(int i = 0; i < h; i++){
for(int j = 0; j < w; j++){
int k = (i * w) + j;
if(i == 0 || j == 0 || i == h-1 || j == w-1){
univ[k] = 100;
} else{
univ[k] = 0;
}
}
}
}
int main(int argc, char **argv)
{
int g, h, w;
printf("Enter desired number of generations:\n");
scanf("%d", &g);
printf("Enter desired height of universe:\n");
scanf("%d", &h);
printf("Enter desired width of universe:\n");
scanf("%d", &w);
hipStream_t border_p1_stream;
hipStream_t middle_p1_stream;
hipStream_t border_p2_stream;
hipStream_t middle_p2_stream;
hipStreamCreate(&border_p1_stream);
hipStreamCreate(&middle_p1_stream);
hipStreamCreate(&border_p2_stream);
hipStreamCreate(&middle_p2_stream);
MPI_Status status;
int p_id, p_group, p_name;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p_group);
MPI_Comm_rank(MPI_COMM_WORLD, &p_id);
MPI_Get_processor_name(processor_name, &p_name);
// Number of cells in universe
int size = h * w;
// Host(CPU) arrays
float *h_univ = (float*)malloc(size * sizeof(float));
float *h_new_univ = (float*)malloc(size * sizeof(float));
// Device (GPU) arrays
float *d_univ;
float *d_new_univ;
hipMalloc((void**)&d_univ, size * sizeof(float));
hipMalloc((void**)&d_new_univ, size * sizeof(float));
create_universe(h_univ, w, h);
size_t n_threads = size > NUM_THREADS ? NUM_THREADS : size;
unsigned n_blocks = size > NUM_THREADS ? (unsigned)size / NUM_THREADS : (unsigned)1;
//printf("size: %d - blocks: %d - threads: %d\n", size, n_blocks, t);
int my_part;
int iter_count = g;
if (p_id == 0) {
while (iter_count > 0) {
my_part = (h * w) / 2;
hipMemcpyAsync(d_univ, h_univ, size * sizeof(int), hipMemcpyHostToDevice, middle_p1_stream); // copy the matrix to the GPU
hipLaunchKernelGGL(( middle_kernel) , dim3(n_blocks), dim3(n_threads), 0, middle_p1_stream , d_univ, h, w, p_id, d_new_univ); // process the matrix
//std::swap(d_univ, d_new_univ);
if (iter_count < g) {
for (int i = 1; i < w - 1; i++) {
MPI_Recv(&h_univ[my_part + i], 1, MPI_FLOAT, 1, 1, MPI_COMM_WORLD, &status); // receive the border from the other process
}
hipMemcpyAsync(d_univ, h_univ, size * sizeof(int), hipMemcpyHostToDevice, border_p1_stream); // copy the matrix to the GPU on another stream (for parallel processing)
}
hipLaunchKernelGGL(( border_kernel) , dim3(n_blocks), dim3(n_threads), 0, border_p1_stream , d_univ, h, w, p_id, d_new_univ);
hipDeviceSynchronize();
hipMemcpyAsync(h_univ, d_univ, size * sizeof(int), hipMemcpyDeviceToHost, border_p1_stream);
// Send the border to the next process
my_part = my_part - w;
for (int i = 1; i < w - 1; i++) {
MPI_Send(&h_univ[my_part + i], 1, MPI_FLOAT, 1, 1, MPI_COMM_WORLD);
}
// print_array(h_univ, w, size);
iter_count--;
}
}
else {
while (iter_count > 0) {
my_part = (h * w) / 2;
hipMemcpyAsync(d_univ, h_univ, size * sizeof(int), hipMemcpyHostToDevice, middle_p2_stream); // copy the matrix to the GPU
middle_kernel << <n_blocks, n_threads, 0, middle_p2_stream >> > (d_univ, h, w, p_id, d_new_univ); // process the matrix
//std::swap(d_univ, d_new_univ);
if (iter_count < g) {
for (int i = 1; i < w - 1; i++) {
MPI_Recv(&h_univ[my_part + i], 1, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status); // receive the border from the other process
}
}
hipMemcpyAsync(d_univ, h_univ, size * sizeof(int), hipMemcpyHostToDevice, border_p2_stream); // copy the matrix to the GPU on another stream (for parallel processing)
border_kernel << <n_blocks, n_threads, 0, border_p2_stream >> >(d_univ, h, w, p_id, d_new_univ);
hipDeviceSynchronize();
hipMemcpyAsync(h_univ, d_univ, size * sizeof(int), hipMemcpyDeviceToHost, border_p2_stream);
// Send the border to the other process
my_part = my_part - w;
for (int i = 1; i < w - 1; i++) {
MPI_Send(&h_univ[my_part + i], 1, MPI_FLOAT, 0, 1, MPI_COMM_WORLD);
}
// print_array(h_univ, w, size);
iter_count--;
}
}
// Release memory?
free(h_univ);
free(h_new_univ);
hipFree(d_univ);
hipFree(d_new_univ);
hipStreamDestroy(border_p1_stream);
hipStreamDestroy(middle_p1_stream);
hipStreamDestroy(border_p2_stream);
hipStreamDestroy(middle_p2_stream);
MPI_Finalize();
return 0;
} | 25c6e6e9a571912ab55acb907e09c1756f80d699.cu | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <mpi.h>
#define NUM_THREADS 512
#define min(a, b) (((a) < (b)) ? (a) : (b))
__device__ void run_mdf(float *univ, int w, int size, int id, float *new_univ) {
// Neighbor positions
unsigned int x = id % w;
unsigned int y = id - x;
unsigned int x_l = x - 1;
unsigned int x_r = x + 1;
unsigned int y_u = y - w;
unsigned int y_d = y + w;
new_univ[x + y] = (0.25 * (univ[x_r + y] + univ[x_l + y] + univ[x + y_u] + univ[x + y_d] - (4 * univ[y + x]))) + univ[y + x];
}
__global__ void middle_kernel(float *univ, int h, int w, int p_id, float *new_univ) {
int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int size = h * w;
//printf("%d %d %d -> %d\n", blockIdx.x, threadIdx.x, blockDim.x, id);
if (p_id == 0) {
if (id < (size / 2) - w) { // if this is not a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
} else if (p_id == 1) {
if ((id >= (size / 2) + w) && (id <= size)) { // if this is not a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
}
}
__global__ void border_kernel(float *univ, int h, int w, int p_id, float *new_univ) {
int id = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int size = h * w;
//printf("%d %d %d -> %d\n", blockIdx.x, threadIdx.x, blockDim.x, id);
if (p_id == 0) {
if ((id >= (size / 2) - w) && (id < size / 2)) { // if this IS a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
} else if (p_id == 1) {
if ((id >= size / 2) && (id < (size) / 2 + w)) { // if this IS a shared border
run_mdf(univ, w, size, id, new_univ);
}
else {
new_univ[id] = 100;
}
}
}
void print_array(int arr[], int w, int size) {
printf("\n");
for (int i = 0; i < size; i++)
{
printf("%s", (arr[i] == 1 ? "0" : " "));
//printf("%d", arr[i]);
if ((i + 1) % w == 0) {
printf("\n");
}
}
printf("\n");
}
void create_universe(float *univ, int w, int h) {
for(int i = 0; i < h; i++){
for(int j = 0; j < w; j++){
int k = (i * w) + j;
if(i == 0 || j == 0 || i == h-1 || j == w-1){
univ[k] = 100;
} else{
univ[k] = 0;
}
}
}
}
int main(int argc, char **argv)
{
int g, h, w;
printf("Enter desired number of generations:\n");
scanf("%d", &g);
printf("Enter desired height of universe:\n");
scanf("%d", &h);
printf("Enter desired width of universe:\n");
scanf("%d", &w);
cudaStream_t border_p1_stream;
cudaStream_t middle_p1_stream;
cudaStream_t border_p2_stream;
cudaStream_t middle_p2_stream;
cudaStreamCreate(&border_p1_stream);
cudaStreamCreate(&middle_p1_stream);
cudaStreamCreate(&border_p2_stream);
cudaStreamCreate(&middle_p2_stream);
MPI_Status status;
int p_id, p_group, p_name;
char processor_name[MPI_MAX_PROCESSOR_NAME];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &p_group);
MPI_Comm_rank(MPI_COMM_WORLD, &p_id);
MPI_Get_processor_name(processor_name, &p_name);
// Number of cells in universe
int size = h * w;
// Host(CPU) arrays
float *h_univ = (float*)malloc(size * sizeof(float));
float *h_new_univ = (float*)malloc(size * sizeof(float));
// Device (GPU) arrays
float *d_univ;
float *d_new_univ;
cudaMalloc((void**)&d_univ, size * sizeof(float));
cudaMalloc((void**)&d_new_univ, size * sizeof(float));
create_universe(h_univ, w, h);
size_t n_threads = size > NUM_THREADS ? NUM_THREADS : size;
unsigned n_blocks = size > NUM_THREADS ? (unsigned)size / NUM_THREADS : (unsigned)1;
//printf("size: %d - blocks: %d - threads: %d\n", size, n_blocks, t);
int my_part;
int iter_count = g;
if (p_id == 0) {
while (iter_count > 0) {
my_part = (h * w) / 2;
cudaMemcpyAsync(d_univ, h_univ, size * sizeof(int), cudaMemcpyHostToDevice, middle_p1_stream); // copy the matrix to the GPU
middle_kernel <<<n_blocks, n_threads, 0, middle_p1_stream >>> (d_univ, h, w, p_id, d_new_univ); // process the matrix
//std::swap(d_univ, d_new_univ);
if (iter_count < g) {
for (int i = 1; i < w - 1; i++) {
MPI_Recv(&h_univ[my_part + i], 1, MPI_FLOAT, 1, 1, MPI_COMM_WORLD, &status); // receive the border from the other process
}
cudaMemcpyAsync(d_univ, h_univ, size * sizeof(int), cudaMemcpyHostToDevice, border_p1_stream); // copy the matrix to the GPU on another stream (for parallel processing)
}
border_kernel <<<n_blocks, n_threads, 0, border_p1_stream >>>(d_univ, h, w, p_id, d_new_univ);
cudaDeviceSynchronize();
cudaMemcpyAsync(h_univ, d_univ, size * sizeof(int), cudaMemcpyDeviceToHost, border_p1_stream);
// Send the border to the next process
my_part = my_part - w;
for (int i = 1; i < w - 1; i++) {
MPI_Send(&h_univ[my_part + i], 1, MPI_FLOAT, 1, 1, MPI_COMM_WORLD);
}
// print_array(h_univ, w, size);
iter_count--;
}
}
else {
while (iter_count > 0) {
my_part = (h * w) / 2;
cudaMemcpyAsync(d_univ, h_univ, size * sizeof(int), cudaMemcpyHostToDevice, middle_p2_stream); // copy the matrix to the GPU
middle_kernel << <n_blocks, n_threads, 0, middle_p2_stream >> > (d_univ, h, w, p_id, d_new_univ); // process the matrix
//std::swap(d_univ, d_new_univ);
if (iter_count < g) {
for (int i = 1; i < w - 1; i++) {
MPI_Recv(&h_univ[my_part + i], 1, MPI_FLOAT, 0, 1, MPI_COMM_WORLD, &status); // receive the border from the other process
}
}
cudaMemcpyAsync(d_univ, h_univ, size * sizeof(int), cudaMemcpyHostToDevice, border_p2_stream); // copy the matrix to the GPU on another stream (for parallel processing)
border_kernel << <n_blocks, n_threads, 0, border_p2_stream >> >(d_univ, h, w, p_id, d_new_univ);
cudaDeviceSynchronize();
cudaMemcpyAsync(h_univ, d_univ, size * sizeof(int), cudaMemcpyDeviceToHost, border_p2_stream);
// Send the border to the other process
my_part = my_part - w;
for (int i = 1; i < w - 1; i++) {
MPI_Send(&h_univ[my_part + i], 1, MPI_FLOAT, 0, 1, MPI_COMM_WORLD);
}
// print_array(h_univ, w, size);
iter_count--;
}
}
// Release memory?
free(h_univ);
free(h_new_univ);
cudaFree(d_univ);
cudaFree(d_new_univ);
cudaStreamDestroy(border_p1_stream);
cudaStreamDestroy(middle_p1_stream);
cudaStreamDestroy(border_p2_stream);
cudaStreamDestroy(middle_p2_stream);
MPI_Finalize();
return 0;
} |
02a851be40caee894eaeadcb7a737ae01b102128.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/copying.hpp>
#include "MessageUtil.cuh"
namespace ral {
namespace communication {
namespace messages {
std::pair<int32_t, int32_t> getCharsColumnStartAndEnd(const cudf::strings_column_view & column){
cudf::size_type offset = column.offset();
cudf::column_view offsets_column = column.offsets();
int32_t chars_column_start, chars_column_end;
hipMemcpy(&chars_column_start, (void*)(offsets_column.head<int32_t>() + offset), sizeof(int32_t), hipMemcpyDeviceToHost);
hipMemcpy(&chars_column_end, (void*)(offsets_column.head<int32_t>() + offset + column.size()), sizeof(int32_t), hipMemcpyDeviceToHost);
return {chars_column_start, chars_column_end};
}
std::unique_ptr<cudf::column> getRebasedStringOffsets(const cudf::strings_column_view & column, int32_t chars_column_start){
cudf::size_type offset = column.offset();
cudf::column_view offsets_column = column.offsets();
// NOTE that the offsets column size is usually one more than the number of strings. It starts at 0 and ends at chars_column.size()
auto new_offsets = cudf::allocate_like(offsets_column, column.size() + 1, cudf::mask_allocation_policy::NEVER);
auto mutable_col = new_offsets->mutable_view();
cudf::copy_range_in_place(offsets_column, mutable_col, offset, offset + column.size() + 1, 0);
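// Rebase the copied offsets so they start at zero. Illustrative example: if the sliced
// column's offsets are {5, 7, 10, 12} and chars_column_start is 5, the rebased offsets
// become {0, 2, 5, 7}.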
thrust::transform(rmm::exec_policy(0)->on(0),
mutable_col.begin<int32_t>(),
mutable_col.end<int32_t>(),
mutable_col.begin<int32_t>(),
[chars_column_start] __device__ (int32_t value){
return value - chars_column_start;
});
return new_offsets;
}
} // namespace messages
} // namespace communication
} // namespace ral
| 02a851be40caee894eaeadcb7a737ae01b102128.cu | #include <cudf/copying.hpp>
#include "MessageUtil.cuh"
namespace ral {
namespace communication {
namespace messages {
std::pair<int32_t, int32_t> getCharsColumnStartAndEnd(const cudf::strings_column_view & column){
cudf::size_type offset = column.offset();
cudf::column_view offsets_column = column.offsets();
int32_t chars_column_start, chars_column_end;
cudaMemcpy(&chars_column_start, (void*)(offsets_column.head<int32_t>() + offset), sizeof(int32_t), cudaMemcpyDeviceToHost);
cudaMemcpy(&chars_column_end, (void*)(offsets_column.head<int32_t>() + offset + column.size()), sizeof(int32_t), cudaMemcpyDeviceToHost);
return {chars_column_start, chars_column_end};
}
std::unique_ptr<cudf::column> getRebasedStringOffsets(const cudf::strings_column_view & column, int32_t chars_column_start){
cudf::size_type offset = column.offset();
cudf::column_view offsets_column = column.offsets();
// NOTE that the offsets column size is usually one more than the number of strings. It starts at 0 and ends at chars_column.size()
auto new_offsets = cudf::allocate_like(offsets_column, column.size() + 1, cudf::mask_allocation_policy::NEVER);
auto mutable_col = new_offsets->mutable_view();
cudf::copy_range_in_place(offsets_column, mutable_col, offset, offset + column.size() + 1, 0);
thrust::transform(rmm::exec_policy(0)->on(0),
mutable_col.begin<int32_t>(),
mutable_col.end<int32_t>(),
mutable_col.begin<int32_t>(),
[chars_column_start] __device__ (int32_t value){
return value - chars_column_start;
});
return new_offsets;
}
} // namespace messages
} // namespace communication
} // namespace ral
|
2a45e480feac39287210bdf8eb5868232822bf5f.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/BinaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/JitLoops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at {
namespace native {
const char sigmoid_backward_name[] = "sigmoid_backward";
void sigmoid_backward_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if(isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto sigmoid_backward_string = jiterator_stringify(
template <typename T>
T sigmoid_backward(T a, T b) {
return a * std::conj((T{1.} - b) * b);
}
); // sigmoid_backward_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sigmoid_backward_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ sigmoid_backward_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, sigmoid_backward_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
using comp_t = at::opmath_type<scalar_t>;
const auto one = comp_t{1.};
const auto comp_b = static_cast<comp_t>(b);
const auto comp_a = static_cast<comp_t>(a);
return static_cast<scalar_t>(comp_a * std::conj((one - comp_b) * comp_b));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, dtype, "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b) * b;
});
});
}
}
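// Background: with y = sigmoid(x), dy/dx = y * (1 - y), so grad_input = grad_output * (1 - y) * y;
// the complex branches above additionally take conj() of the local derivative, matching the
// conjugate convention used for complex-valued gradients.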
void logit_backward_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
return (x_acc < T_ACC(0) || x_acc > T_ACC(1))
? std::numeric_limits<T_ACC>::quiet_NaN()
: dy_acc / (x_acc * (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
return (x_acc < lo || x_acc > hi)
? T_ACC(0)
: dy_acc / (x_acc * (T_ACC(1) - x_acc));
});
}
});
}
void tanh_backward_kernel_cuda(TensorIteratorBase& iter) {
if(isComplexType(iter.dtype())) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "tanh_backward_complex_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * std::conj(scalar_t{1.} - b * b);
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "tanh_backward_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t{1.} - b * b);
});
});
}
}
REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda);
REGISTER_DISPATCH(logit_backward_stub, &logit_backward_kernel_cuda);
REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda);
} // namespace native
} // namespace at
| 2a45e480feac39287210bdf8eb5868232822bf5f.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/BinaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/JitLoops.cuh>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at {
namespace native {
const char sigmoid_backward_name[] = "sigmoid_backward";
void sigmoid_backward_kernel_cuda(TensorIteratorBase& iter) {
auto dtype = iter.dtype();
if(isComplexType(dtype)) {
#if AT_USE_JITERATOR()
static const auto sigmoid_backward_string = jiterator_stringify(
template <typename T>
T sigmoid_backward(T a, T b) {
return a * std::conj((T{1.} - b) * b);
}
); // sigmoid_backward_string
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sigmoid_backward_cuda", [&]() {
jitted_gpu_kernel<
/*name=*/ sigmoid_backward_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 2>(iter, sigmoid_backward_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, dtype, "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
using comp_t = at::opmath_type<scalar_t>;
const auto one = comp_t{1.};
const auto comp_b = static_cast<comp_t>(b);
const auto comp_a = static_cast<comp_t>(a);
return static_cast<scalar_t>(comp_a * std::conj((one - comp_b) * comp_b));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, dtype, "sigmoid_backward_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t(1.) - b) * b;
});
});
}
}
void logit_backward_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
return (x_acc < T_ACC(0) || x_acc > T_ACC(1))
? std::numeric_limits<T_ACC>::quiet_NaN()
: dy_acc / (x_acc * (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
const T_ACC dy_acc = static_cast<T_ACC>(dy);
const T_ACC x_acc = static_cast<T_ACC>(x);
return (x_acc < lo || x_acc > hi)
? T_ACC(0)
: dy_acc / (x_acc * (T_ACC(1) - x_acc));
});
}
});
}
void tanh_backward_kernel_cuda(TensorIteratorBase& iter) {
if(isComplexType(iter.dtype())) {
AT_DISPATCH_COMPLEX_TYPES(iter.dtype(), "tanh_backward_complex_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * std::conj(scalar_t{1.} - b * b);
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "tanh_backward_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * (scalar_t{1.} - b * b);
});
});
}
}
REGISTER_DISPATCH(sigmoid_backward_stub, &sigmoid_backward_kernel_cuda);
REGISTER_DISPATCH(logit_backward_stub, &logit_backward_kernel_cuda);
REGISTER_DISPATCH(tanh_backward_stub, &tanh_backward_kernel_cuda);
} // namespace native
} // namespace at
|
e842a44d77923590f79fb69074bd410db6b88c9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <iomanip>
#include <bitset>
#include "cuda_ptr.cuh"
__global__ void test_any(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __any(threadIdx.x == 16);
b[tid] = __any(threadIdx.x == 128);
if (threadIdx.x == 10) c[tid] = __any(threadIdx.x == 16);
// if (threadIdx.x == 15 || threadIdx.x == 16) c[tid] = __any(threadIdx.x == 16);
// if (threadIdx.x == 10 || threadIdx.x == 16) c[tid] = __any(threadIdx.x == 16);
}
__global__ void test_all(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __all(threadIdx.x == 16);
b[tid] = __all(b[tid] == -1);
if (threadIdx.x == 10) c[tid] = __all(threadIdx.x == 16);
}
__global__ void test_ballot(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __ballot(threadIdx.x == 16);
b[tid] = __ballot(threadIdx.x % 2 == 0);
if (threadIdx.x == 16) c[tid] = __ballot(threadIdx.x == 16);
}
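// __any/__all return non-zero when the predicate is true for any/all active threads of the warp,
// and __ballot returns a 32-bit mask with one bit set per lane for which the predicate is true.
// (These warp-vote intrinsics were deprecated in CUDA 9 in favour of __any_sync/__all_sync/__ballot_sync.)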
int main() {
const int tb_size = 64;
const int grid_size = 2;
const int array_size = tb_size * grid_size;
cuda_ptr<int> a, b, c;
a.allocate(array_size);
b.allocate(array_size);
c.allocate(array_size);
a.set_val(-1);
b.set_val(-1);
c.set_val(-1);
a.host2dev();
b.host2dev();
c.host2dev();
#ifdef TEST_ANY
std::cerr << "TEST_ANY\n";
hipLaunchKernelGGL(( test_any), dim3(grid_size), dim3(tb_size), 0, 0, a, b, c);
#elif TEST_ALL
std::cerr << "TEST_ALL\n";
hipLaunchKernelGGL(( test_all), dim3(grid_size), dim3(tb_size), 0, 0, a, b, c);
#elif TEST_BALLOT
std::cerr << "TEST_BALLOT\n";
hipLaunchKernelGGL(( test_ballot), dim3(grid_size), dim3(tb_size), 0, 0, a, b, c);
#endif
a.dev2host();
b.dev2host();
c.dev2host();
std::cout << "threadIdx.x i a b c\n";
for (int i = 0; i < array_size; i++) {
#ifdef TEST_BALLOT
std::cout << i % tb_size << " " << i << " " <<
static_cast<std::bitset<32> >(a[i]) << " " <<
static_cast<std::bitset<32> >(b[i]) << " " <<
static_cast<std::bitset<32> >(c[i]) << "\n";
#else
std::cout << i % tb_size << " " << i << " " << a[i] << " " << b[i] << " " << c[i] << "\n";
#endif
}
}
| e842a44d77923590f79fb69074bd410db6b88c9c.cu | #include <iostream>
#include <iomanip>
#include <bitset>
#include "cuda_ptr.cuh"
__global__ void test_any(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __any(threadIdx.x == 16);
b[tid] = __any(threadIdx.x == 128);
if (threadIdx.x == 10) c[tid] = __any(threadIdx.x == 16);
// if (threadIdx.x == 15 || threadIdx.x == 16) c[tid] = __any(threadIdx.x == 16);
// if (threadIdx.x == 10 || threadIdx.x == 16) c[tid] = __any(threadIdx.x == 16);
}
__global__ void test_all(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __all(threadIdx.x == 16);
b[tid] = __all(b[tid] == -1);
if (threadIdx.x == 10) c[tid] = __all(threadIdx.x == 16);
}
__global__ void test_ballot(int* a,
int* b,
int* c) {
const auto tid = threadIdx.x + blockIdx.x * blockDim.x;
a[tid] = __ballot(threadIdx.x == 16);
b[tid] = __ballot(threadIdx.x % 2 == 0);
if (threadIdx.x == 16) c[tid] = __ballot(threadIdx.x == 16);
}
int main() {
const int tb_size = 64;
const int grid_size = 2;
const int array_size = tb_size * grid_size;
cuda_ptr<int> a, b, c;
a.allocate(array_size);
b.allocate(array_size);
c.allocate(array_size);
a.set_val(-1);
b.set_val(-1);
c.set_val(-1);
a.host2dev();
b.host2dev();
c.host2dev();
#ifdef TEST_ANY
std::cerr << "TEST_ANY\n";
test_any<<<grid_size, tb_size>>>(a, b, c);
#elif TEST_ALL
std::cerr << "TEST_ALL\n";
test_all<<<grid_size, tb_size>>>(a, b, c);
#elif TEST_BALLOT
std::cerr << "TEST_BALLOT\n";
test_ballot<<<grid_size, tb_size>>>(a, b, c);
#endif
a.dev2host();
b.dev2host();
c.dev2host();
std::cout << "threadIdx.x i a b c\n";
for (int i = 0; i < array_size; i++) {
#ifdef TEST_BALLOT
std::cout << i % tb_size << " " << i << " " <<
static_cast<std::bitset<32> >(a[i]) << " " <<
static_cast<std::bitset<32> >(b[i]) << " " <<
static_cast<std::bitset<32> >(c[i]) << "\n";
#else
std::cout << i % tb_size << " " << i << " " << a[i] << " " << b[i] << " " << c[i] << "\n";
#endif
}
}
|
ae2585700af4c0ae255e85cd9b453258376232a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "luaT.h"
#include "TH.h"
#include "THH.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_Tanh_init(L);
cunn_Sigmoid_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialBatchNormalization_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialConvolutionLocal_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialMaxUnpooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_MarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_SmoothL1Criterion_init(L);
cunn_SoftPlus_init(L);
cunn_SoftShrink_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
cunn_VolumetricFullConvolution_init(L);
cunn_VolumetricMaxPooling_init(L);
cunn_VolumetricAveragePooling_init(L);
cunn_PReLU_init(L);
cunn_RReLU_init(L);
return 1;
}
| ae2585700af4c0ae255e85cd9b453258376232a7.cu | #include "luaT.h"
#include "TH.h"
#include "THC.h"
#include "THLogAdd.h" /* DEBUG: WTF */
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include "utils.h"
LUA_EXTERNC DLL_EXPORT int luaopen_libcunn(lua_State *L);
int luaopen_libcunn(lua_State *L)
{
lua_newtable(L);
cunn_SpatialCrossMapLRN_init(L);
cunn_Tanh_init(L);
cunn_Sigmoid_init(L);
cunn_SoftMax_init(L);
cunn_TemporalConvolution_init(L);
cunn_TemporalMaxPooling_init(L);
cunn_SpatialBatchNormalization_init(L);
cunn_SpatialConvolutionMM_init(L);
cunn_SpatialConvolutionLocal_init(L);
cunn_SpatialFullConvolution_init(L);
cunn_SpatialMaxPooling_init(L);
cunn_SpatialMaxUnpooling_init(L);
cunn_SpatialFractionalMaxPooling_init(L);
cunn_SpatialAdaptiveMaxPooling_init(L);
cunn_SpatialSubSampling_init(L);
cunn_SpatialAveragePooling_init(L);
cunn_MultiMarginCriterion_init(L);
cunn_MarginCriterion_init(L);
cunn_Square_init(L);
cunn_Sqrt_init(L);
cunn_Threshold_init(L);
cunn_MSECriterion_init(L);
cunn_SmoothL1Criterion_init(L);
cunn_SoftPlus_init(L);
cunn_SoftShrink_init(L);
cunn_SpatialUpSamplingNearest_init(L);
cunn_VolumetricConvolution_init(L);
cunn_VolumetricFullConvolution_init(L);
cunn_VolumetricMaxPooling_init(L);
cunn_VolumetricAveragePooling_init(L);
cunn_PReLU_init(L);
cunn_RReLU_init(L);
return 1;
}
|
b8edc56e175b1d54755271c7adfce46c5f1a2d21.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by Huy Vo on 11/26/18.
//
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
#include <hipsparse.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <cvode/cvode.h>
#include <nvector/nvector_cuda.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <sunlinsol/sunlinsol_spbcgs.h>
#include <cvode/cvode_spils.h>
#include <sundials/sundials_types.h>
#include <sundials/sundials_math.h>
#include "cme_util.h"
#include "FSPMat.h"
namespace hog1p {
// reaction parameters
const double k12{1.29}, k21{1.0e0}, k23{0.0067},
k32{0.027}, k34{0.133}, k43{0.0381},
kr2{0.0116}, kr3{0.987}, kr4{0.0538},
trans{0.01}, gamma{0.0049},
// parameters for the time-dependent factors
r1{6.9e-5}, r2{7.1e-3}, eta{3.1}, Ahog{9.3e09}, Mhog{6.4e-4};
// propensity function
__device__
double propensity(int *X, int k) {
switch (X[0]) {
case 0: {
switch (k) {
case 0:
return k12;
case 1:
return 0.0;
case 2:
return 0.0;
case 3:
return 0.0;
case 4:
return 0.0;
}
}
case 1: {
switch (k) {
case 0:
return k23;
case 1:
return 0.0;
case 2:
return k21;
case 3:
return kr2;
case 4:
return kr2;
}
}
case 2: {
switch (k) {
case 0:
return k34;
case 1:
return k32;
case 2:
return 0.0;
case 3:
return kr3;
case 4:
return kr3;
}
}
case 3: {
switch (k) {
case 0:
return 0.0;
case 1:
return k43;
case 2:
return 0.0;
case 3:
return kr4;
case 4:
return kr4;
}
}
}
switch (k) {
case 5:
return trans * double(X[1]);
case 6:
return trans * double(X[2]);
case 7:
return gamma * double(X[3]);
case 8:
return gamma * double(X[4]);
}
return 0.0;
}
__host__
double propensity_factor(int X, int species, int reaction) {
if (species == 0) {
switch (X){
case 0: {
switch (reaction) {
case 0:
return k12;
case 1:
return 0.0;
case 2:
return 0.0;
case 3:
return 0.0;
case 4:
return 0.0;
}
}
case 1: {
switch (reaction) {
case 0:
return k23;
case 1:
return 0.0;
case 2:
return k21;
case 3:
return kr2;
case 4:
return kr2;
}
}
case 2: {
switch (reaction) {
case 0:
return k34;
case 1:
return k32;
case 2:
return 0.0;
case 3:
return kr3;
case 4:
return kr3;
}
}
case 3: {
switch (reaction) {
case 0:
return 0.0;
case 1:
return k43;
case 2:
return 0.0;
case 3:
return kr4;
case 4:
return kr4;
}
}
default:
return 1.0;
}
}
switch (reaction) {
case 5:
if (species == 1) return trans * double(X);
case 6:
if (species == 2) return trans * double(X);
case 7:
if (species == 3) return gamma * double(X);
case 8:
if (species == 4) return gamma * double(X);
default:
return 1.0;
}
return 1.0;
}
// function to compute the time-dependent coefficients of the propensity functions
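// In math form: h1(t) = (1 - exp(-r1*t)) * exp(-r2*t) and hog1p(t) = Ahog * (h1 / (1 + h1/Mhog))^eta;
// only the coefficient of reaction 2 is time-dependent, clipped at zero below, all others stay 1.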
void t_func(double t, double *out) {
for (int i = 0; i < 9; ++i) {
out[i] = 1.0;
}
double h1 = (1.0 - exp(-r1 * t)) * exp(-r2 * t);
double hog1p = pow(h1 / (1.0 + h1 / Mhog), eta) * Ahog;
out[2] = ::max(0.0, 3200.0 - 7710.0 * (hog1p));
//u(2) = ::max(0.0, 3200.0 - (hog1p));
}
}
/* RHS of CME routine. */
__host__
static int cvode_rhs(double t, N_Vector u, N_Vector udot, void *FSPMat_ptr) {
double *udata = N_VGetDeviceArrayPointer_Cuda(u);
double *udotdata = N_VGetDeviceArrayPointer_Cuda(udot);
thrust::fill(thrust::device_pointer_cast<double>(udotdata),
thrust::device_pointer_cast<double>(udotdata + ((cuFSP::FSPMat *) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat *) FSPMat_ptr)->action(t, udata, udotdata);
CUDACHKERR();
return 0;
}
__device__ cuFSP::PropFun prop_pointer = &hog1p::propensity;
/* Jacobian-times-vector routine. */
__host__
static int cvode_jac(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *FSPMat_ptr, N_Vector tmp) {
double *vdata = N_VGetDeviceArrayPointer_Cuda(v);
double *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);
thrust::fill(thrust::device_pointer_cast<double>(Jvdata),
thrust::device_pointer_cast<double>(Jvdata + ((cuFSP::FSPMat *) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat *) FSPMat_ptr)->action(t, vdata, Jvdata);
CUDACHKERR();
return 0;
}
static int check_flag(void *flagvalue, const char *funcname, int opt);
int main() {
int n_species = 5;
int n_reactions = 9;
double t_final = 5.0 * 60;
double rel_tol = 1.0e-2, abs_tol = 1.0e-8;
int flag;
int stoich_vals[] = {1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1};
int stoich_colidxs[] = {0, 0, 0, 1, 2, 1, 3, 2, 4, 3, 4};
int stoich_rowptrs[] = {0, 1, 2, 3, 4, 5, 7, 9, 10, 11};
// stoichiometric matrix of the hog1p model
// const arma::Mat<int> SM {
// { 1, -1, -1, 0, 0, 0, 0, 0, 0 },
// { 0, 0, 0, 1, 0, -1, 0, 0, 0 },
// { 0, 0, 0, 0, 1, 0, -1, 0, 0 },
// { 0, 0, 0, 0, 0, 1, 0, -1, 0 },
// { 0, 0, 0, 0, 0, 0, 1, 0, -1 },
// };
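// The three arrays above are the CSR form of this stoichiometry with one row per reaction and one
// column per species (the transpose of the commented matrix). For example, row_ptrs[5] = 5 and
// row_ptrs[6] = 7, so reaction 5 has vals[5] = -1 in column 1 and vals[6] = +1 in column 3
// (one molecule of species 1 is consumed, one of species 3 is produced).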
cuFSP::CSRMatInt stoich;
stoich.vals = &stoich_vals[0];
stoich.col_idxs = &stoich_colidxs[0];
stoich.row_ptrs = &stoich_rowptrs[0];
stoich.n_rows = n_reactions;
stoich.n_cols = n_species;
stoich.nnz = 11;
int n_bounds[] = {3, 50, 50, 60, 60};
int n_states = cuFSP::rect_fsp_num_states(n_species, n_bounds);
std::cout << "Total number of states:" << n_states << "\n";
cuFSP::PropFun host_prop_ptr;
hipMemcpyFromSymbol(&host_prop_ptr, prop_pointer, sizeof(cuFSP::PropFun));
CUDACHKERR();
cuFSP::FSPMat A(n_reactions, n_species, n_bounds, stoich, &hog1p::t_func, host_prop_ptr, cuFSP::HYB);
// cuFSP::FSPMat A
// (n_reactions, n_species, n_bounds,
// stoich, &hog1p::t_func, &hog1p::propensity_factor, cuFSP::KRONECKER);
/* Create a CUDA vector with initial values */
N_Vector p0 = N_VNew_Cuda(n_states); /* Allocate p0 vector */
if (check_flag((void *) p0, "N_VNew_Cuda", 0)) return (1);
double *p0_h = N_VGetHostArrayPointer_Cuda(p0);
for (int i = 0; i < n_states; ++i) {
p0_h[i] = 0.0;
}
p0_h[0] = 1.0;
N_VCopyToDevice_Cuda(p0);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula and the use of a Newton iteration */
void *cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON);
if (check_flag((void *) cvode_mem, "CVodeCreate", 0)) return (1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
flag = CVodeInit(cvode_mem, cvode_rhs, 0.0, p0);
if (check_flag(&flag, "CVodeInit", 1)) return (1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
flag = CVodeSStolerances(cvode_mem, rel_tol, abs_tol);
if (check_flag(&flag, "CVodeSStolerances", 1)) return (1);
/* Set the pointer to user-defined data */
flag = CVodeSetUserData(cvode_mem, (void *) &A);
if (check_flag(&flag, "CVodeSetUserData", 1)) return (1);
flag = CVodeSetMaxNumSteps(cvode_mem, 10000000);
flag = CVodeSetMaxConvFails(cvode_mem, 10000000);
flag = CVodeSetStabLimDet(cvode_mem, 1);
flag = CVodeSetMaxNonlinIters(cvode_mem, 100000);
/* Create SPGMR solver structure without preconditioning
* and the maximum Krylov dimension maxl */
// SUNLinearSolver LS = SUNSPGMR(p0, PREC_NONE, 10);
// if(check_flag(&flag, "SUNSPGMR", 1)) return(1);
SUNLinearSolver LS = SUNSPBCGS(p0, PREC_NONE, 0);
if (check_flag(&flag, "SUNSPBCGS", 1)) return (1);
/* Set CVSpils linear solver to LS */
flag = CVSpilsSetLinearSolver(cvode_mem, LS);
if (check_flag(&flag, "CVSpilsSetLinearSolver", 1)) return (1);
/* Set the JAcobian-times-vector function */
flag = CVSpilsSetJacTimes(cvode_mem, NULL, cvode_jac);
if (check_flag(&flag, "CVSpilsSetJacTimesVecFn", 1)) return (1);
double t = 0.0;
double psum = 0.0;
double *p0_d = N_VGetDeviceArrayPointer_Cuda(p0);
while (t < t_final) {
flag = CVode(cvode_mem, t_final, p0, &t, CV_ONE_STEP);
if (check_flag(&flag, "CVode", 1)) break;
psum = thrust::reduce(thrust::device_pointer_cast<double>(p0_d),
thrust::device_pointer_cast<double>(p0_d + n_states));
std::cout << "t = " << t << " psum = " << psum << "\n";
}
assert(std::abs(1.0 - psum) <= 1.0e-10);
long num_step;
flag = CVodeGetNumSteps(cvode_mem, &num_step);
check_flag(&flag, "CVodeGetNumSteps", 1);
std::cout << "CVODE takes " << num_step << " steps.\n";
SUNLinSolFree(LS);
CVodeFree(&cvode_mem);
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_flag(void *flagvalue, const char *funcname, int opt) {
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return (1);
}
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return (1);
}
}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return (1);
}
return (0);
} | b8edc56e175b1d54755271c7adfce46c5f1a2d21.cu | //
// Created by Huy Vo on 11/26/18.
//
#include <iostream>
#include <time.h>
#include <cuda_runtime.h>
#include <cusparse.h>
#include <thrust/transform.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <cvode/cvode.h>
#include <nvector/nvector_cuda.h>
#include <sunlinsol/sunlinsol_spgmr.h>
#include <sunlinsol/sunlinsol_spbcgs.h>
#include <cvode/cvode_spils.h>
#include <sundials/sundials_types.h>
#include <sundials/sundials_math.h>
#include "cme_util.h"
#include "FSPMat.h"
namespace hog1p {
// reaction parameters
const double k12{1.29}, k21{1.0e0}, k23{0.0067},
k32{0.027}, k34{0.133}, k43{0.0381},
kr2{0.0116}, kr3{0.987}, kr4{0.0538},
trans{0.01}, gamma{0.0049},
// parameters for the time-dependent factors
r1{6.9e-5}, r2{7.1e-3}, eta{3.1}, Ahog{9.3e09}, Mhog{6.4e-4};
// propensity function
__device__
double propensity(int *X, int k) {
switch (X[0]) {
case 0: {
switch (k) {
case 0:
return k12;
case 1:
return 0.0;
case 2:
return 0.0;
case 3:
return 0.0;
case 4:
return 0.0;
}
}
case 1: {
switch (k) {
case 0:
return k23;
case 1:
return 0.0;
case 2:
return k21;
case 3:
return kr2;
case 4:
return kr2;
}
}
case 2: {
switch (k) {
case 0:
return k34;
case 1:
return k32;
case 2:
return 0.0;
case 3:
return kr3;
case 4:
return kr3;
}
}
case 3: {
switch (k) {
case 0:
return 0.0;
case 1:
return k43;
case 2:
return 0.0;
case 3:
return kr4;
case 4:
return kr4;
}
}
}
switch (k) {
case 5:
return trans * double(X[1]);
case 6:
return trans * double(X[2]);
case 7:
return gamma * double(X[3]);
case 8:
return gamma * double(X[4]);
}
return 0.0;
}
__host__
double propensity_factor(int X, int species, int reaction) {
if (species == 0) {
switch (X){
case 0: {
switch (reaction) {
case 0:
return k12;
case 1:
return 0.0;
case 2:
return 0.0;
case 3:
return 0.0;
case 4:
return 0.0;
}
}
case 1: {
switch (reaction) {
case 0:
return k23;
case 1:
return 0.0;
case 2:
return k21;
case 3:
return kr2;
case 4:
return kr2;
}
}
case 2: {
switch (reaction) {
case 0:
return k34;
case 1:
return k32;
case 2:
return 0.0;
case 3:
return kr3;
case 4:
return kr3;
}
}
case 3: {
switch (reaction) {
case 0:
return 0.0;
case 1:
return k43;
case 2:
return 0.0;
case 3:
return kr4;
case 4:
return kr4;
}
}
default:
return 1.0;
}
}
switch (reaction) {
case 5:
if (species == 1) return trans * double(X);
case 6:
if (species == 2) return trans * double(X);
case 7:
if (species == 3) return gamma * double(X);
case 8:
if (species == 4) return gamma * double(X);
default:
return 1.0;
}
return 1.0;
}
// function to compute the time-dependent coefficients of the propensity functions
void t_func(double t, double *out) {
for (int i = 0; i < 9; ++i) {
out[i] = 1.0;
}
double h1 = (1.0 - exp(-r1 * t)) * exp(-r2 * t);
double hog1p = pow(h1 / (1.0 + h1 / Mhog), eta) * Ahog;
out[2] = std::max(0.0, 3200.0 - 7710.0 * (hog1p));
//u(2) = std::max(0.0, 3200.0 - (hog1p));
}
}
/* RHS of CME routine. */
__host__
static int cvode_rhs(double t, N_Vector u, N_Vector udot, void *FSPMat_ptr) {
double *udata = N_VGetDeviceArrayPointer_Cuda(u);
double *udotdata = N_VGetDeviceArrayPointer_Cuda(udot);
thrust::fill(thrust::device_pointer_cast<double>(udotdata),
thrust::device_pointer_cast<double>(udotdata + ((cuFSP::FSPMat *) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat *) FSPMat_ptr)->action(t, udata, udotdata);
CUDACHKERR();
return 0;
}
__device__ cuFSP::PropFun prop_pointer = &hog1p::propensity;
/* Jacobian-times-vector routine. */
__host__
static int cvode_jac(N_Vector v, N_Vector Jv, realtype t,
N_Vector u, N_Vector fu,
void *FSPMat_ptr, N_Vector tmp) {
double *vdata = N_VGetDeviceArrayPointer_Cuda(v);
double *Jvdata = N_VGetDeviceArrayPointer_Cuda(Jv);
thrust::fill(thrust::device_pointer_cast<double>(Jvdata),
thrust::device_pointer_cast<double>(Jvdata + ((cuFSP::FSPMat *) FSPMat_ptr)->get_n_rows()), 0.0);
((cuFSP::FSPMat *) FSPMat_ptr)->action(t, vdata, Jvdata);
CUDACHKERR();
return 0;
}
static int check_flag(void *flagvalue, const char *funcname, int opt);
int main() {
int n_species = 5;
int n_reactions = 9;
double t_final = 5.0 * 60;
double rel_tol = 1.0e-2, abs_tol = 1.0e-8;
int flag;
int stoich_vals[] = {1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1};
int stoich_colidxs[] = {0, 0, 0, 1, 2, 1, 3, 2, 4, 3, 4};
int stoich_rowptrs[] = {0, 1, 2, 3, 4, 5, 7, 9, 10, 11};
// stoichiometric matrix of the hog1p model
// const arma::Mat<int> SM {
// { 1, -1, -1, 0, 0, 0, 0, 0, 0 },
// { 0, 0, 0, 1, 0, -1, 0, 0, 0 },
// { 0, 0, 0, 0, 1, 0, -1, 0, 0 },
// { 0, 0, 0, 0, 0, 1, 0, -1, 0 },
// { 0, 0, 0, 0, 0, 0, 1, 0, -1 },
// };
cuFSP::CSRMatInt stoich;
stoich.vals = &stoich_vals[0];
stoich.col_idxs = &stoich_colidxs[0];
stoich.row_ptrs = &stoich_rowptrs[0];
stoich.n_rows = n_reactions;
stoich.n_cols = n_species;
stoich.nnz = 11;
int n_bounds[] = {3, 50, 50, 60, 60};
int n_states = cuFSP::rect_fsp_num_states(n_species, n_bounds);
std::cout << "Total number of states:" << n_states << "\n";
cuFSP::PropFun host_prop_ptr;
cudaMemcpyFromSymbol(&host_prop_ptr, prop_pointer, sizeof(cuFSP::PropFun));
CUDACHKERR();
cuFSP::FSPMat A(n_reactions, n_species, n_bounds, stoich, &hog1p::t_func, host_prop_ptr, cuFSP::HYB);
// cuFSP::FSPMat A
// (n_reactions, n_species, n_bounds,
// stoich, &hog1p::t_func, &hog1p::propensity_factor, cuFSP::KRONECKER);
/* Create a CUDA vector with initial values */
N_Vector p0 = N_VNew_Cuda(n_states); /* Allocate p0 vector */
if (check_flag((void *) p0, "N_VNew_Cuda", 0)) return (1);
double *p0_h = N_VGetHostArrayPointer_Cuda(p0);
for (int i = 0; i < n_states; ++i) {
p0_h[i] = 0.0;
}
p0_h[0] = 1.0;
N_VCopyToDevice_Cuda(p0);
/* Call CVodeCreate to create the solver memory and specify the
* Backward Differentiation Formula and the use of a Newton iteration */
void *cvode_mem = CVodeCreate(CV_BDF, CV_NEWTON);
if (check_flag((void *) cvode_mem, "CVodeCreate", 0)) return (1);
/* Call CVodeInit to initialize the integrator memory and specify the
* user's right hand side function in u'=f(t,u), the initial time T0, and
* the initial dependent variable vector u. */
flag = CVodeInit(cvode_mem, cvode_rhs, 0.0, p0);
if (check_flag(&flag, "CVodeInit", 1)) return (1);
/* Call CVodeSStolerances to specify the scalar relative tolerance
* and scalar absolute tolerance */
flag = CVodeSStolerances(cvode_mem, rel_tol, abs_tol);
if (check_flag(&flag, "CVodeSStolerances", 1)) return (1);
/* Set the pointer to user-defined data */
flag = CVodeSetUserData(cvode_mem, (void *) &A);
if (check_flag(&flag, "CVodeSetUserData", 1)) return (1);
flag = CVodeSetMaxNumSteps(cvode_mem, 10000000);
flag = CVodeSetMaxConvFails(cvode_mem, 10000000);
flag = CVodeSetStabLimDet(cvode_mem, 1);
flag = CVodeSetMaxNonlinIters(cvode_mem, 100000);
/* Create SPGMR solver structure without preconditioning
* and the maximum Krylov dimension maxl */
// SUNLinearSolver LS = SUNSPGMR(p0, PREC_NONE, 10);
// if(check_flag(&flag, "SUNSPGMR", 1)) return(1);
SUNLinearSolver LS = SUNSPBCGS(p0, PREC_NONE, 0);
if (check_flag(&flag, "SUNSPBCGS", 1)) return (1);
/* Set CVSpils linear solver to LS */
flag = CVSpilsSetLinearSolver(cvode_mem, LS);
if (check_flag(&flag, "CVSpilsSetLinearSolver", 1)) return (1);
/* Set the JAcobian-times-vector function */
flag = CVSpilsSetJacTimes(cvode_mem, NULL, cvode_jac);
if (check_flag(&flag, "CVSpilsSetJacTimesVecFn", 1)) return (1);
double t = 0.0;
double psum = 0.0;
double *p0_d = N_VGetDeviceArrayPointer_Cuda(p0);
while (t < t_final) {
flag = CVode(cvode_mem, t_final, p0, &t, CV_ONE_STEP);
if (check_flag(&flag, "CVode", 1)) break;
psum = thrust::reduce(thrust::device_pointer_cast<double>(p0_d),
thrust::device_pointer_cast<double>(p0_d + n_states));
std::cout << "t = " << t << " psum = " << psum << "\n";
}
assert(std::abs(1.0 - psum) <= 1.0e-10);
long num_step;
flag = CVodeGetNumSteps(cvode_mem, &num_step);
check_flag(&flag, "CVodeGetNumSteps", 1);
std::cout << "CVODE takes " << num_step << " steps.\n";
SUNLinSolFree(LS);
CVodeFree(&cvode_mem);
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer */
static int check_flag(void *flagvalue, const char *funcname, int opt) {
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return (1);
}
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return (1);
}
}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return (1);
}
return (0);
} |
e71194b9407d53a97e0e879d955509aba64144e9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin)
* to work with Collective Mind Framework and OpenME interface for automatic
* and collective tuning and data mining: http://cTuning.org
*
*/
#ifndef WINDOWS
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include "polybench.h"
#ifdef OPENME
#include <openme.h>
#endif
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size. */
# define NI 256 //2048
# define NJ 256 //2048
# define NK 256 //2048
# define NL 256 //2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 8 //32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
# ifndef DATA_TYPE
# define DATA_TYPE float
# endif
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NL; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=0; i < NL; i++)
{
for (j=0; j < NI; j++)
{
if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
int devID = 0;
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
else
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
hipSetDevice( GPU_DEVICE );
}
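// Note: the two kernels below accumulate with "+=" into whatever the device buffers already
// hold; mm2Cuda() copies the host C and E arrays in unchanged, while mm2_cpu() re-zeroes C and
// E before accumulating. Zeroing C_gpu and E_gpu on the device (e.g. with hipMemset) before the
// launches would make both code paths compute the same values.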
__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for (k = 0; k < NK; k++)
{
C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for (k = 0; k < NJ; k++)
{
E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
}
}
}
void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu)
{
hipError_t error;
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
error=hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
// t_start = rtclock();
hipLaunchKernelGGL(( mm2_kernel1), dim3(grid1),dim3(block), 0, 0, A_gpu, B_gpu, C_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( mm2_kernel2), dim3(grid2),dim3(block), 0, 0, C_gpu, D_gpu, E_gpu);
hipDeviceSynchronize();
// t_end = rtclock();
// fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
error=hipMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
hipFree(D_gpu);
hipFree(E_gpu);
}
int main(int argc, char** argv)
{
/* Prepare ctuning vars */
long ct_repeat=0;
long ct_repeat_max=1;
DATA_TYPE* C;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* E_outputFromGpu;
#ifdef OPENME
openme_init(NULL,NULL,NULL,0);
openme_callback("PROGRAM_START", NULL);
#endif
/* Run kernel. */
if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN"));
C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
srand(1);
init_array(A, B, C, D);
GPU_argv_init();
#ifdef OPENME
openme_callback("ACC_KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mm2Cuda(A, B, C, D, E, E_outputFromGpu);
}
#ifdef OPENME
openme_callback("ACC_KERNEL_END", NULL);
#endif
srand(1);
init_array(A, B, C, D);
#ifdef OPENME
openme_callback("KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mm2_cpu(A, B, C, D, E);
}
#ifdef OPENME
openme_callback("KERNEL_END", NULL);
#endif
compareResults(E, E_outputFromGpu);
free(C);
free(A);
free(B);
free(D);
free(E);
free(E_outputFromGpu);
#ifdef OPENME
openme_callback("PROGRAM_END", NULL);
#endif
return 0;
}
| e71194b9407d53a97e0e879d955509aba64144e9.cu | /**
* 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin)
 * to work with Collective Mind Framework and OpenME interface for automatic
* and collective tuning and data mining: http://cTuning.org
*
*/
#ifndef WINDOWS
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include "polybench.h"
#ifdef OPENME
#include <openme.h>
#endif
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size. */
# define NI 256 //2048
# define NJ 256 //2048
# define NK 256 //2048
# define NL 256 //2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 8 //32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
# ifndef DATA_TYPE
# define DATA_TYPE float
# endif
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
int i, j;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NK; j++)
{
A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
}
}
for (i = 0; i < NK; i++)
{
for (j = 0; j < NJ; j++)
{
B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
}
}
for (i = 0; i < NL; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
}
}
}
void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=0; i < NL; i++)
{
for (j=0; j < NI; j++)
{
if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
int devID = 0;
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
else
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
cudaSetDevice( GPU_DEVICE );
}
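// Note: the two kernels below accumulate with "+=" into whatever the device buffers already
// hold; mm2Cuda() copies the host C and E arrays in unchanged, while mm2_cpu() re-zeroes C and
// E before accumulating. Zeroing C_gpu and E_gpu on the device (e.g. with cudaMemset) before the
// launches would make both code paths compute the same values.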
__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NJ))
{
int k;
for (k = 0; k < NK; k++)
{
C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
}
}
}
__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NL))
{
int k;
for (k = 0; k < NJ; k++)
{
E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
}
}
}
void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
int i, j, k;
for (i = 0; i < NI; i++)
{
for (j = 0; j < NJ; j++)
{
C[i*NJ + j] = 0.0;
for (k = 0; k < NK; ++k)
{
C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
}
}
}
for (i = 0; i < NI; i++)
{
for (j = 0; j < NL; j++)
{
E[i*NL + j] = 0.0;
for (k = 0; k < NJ; ++k)
{
E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
}
}
}
}
void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* E_outputFromGpu)
{
cudaError_t error;
double t_start, t_end;
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
DATA_TYPE *D_gpu;
DATA_TYPE *E_gpu;
error=cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NJ * NL);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NL);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error=cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
// t_start = rtclock();
mm2_kernel1<<<grid1,block>>>(A_gpu, B_gpu, C_gpu);
cudaThreadSynchronize();
mm2_kernel2<<<grid2,block>>>(C_gpu, D_gpu, E_gpu);
cudaThreadSynchronize();
// t_end = rtclock();
// fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
error=cudaMemcpy(E_outputFromGpu, E_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
cudaFree(D_gpu);
cudaFree(E_gpu);
}
int main(int argc, char** argv)
{
/* Prepare ctuning vars */
long ct_repeat=0;
long ct_repeat_max=1;
DATA_TYPE* C;
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* D;
DATA_TYPE* E;
DATA_TYPE* E_outputFromGpu;
#ifdef OPENME
openme_init(NULL,NULL,NULL,0);
openme_callback("PROGRAM_START", NULL);
#endif
/* Run kernel. */
if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN"));
C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
srand(1);
init_array(A, B, C, D);
GPU_argv_init();
#ifdef OPENME
openme_callback("ACC_KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mm2Cuda(A, B, C, D, E, E_outputFromGpu);
}
#ifdef OPENME
openme_callback("ACC_KERNEL_END", NULL);
#endif
srand(1);
init_array(A, B, C, D);
#ifdef OPENME
openme_callback("KERNEL_START", NULL);
#endif
for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++)
{
mm2_cpu(A, B, C, D, E);
}
#ifdef OPENME
openme_callback("KERNEL_END", NULL);
#endif
compareResults(E, E_outputFromGpu);
free(C);
free(A);
free(B);
free(D);
free(E);
free(E_outputFromGpu);
#ifdef OPENME
openme_callback("PROGRAM_END", NULL);
#endif
return 0;
}
|
097ccbdcaa4704497e2c3244872034ef99619730.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <getopt.h>
#include <string.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "pcnn.h"
int usage(char* program_name){
std::cout << "Usage: ";
std::cout << program_name << " [Options] image_file_path" << std::endl;
std::cout << "-- Options --" << std::endl;
std::cout << "Print usage:" << std::endl;
std::cout << "[-h|--help]\n" << std::endl;
std::cout << "Set output pcnn-icon to file:" << std::endl;
std::cout << "[-o|--output_file] output_pcnn-icon_file_path\n" << std::endl;
std::cout << "Set output directory for pcnn processed image files:" << std::endl;
std::cout << "[-O|--output_images] output_image_directory_path\n" << std::endl;
std::cout << "Set the number of time steps for processing the PCNN:" << std::endl;
std::cout << "[-s|--step] time_steps\n" << std::endl;
std::cout << "Set the size of the weights kernel:" << std::endl;
std::cout << "[-k|--kernel] kernel_size\n" << std::endl;
std::cout << "Run with CPU:" << std::endl;
std::cout << "[-C|--with-cpu]\n" << std::endl;
std::cout << "Parameters setting options: " << std::endl;
std::cout << "[--beta] beta" << std::endl;
//std::cout << "[--vF] vF" << std::endl;
std::cout << "[--vL] vL" << std::endl;
std::cout << "[--vT] vT" << std::endl;
std::cout << "[--tauL] tauL" << std::endl;
std::cout << "[--tauT] tauT" << std::endl;
std::cout << "[--hh] h" << std::endl;
return 0;
}
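// Example invocation (hypothetical file names; the options map onto the getopt_long table in main() below):
//   ./pcnn --step 50 --kernel 9 --beta 0.05 -o icon.txt -O ./pcnn_frames input.png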
int main(int argc, char* argv[]){
// If input image file wasn't set, print usage.
if(argc < 2){
usage(argv[0]);
return 1;
}
pcnn_params_t parameter;
int bad_option_flag = 0;
int output_icon_to_file_flag = 0;
char *icon_to_file;
int output_images_to_file_flag = 0;
char *images_to_file;
int with_cpu_flag = 0;
parameter.beta = 0.03;
parameter.vF = 0.01;
parameter.vL = 1.0;
parameter.vT = 10.0;
parameter.tauL = 10.0;
parameter.tauT = 2.5;
parameter.time_steps = 100;
parameter.kernel_size = 11;
parameter.hh = 1.0;
while(1){
int option_index = 0;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
{"output_file", no_argument, 0, 'o' },
{"output_images", no_argument, 0, 'O' },
{"beta", required_argument, 0, 0 },
{"vF", required_argument, 0, 0 },
{"vL", required_argument, 0, 0 },
{"vT", required_argument, 0, 0 },
{"tauL", required_argument, 0, 0 },
{"tauT", required_argument, 0, 0 },
{"step", required_argument, 0, 's' },
{"kernel", required_argument, 0, 'k' },
{"hh", required_argument, 0, 0 },
{"with-cpu", no_argument, 0, 'C' }
};
int option = getopt_long(argc, argv, "ho:O:s:k:C",
long_options, &option_index);
if(option == -1){
break;
}
switch(option){
case 0:
if(!strcmp(long_options[option_index].name, "beta")){
parameter.beta = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vF")){
parameter.vF = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vL")){
parameter.vL = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vT")){
parameter.vT = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "tauL")){
parameter.tauL = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "tauT")){
parameter.tauT = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "hh")){
parameter.hh = atof(optarg);
break;
}
break;
case 'h':
usage(argv[0]);
return 0;
case 'o':
if(optarg == NULL || output_icon_to_file_flag == 1){
bad_option_flag = 1;
} else {
output_icon_to_file_flag = 1;
icon_to_file = optarg;
std::cout << "Output PCNN-Icon to: " << optarg << std::endl;
}
break;
case 'O':
if(optarg == NULL || output_images_to_file_flag == 1){
bad_option_flag = 1;
} else {
output_images_to_file_flag = 1;
images_to_file = optarg;
std::cout << "Output PCNN output images to: " << optarg << std::endl;
}
break;
case 's':
parameter.time_steps = atoi(optarg);
break;
case 'k':
parameter.kernel_size = atoi(optarg);
break;
case 'C':
with_cpu_flag = 1;
break;
default:
// Unknown option.
bad_option_flag = 1;
}
}
if(bad_option_flag != 0){
usage(argv[0]);
return 1;
}
if(argc - optind > 1){
usage(argv[0]);
return 1;
}
char* input_filename = argv[optind];
std::cout << "Input image: " << input_filename << std::endl;
float* stimu;
stimu = image2stimuF(input_filename, ¶meter);
if(stimu == NULL){
std::cout << "ERROR" << std::endl;
return 1;
}
std::cout << "PCNN parameters: " << std::endl;
std::cout << "beta = " << parameter.beta << std::endl;
//std::cout << "vF = " << parameter.vF << std::endl;
std::cout << "vL = " << parameter.vL << std::endl;
std::cout << "vT = " << parameter.vT << std::endl;
std::cout << "tauL = " << parameter.tauL << std::endl;
std::cout << "tauT = " << parameter.tauT << std::endl;
std::cout << "time_steps = " << parameter.time_steps << std::endl;
std::cout << "kernel_size = " << parameter.kernel_size << std::endl;
std::cout << "h = " << parameter.hh << std::endl;
int ret;
if(with_cpu_flag != 0){
std::cout << "PCNN on CPU start ..." << std::endl;
ret = pcnn(stimu, ¶meter, output_icon_to_file_flag, icon_to_file, output_images_to_file_flag, images_to_file);
if(ret == 0){
std::cout << "PCNN on CPU end.\n\n" << std::endl;
}
} else {
std::cout << "PCNN on GPU start ..." << std::endl;
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if(error_id != hipSuccess){
std::cout << "hipGetDeviceCount returned " << error_id << std::endl;
std::cout << "->" << hipGetErrorString(error_id) << std::endl;
std::cout << "ERROR" << std::endl;
return 1;
}
ret = pcnn_gpu(stimu, ¶meter, output_icon_to_file_flag, icon_to_file, output_images_to_file_flag, images_to_file);
if(ret == 0){
std::cout << "PCNN on GPU end.\n\n" << std::endl;
}
}
if(ret != 0){
std::cout << "ERROR" << std::endl;
return 1;
}
free(stimu);
return 0;
}
| 097ccbdcaa4704497e2c3244872034ef99619730.cu | #include <iostream>
#include <getopt.h>
#include <string.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "pcnn.h"
int usage(char* program_name){
std::cout << "Usage: ";
std::cout << program_name << " [Options] image_file_path" << std::endl;
std::cout << "-- Options --" << std::endl;
std::cout << "Print usage:" << std::endl;
std::cout << "[-h|--help]\n" << std::endl;
std::cout << "Set output pcnn-icon to file:" << std::endl;
std::cout << "[-o|--output_file] output_pcnn-icon_file_path\n" << std::endl;
std::cout << "Set output directory for pcnn processed image files:" << std::endl;
std::cout << "[-O|--output_images] output_image_directory_path\n" << std::endl;
std::cout << "Set the number of time steps for processing the PCNN:" << std::endl;
std::cout << "[-s|--step] time_steps\n" << std::endl;
std::cout << "Set the size of the weights kernel:" << std::endl;
std::cout << "[-k|--kernel] kernel_size\n" << std::endl;
std::cout << "Run with CPU:" << std::endl;
std::cout << "[-C|--with-cpu]\n" << std::endl;
std::cout << "Parameters setting options: " << std::endl;
std::cout << "[--beta] beta" << std::endl;
//std::cout << "[--vF] vF" << std::endl;
std::cout << "[--vL] vL" << std::endl;
std::cout << "[--vT] vT" << std::endl;
std::cout << "[--tauL] tauL" << std::endl;
std::cout << "[--tauT] tauT" << std::endl;
std::cout << "[--hh] h" << std::endl;
return 0;
}
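// Example invocation (hypothetical file names; the options map onto the getopt_long table in main() below):
//   ./pcnn --step 50 --kernel 9 --beta 0.05 -o icon.txt -O ./pcnn_frames input.png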
int main(int argc, char* argv[]){
// If input image file wasn't set, print usage.
if(argc < 2){
usage(argv[0]);
return 1;
}
pcnn_params_t parameter;
int bad_option_flag = 0;
int output_icon_to_file_flag = 0;
char *icon_to_file;
int output_images_to_file_flag = 0;
char *images_to_file;
int with_cpu_flag = 0;
parameter.beta = 0.03;
parameter.vF = 0.01;
parameter.vL = 1.0;
parameter.vT = 10.0;
parameter.tauL = 10.0;
parameter.tauT = 2.5;
parameter.time_steps = 100;
parameter.kernel_size = 11;
parameter.hh = 1.0;
while(1){
int option_index = 0;
static struct option long_options[] = {
{"help", no_argument, 0, 'h' },
{"output_file", no_argument, 0, 'o' },
{"output_images", no_argument, 0, 'O' },
{"beta", required_argument, 0, 0 },
{"vF", required_argument, 0, 0 },
{"vL", required_argument, 0, 0 },
{"vT", required_argument, 0, 0 },
{"tauL", required_argument, 0, 0 },
{"tauT", required_argument, 0, 0 },
{"step", required_argument, 0, 's' },
{"kernel", required_argument, 0, 'k' },
{"hh", required_argument, 0, 0 },
{"with-cpu", no_argument, 0, 'C' }
};
int option = getopt_long(argc, argv, "ho:O:s:k:C",
long_options, &option_index);
if(option == -1){
break;
}
switch(option){
case 0:
if(!strcmp(long_options[option_index].name, "beta")){
parameter.beta = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vF")){
parameter.vF = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vL")){
parameter.vL = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "vT")){
parameter.vT = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "tauL")){
parameter.tauL = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "tauT")){
parameter.tauT = atof(optarg);
break;
}
if(!strcmp(long_options[option_index].name, "hh")){
parameter.hh = atof(optarg);
break;
}
break;
case 'h':
usage(argv[0]);
return 0;
case 'o':
if(optarg == NULL || output_icon_to_file_flag == 1){
bad_option_flag = 1;
} else {
output_icon_to_file_flag = 1;
icon_to_file = optarg;
std::cout << "Output PCNN-Icon to: " << optarg << std::endl;
}
break;
case 'O':
if(optarg == NULL || output_images_to_file_flag == 1){
bad_option_flag = 1;
} else {
output_images_to_file_flag = 1;
images_to_file = optarg;
std::cout << "Output PCNN output images to: " << optarg << std::endl;
}
break;
case 's':
parameter.time_steps = atoi(optarg);
break;
case 'k':
parameter.kernel_size = atoi(optarg);
break;
case 'C':
with_cpu_flag = 1;
break;
default:
// Unknown option.
bad_option_flag = 1;
}
}
if(bad_option_flag != 0){
usage(argv[0]);
return 1;
}
if(argc - optind > 1){
usage(argv[0]);
return 1;
}
char* input_filename = argv[optind];
std::cout << "Input image: " << input_filename << std::endl;
float* stimu;
stimu = image2stimuF(input_filename, ¶meter);
if(stimu == NULL){
std::cout << "ERROR" << std::endl;
return 1;
}
std::cout << "PCNN parameters: " << std::endl;
std::cout << "beta = " << parameter.beta << std::endl;
//std::cout << "vF = " << parameter.vF << std::endl;
std::cout << "vL = " << parameter.vL << std::endl;
std::cout << "vT = " << parameter.vT << std::endl;
std::cout << "tauL = " << parameter.tauL << std::endl;
std::cout << "tauT = " << parameter.tauT << std::endl;
std::cout << "time_steps = " << parameter.time_steps << std::endl;
std::cout << "kernel_size = " << parameter.kernel_size << std::endl;
std::cout << "h = " << parameter.hh << std::endl;
int ret;
if(with_cpu_flag != 0){
std::cout << "PCNN on CPU start ..." << std::endl;
ret = pcnn(stimu, ¶meter, output_icon_to_file_flag, icon_to_file, output_images_to_file_flag, images_to_file);
if(ret == 0){
std::cout << "PCNN on CPU end.\n\n" << std::endl;
}
} else {
std::cout << "PCNN on GPU start ..." << std::endl;
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if(error_id != cudaSuccess){
std::cout << "cudaGetDeviceCount returned " << error_id << std::endl;
std::cout << "->" << cudaGetErrorString(error_id) << std::endl;
std::cout << "ERROR" << std::endl;
return 1;
}
ret = pcnn_gpu(stimu, ¶meter, output_icon_to_file_flag, icon_to_file, output_images_to_file_flag, images_to_file);
if(ret == 0){
std::cout << "PCNN on GPU end.\n\n" << std::endl;
}
}
if(ret != 0){
std::cout << "ERROR" << std::endl;
return 1;
}
free(stimu);
return 0;
}
|
8be29e42ebba83412937018030f2db6d485bdd68.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include "matrix.h"
matrix::matrix(){
height = 1;
width = 1;
sizeArray = height*width;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
}
matrix::matrix(size_t h){
height = h;
width = 1;
sizeArray = height*width;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
}
matrix::matrix(size_t h,size_t w,int value){
height = h;
width = w;
sizeArray = height*width;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(int i = 0; i < height*width; i++)
array[i] = value;
}
matrix::matrix(const matrix &mat){
height = mat.height;
width = mat.width;
sizeArray = mat.sizeArray;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat.array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
}
matrix &matrix::operator=(const matrix &mat){
height = mat.height;
width = mat.width;
sizeArray = mat.sizeArray;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat.array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
return *this;
}
matrix &matrix::operator=(const matrix *mat){
height = mat->height;
width = mat->width;
sizeArray = mat->sizeArray;
hipError_t err = hipMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != hipSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat->array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
return *this;
}
matrix::~matrix(){
hipFree(array);
}
void matrix::assignValue(int i,int j, bool value){
int l = i*width + j;
array[l] = value;
}
void matrix::assignValue(int l, bool value){
array[l] = value;
}
void matrix::displayArray(){
size_t i,j,l;
for(i=0;i<height;++i){
for(j=0;j<width;++j){
l =i*width + j;
//cout<<array[l]<<"\t";
printf("%d ",array[l]);
}
//cout<<endl;
printf("\n");
}
printf("\n");
}
void matrix::add_edge(int x, int y)
{
if(x < 0 || y < 0 || x >= (int)height || y >= (int)width)
printf("Invalid edge!\n");
else
{
array[x*width + y] = 1;
array[y*width + x] = 1;
}
}
int matrix::nodeCount()
{
return sizeArray;
}
int matrix::edgeCount()
{
int count = 0;
int i,j;
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
if(array[i*width + j] == 1)
count++;
}
}
return count;
}
int matrix::nodeDegree(int d)
{
int count = 0;
int i;
for(i = 0; i < width; i++)
{
if(array[d*width + i] == 1)
count++;
}
return count;
}
bool matrix::recursiveNode(int d)
{
if(array[d*width + d] == 1)
return true;
else
return false;
}
bool matrix::getValueAtIndex(int i, int j)
{
return array[i*width+j];
}
int matrix::getJPos(int idx)
{
for(int i = 0; i < width; i++)
{
if(array[idx*width+i] == 1)
return i;
}
return 0;
}
int matrix::getNodeG(int i)
{
return i/width;
}
int matrix::getNodeH(int i)
{
return i%width;
}
| 8be29e42ebba83412937018030f2db6d485bdd68.cu | #include <iostream>
#include <cstdio>
#include "matrix.h"
matrix::matrix(){
height = 1;
width = 1;
sizeArray = height*width;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
}
matrix::matrix(size_t h){
height = h;
width = 1;
sizeArray = height*width;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
}
matrix::matrix(size_t h,size_t w,int value){
height = h;
width = w;
sizeArray = height*width;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(int i = 0; i < height*width; i++)
array[i] = value;
}
matrix::matrix(const matrix &mat){
height = mat.height;
width = mat.width;
sizeArray = mat.sizeArray;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat.array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
}
matrix &matrix::operator=(const matrix &mat){
height = mat.height;
width = mat.width;
sizeArray = mat.sizeArray;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat.array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
return *this;
}
matrix &matrix::operator=(const matrix *mat){
height = mat->height;
width = mat->width;
sizeArray = mat->sizeArray;
cudaError_t err = cudaMallocManaged(&array,sizeArray*sizeof(array[0]));
if (err != cudaSuccess)
{
//cout << "Memory allocation failed"<<endl;
printf("Memory allocation failed");
}
for(size_t i = 0;i<sizeArray;++i){
array[i] = mat->array[i];
}
//copy(mat.array,mat.array+mat.sizeArray,array);
return *this;
}
matrix::~matrix(){
cudaFree(array);
}
void matrix::assignValue(int i,int j, bool value){
int l = i*width + j;
array[l] = value;
}
void matrix::assignValue(int l, bool value){
array[l] = value;
}
void matrix::displayArray(){
size_t i,j,l;
for(i=0;i<height;++i){
for(j=0;j<width;++j){
l =i*width + j;
//cout<<array[l]<<"\t";
printf("%d ",array[l]);
}
//cout<<endl;
printf("\n");
}
printf("\n");
}
void matrix::add_edge(int x, int y)
{
if(x < 0 || y < 0 || x >= (int)height || y >= (int)width)
printf("Invalid edge!\n");
else
{
array[x*width + y] = 1;
array[y*width + x] = 1;
}
}
int matrix::nodeCount()
{
return sizeArray;
}
int matrix::edgeCount()
{
int count = 0;
int i,j;
for(i = 0; i < width; i++)
{
for(j = 0; j < height; j++)
{
if(array[i*width + j] == 1)
count++;
}
}
return count;
}
int matrix::nodeDegree(int d)
{
int count = 0;
int i;
for(i = 0; i < width; i++)
{
if(array[d*width + i] == 1)
count++;
}
return count;
}
bool matrix::recursiveNode(int d)
{
if(array[d*width + d] == 1)
return true;
else
return false;
}
bool matrix::getValueAtIndex(int i, int j)
{
return array[i*width+j];
}
int matrix::getJPos(int idx)
{
for(int i = 0; i < width; i++)
{
if(array[idx*width+i] == 1)
return i;
}
return 0;
}
int matrix::getNodeG(int i)
{
return i/width;
}
int matrix::getNodeH(int i)
{
return i%width;
}
|
416811df387234794b63e9c003d6b3d4c02a26fa.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
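// The shift is evaluated on the unsigned representation of 'a' so that left-shifting
// negative values stays well-defined; the result converts back to scalar_t on return.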
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
void rshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
}} // namespace at::native
| 416811df387234794b63e9c003d6b3d4c02a26fa.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void lshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "lshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
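// The shift is evaluated on the unsigned representation of 'a' so that left-shifting
// negative values stays well-defined; the result converts back to scalar_t on return.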
return static_cast<std::make_unsigned_t<scalar_t>>(a) << b;
});
});
}
void rshift_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "rshift_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a >> b;
});
});
}
REGISTER_DISPATCH(lshift_stub, &lshift_kernel_cuda);
REGISTER_DISPATCH(rshift_stub, &rshift_kernel_cuda);
}} // namespace at::native
|
4b2c466b35dc41fb83b347a591f03fbee84d9c96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_POS(pos)
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
real4 center = 0.5f*(maxPos+minPos);
center.w = 0;
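// center.w accumulates the squared distance of the farthest atom in the block from the box
// center; the sqrt below turns it into the radius of a bounding sphere around the block.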
for (int i = base; i < last; i++) {
pos = posq[i];
real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
}
center.w = sqrt(center.w);
blockBoundingBox[index] = blockSize;
blockCenter[index] = center;
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = forceRebuild;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
interactionCount[1] = 0;
}
}
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
// Record interactions that should be computed as single pairs rather than in blocks.
const int indexInWarp = threadIdx.x%32;
int sum = 0;
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
}
sumBuffer[indexInWarp] = sum;
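// Inclusive prefix sum of the per-lane pair counts across the 32 lanes of the warp (five
// doubling steps); afterwards sumBuffer[31] holds the warp-wide total and sumBuffer[lane-1]
// gives each lane its starting offset into the singlePairs output.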
for (int step = 1; step < 32; step *= 2) {
int add = (indexInWarp >= step ? sumBuffer[indexInWarp-step] : 0);
sumBuffer[indexInWarp] += add;
}
int pairsToStore = sumBuffer[31];
if (indexInWarp == 0)
pairStartIndex = atomicAdd(singlePairCount, pairsToStore);
int pairIndex = pairStartIndex + (indexInWarp > 0 ? sumBuffer[indexInWarp-1] : 0);
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
int f = flags[i];
while (f != 0) {
singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
f &= f-1;
pairIndex++;
}
}
}
// Compact the remaining interactions.
const int warpMask = (1<<indexInWarp)-1;
int numCompacted = 0;
for (int start = 0; start < length; start += 32) {
int i = start+indexInWarp;
int atom = atoms[i];
int flag = flags[i];
bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
int includeFlags = BALLOT(include);
if (include) {
int index = numCompacted+__popc(includeFlags&warpMask);
atoms[index] = atom;
flags[index] = flag;
}
numCompacted += __popc(includeFlags);
}
return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
 * [in] exclusionIndices - a single continuous list holding, for every atom block, the atom blocks it is excluded from (CSR-style column indices)
 * [in] exclusionRowIndices - the starting position of each block's exclusion list within exclusionIndices (CSR-style row offsets, one entry per block plus a final sentinel)
 * eg: block 0 is excluded from blocks 3,5,6
 * block 1 is excluded from blocks 3,4
 * block 2 is excluded from blocks 1,3,5,6
 * exclusionIndices [3][5][6][3][4][1][3][5][6]
 * exclusionRowIndices[0][3][5][9]
 * index 0 1 2 3 4 5 6 7 8
 * [out] oldPositions - stores the positions of the atoms on which this neighbour list was built
* - this is used to decide when to rebuild a neighbourlist
* [in] rebuildNeighbourList - whether or not to execute this kernel
*
*/
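// Illustration only (not part of the original kernel): a minimal host-side sketch, assuming a
// hypothetical exclusionsOfBlock() helper, of how the CSR-style pair of arrays documented above
// could be filled before this kernel is launched:
//
//   unsigned int offset = 0;
//   for (int block = 0; block < numBlocks; block++) {
//       exclusionRowIndices[block] = offset;                    // start of this block's list
//       for (unsigned int excluded : exclusionsOfBlock(block))  // hypothetical helper
//           exclusionIndices[offset++] = excluded;              // flattened excluded-block list
//   }
//   exclusionRowIndices[numBlocks] = offset;                    // sentinel read as exclusionRowIndices[x+1]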
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
__shared__ int worksgroupPairStartIndex[GROUP_SIZE/32];
int* sumBuffer = (int*) posBuffer; // Reuse the same buffer to save memory
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
volatile int& pairStartIndex = worksgroupPairStartIndex[warpStart/32];
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = sortedBlockCenter[block2];
real4 blockSizeY = sortedBlockBoundingBox[block2];
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
// The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
// If there's any possibility we might have missed it, do a detailed check.
if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
includeBlock2 = true;
#endif
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = BALLOT(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int atom2 = y*TILE_SIZE+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
real4 blockCenterY = sortedBlockCenter[block2Base+i];
real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
int atomFlags = BALLOT(atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
int interacts = 0;
if (atom2 < NUM_ATOMS && atomFlags != 0) {
int first = __ffs(atomFlags)-1;
int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
}
else {
#endif
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = BALLOT(interacts);
if (interacts) {
int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
buffer[index] = atom2;
flagsBuffer[index] = interacts;
}
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (tilesToStore > 0) {
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
}
// If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
if (neighborsInBuffer > 32)
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
| 4b2c466b35dc41fb83b347a591f03fbee84d9c96.cu | #define GROUP_SIZE 256
#define BUFFER_SIZE 256
/**
* Find a bounding box for the atoms in each block.
*/
extern "C" __global__ void findBlockBounds(int numAtoms, real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
const real4* __restrict__ posq, real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ rebuildNeighborList,
real2* __restrict__ sortedBlocks) {
int index = blockIdx.x*blockDim.x+threadIdx.x;
int base = index*TILE_SIZE;
while (base < numAtoms) {
real4 pos = posq[base];
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_POS(pos)
#endif
real4 minPos = pos;
real4 maxPos = pos;
int last = min(base+TILE_SIZE, numAtoms);
for (int i = base+1; i < last; i++) {
pos = posq[i];
#ifdef USE_PERIODIC
real4 center = 0.5f*(maxPos+minPos);
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos, center)
#endif
minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
}
real4 blockSize = 0.5f*(maxPos-minPos);
real4 center = 0.5f*(maxPos+minPos);
center.w = 0;
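// center.w accumulates the squared distance of the farthest atom in the block from the box
// center; the sqrt below turns it into the radius of a bounding sphere around the block.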
for (int i = base; i < last; i++) {
pos = posq[i];
real4 delta = posq[i]-center;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
center.w = max(center.w, delta.x*delta.x+delta.y*delta.y+delta.z*delta.z);
}
center.w = sqrt(center.w);
blockBoundingBox[index] = blockSize;
blockCenter[index] = center;
sortedBlocks[index] = make_real2(blockSize.x+blockSize.y+blockSize.z, index);
index += blockDim.x*gridDim.x;
base = index*TILE_SIZE;
}
if (blockIdx.x == 0 && threadIdx.x == 0)
rebuildNeighborList[0] = 0;
}
/**
* Sort the data about bounding boxes so it can be accessed more efficiently in the next kernel.
*/
extern "C" __global__ void sortBoxData(const real2* __restrict__ sortedBlock, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockBoundingBox, real4* __restrict__ sortedBlockCenter,
real4* __restrict__ sortedBlockBoundingBox, const real4* __restrict__ posq, const real4* __restrict__ oldPositions,
unsigned int* __restrict__ interactionCount, int* __restrict__ rebuildNeighborList, bool forceRebuild) {
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_BLOCKS; i += blockDim.x*gridDim.x) {
int index = (int) sortedBlock[i].y;
sortedBlockCenter[i] = blockCenter[index];
sortedBlockBoundingBox[i] = blockBoundingBox[index];
}
// Also check whether any atom has moved enough so that we really need to rebuild the neighbor list.
bool rebuild = forceRebuild;
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x) {
real4 delta = oldPositions[i]-posq[i];
if (delta.x*delta.x + delta.y*delta.y + delta.z*delta.z > 0.25f*PADDING*PADDING)
rebuild = true;
}
if (rebuild) {
rebuildNeighborList[0] = 1;
interactionCount[0] = 0;
interactionCount[1] = 0;
}
}
__device__ int saveSinglePairs(int x, int* atoms, int* flags, int length, unsigned int maxSinglePairs, unsigned int* singlePairCount, int2* singlePairs, int* sumBuffer, volatile int& pairStartIndex) {
// Record interactions that should be computed as single pairs rather than in blocks.
const int indexInWarp = threadIdx.x%32;
int sum = 0;
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
sum += (count <= MAX_BITS_FOR_PAIRS ? count : 0);
}
sumBuffer[indexInWarp] = sum;
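// Inclusive prefix sum of the per-lane pair counts across the 32 lanes of the warp (five
// doubling steps); afterwards sumBuffer[31] holds the warp-wide total and sumBuffer[lane-1]
// gives each lane its starting offset into the singlePairs output.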
for (int step = 1; step < 32; step *= 2) {
int add = (indexInWarp >= step ? sumBuffer[indexInWarp-step] : 0);
sumBuffer[indexInWarp] += add;
}
int pairsToStore = sumBuffer[31];
if (indexInWarp == 0)
pairStartIndex = atomicAdd(singlePairCount, pairsToStore);
int pairIndex = pairStartIndex + (indexInWarp > 0 ? sumBuffer[indexInWarp-1] : 0);
for (int i = indexInWarp; i < length; i += 32) {
int count = __popc(flags[i]);
if (count <= MAX_BITS_FOR_PAIRS && pairIndex+count < maxSinglePairs) {
int f = flags[i];
while (f != 0) {
singlePairs[pairIndex] = make_int2(atoms[i], x*TILE_SIZE+__ffs(f)-1);
f &= f-1;
pairIndex++;
}
}
}
// Compact the remaining interactions.
const int warpMask = (1<<indexInWarp)-1;
int numCompacted = 0;
for (int start = 0; start < length; start += 32) {
int i = start+indexInWarp;
int atom = atoms[i];
int flag = flags[i];
bool include = (i < length && __popc(flags[i]) > MAX_BITS_FOR_PAIRS);
int includeFlags = BALLOT(include);
if (include) {
int index = numCompacted+__popc(includeFlags&warpMask);
atoms[index] = atom;
flags[index] = flag;
}
numCompacted += __popc(includeFlags);
}
return numCompacted;
}
/**
* Compare the bounding boxes for each pair of atom blocks (comprised of 32 atoms each), forming a tile. If the two
* atom blocks are sufficiently far apart, mark them as non-interacting. There are two stages in the algorithm.
*
* STAGE 1:
*
* A coarse grained atom block against interacting atom block neighbour list is constructed.
*
* Each warp first loads in some block X of interest. Each thread within the warp then loads
* in a different atom block Y. If Y has exclusions with X, then Y is not processed. If the bounding boxes
* of the two atom blocks are within the cutoff distance, then the two atom blocks are considered to be
* interacting and Y is added to the buffer for X.
*
* STAGE 2:
*
* A fine grained atom block against interacting atoms neighbour list is constructed.
*
* The warp loops over atom blocks Y that were found to (possibly) interact with atom block X. Each thread
* in the warp loops over the 32 atoms in X and compares their positions to one particular atom from block Y.
* If it finds one closer than the cutoff distance, the atom is added to the list of atoms interacting with block X.
* This continues until the buffer fills up, at which point the results are written to global memory.
*
* [in] periodicBoxSize - size of the rectangular periodic box
* [in] invPeriodicBoxSize - inverse of the periodic box
* [in] blockCenter - the center of each bounding box
* [in] blockBoundingBox - bounding box of each atom block
* [out] interactionCount - total number of tiles that have interactions
* [out] interactingTiles - set of blocks that have interactions
* [out] interactingAtoms - a list of atoms that interact with each atom block
* [in] posq - x,y,z coordinates of each atom and charge q
* [in] maxTiles - maximum number of tiles to process, used for multi-GPUs
* [in] startBlockIndex - first block to process, used for multi-GPUs,
* [in] numBlocks - total number of atom blocks
* [in] sortedBlocks - a sorted list of atom blocks based on volume
* [in] sortedBlockCenter - sorted centers, duplicated for fast access to avoid indexing
* [in] sortedBlockBoundingBox - sorted bounding boxes, duplicated for fast access
 * [in] exclusionIndices     - a continuous (flattened) list of the excluded blocks for every atom block
 * [in] exclusionRowIndices  - the starting position in exclusionIndices for each atom block (CSR-style row offsets)
 *                          eg: block 0 is excluded from atom 3,5,6
 *                              block 1 is excluded from atom 3,4
 *                              block 2 is excluded from atom 1,3,5,6
 *                          exclusionRowIndices[0][3][5][8]
 *                          exclusionIndices   [3][5][6][3][4][1][3][5][6]
 *                          index               0  1  2  3  4  5  6  7  8
 * [out] oldPositions       - the atom positions this neighbour list was built from;
 *                            used to decide when the neighbour list needs to be rebuilt
 * [in] rebuildNeighborList - whether or not to execute this kernel
*
*/
extern "C" __global__ void findBlocksWithInteractions(real4 periodicBoxSize, real4 invPeriodicBoxSize, real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ,
unsigned int* __restrict__ interactionCount, int* __restrict__ interactingTiles, unsigned int* __restrict__ interactingAtoms,
int2* __restrict__ singlePairs, const real4* __restrict__ posq, unsigned int maxTiles, unsigned int maxSinglePairs,
unsigned int startBlockIndex, unsigned int numBlocks, real2* __restrict__ sortedBlocks, const real4* __restrict__ sortedBlockCenter,
const real4* __restrict__ sortedBlockBoundingBox, const unsigned int* __restrict__ exclusionIndices, const unsigned int* __restrict__ exclusionRowIndices,
real4* __restrict__ oldPositions, const int* __restrict__ rebuildNeighborList) {
if (rebuildNeighborList[0] == 0)
return; // The neighbor list doesn't need to be rebuilt.
const int indexInWarp = threadIdx.x%32;
const int warpStart = threadIdx.x-indexInWarp;
const int totalWarps = blockDim.x*gridDim.x/32;
const int warpIndex = (blockIdx.x*blockDim.x+threadIdx.x)/32;
const int warpMask = (1<<indexInWarp)-1;
__shared__ int workgroupBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int workgroupFlagsBuffer[BUFFER_SIZE*(GROUP_SIZE/32)];
__shared__ int warpExclusions[MAX_EXCLUSIONS*(GROUP_SIZE/32)];
__shared__ real3 posBuffer[GROUP_SIZE];
__shared__ volatile int workgroupTileIndex[GROUP_SIZE/32];
    __shared__ int workgroupPairStartIndex[GROUP_SIZE/32];
int* sumBuffer = (int*) posBuffer; // Reuse the same buffer to save memory
int* buffer = workgroupBuffer+BUFFER_SIZE*(warpStart/32);
int* flagsBuffer = workgroupFlagsBuffer+BUFFER_SIZE*(warpStart/32);
int* exclusionsForX = warpExclusions+MAX_EXCLUSIONS*(warpStart/32);
volatile int& tileStartIndex = workgroupTileIndex[warpStart/32];
    volatile int& pairStartIndex = workgroupPairStartIndex[warpStart/32];
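    // Each warp owns one block x at a time and accumulates its candidate neighbors in its own
    // slice of the shared memory buffers set up above.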
// Loop over blocks.
for (int block1 = startBlockIndex+warpIndex; block1 < startBlockIndex+numBlocks; block1 += totalWarps) {
// Load data for this block. Note that all threads in a warp are processing the same block.
real2 sortedKey = sortedBlocks[block1];
int x = (int) sortedKey.y;
real4 blockCenterX = sortedBlockCenter[block1];
real4 blockSizeX = sortedBlockBoundingBox[block1];
int neighborsInBuffer = 0;
real3 pos1 = trimTo3(posq[x*TILE_SIZE+indexInWarp]);
#ifdef USE_PERIODIC
const bool singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= PADDED_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= PADDED_CUTOFF);
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos1, blockCenterX)
}
#endif
posBuffer[threadIdx.x] = pos1;
// Load exclusion data for block x.
const int exclusionStart = exclusionRowIndices[x];
const int exclusionEnd = exclusionRowIndices[x+1];
const int numExclusions = exclusionEnd-exclusionStart;
for (int j = indexInWarp; j < numExclusions; j += 32)
exclusionsForX[j] = exclusionIndices[exclusionStart+j];
if (MAX_EXCLUSIONS > 32)
__syncthreads();
// Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
// other blocks in parallel.
for (int block2Base = block1+1; block2Base < NUM_BLOCKS; block2Base += 32) {
int block2 = block2Base+indexInWarp;
bool includeBlock2 = (block2 < NUM_BLOCKS);
if (includeBlock2) {
real4 blockCenterY = sortedBlockCenter[block2];
real4 blockSizeY = sortedBlockBoundingBox[block2];
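                // Two-level test: first compare bounding spheres (center distance against the
                // padded cutoff plus the two .w radii), then the tighter box separation below.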
real4 blockDelta = blockCenterX-blockCenterY;
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(blockDelta)
#endif
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < (PADDED_CUTOFF+blockCenterX.w+blockCenterY.w)*(PADDED_CUTOFF+blockCenterX.w+blockCenterY.w));
blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSizeX.x-blockSizeY.x);
blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSizeX.y-blockSizeY.y);
blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSizeX.z-blockSizeY.z);
includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < PADDED_CUTOFF_SQUARED);
#ifdef TRICLINIC
// The calculation to find the nearest periodic copy is only guaranteed to work if the nearest copy is less than half a box width away.
// If there's any possibility we might have missed it, do a detailed check.
if (periodicBoxSize.z/2-blockSizeX.z-blockSizeY.z < PADDED_CUTOFF || periodicBoxSize.y/2-blockSizeX.y-blockSizeY.y < PADDED_CUTOFF)
includeBlock2 = true;
#endif
if (includeBlock2) {
unsigned short y = (unsigned short) sortedBlocks[block2].y;
for (int k = 0; k < numExclusions; k++)
includeBlock2 &= (exclusionsForX[k] != y);
}
}
// Loop over any blocks we identified as potentially containing neighbors.
int includeBlockFlags = BALLOT(includeBlock2);
while (includeBlockFlags != 0) {
int i = __ffs(includeBlockFlags)-1;
includeBlockFlags &= includeBlockFlags-1;
unsigned short y = (unsigned short) sortedBlocks[block2Base+i].y;
// Check each atom in block Y for interactions.
int atom2 = y*TILE_SIZE+indexInWarp;
real3 pos2 = trimTo3(posq[atom2]);
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
APPLY_PERIODIC_TO_POS_WITH_CENTER(pos2, blockCenterX)
}
#endif
real4 blockCenterY = sortedBlockCenter[block2Base+i];
real3 atomDelta = posBuffer[warpStart+indexInWarp]-trimTo3(blockCenterY);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(atomDelta)
#endif
int atomFlags = BALLOT(atomDelta.x*atomDelta.x+atomDelta.y*atomDelta.y+atomDelta.z*atomDelta.z < (PADDED_CUTOFF+blockCenterY.w)*(PADDED_CUTOFF+blockCenterY.w));
int interacts = 0;
if (atom2 < NUM_ATOMS && atomFlags != 0) {
int first = __ffs(atomFlags)-1;
int last = 32-__clz(atomFlags);
#ifdef USE_PERIODIC
if (!singlePeriodicCopy) {
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
APPLY_PERIODIC_TO_DELTA(delta)
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
}
else {
#endif
for (int j = first; j < last; j++) {
real3 delta = pos2-posBuffer[warpStart+j];
interacts |= (delta.x*delta.x+delta.y*delta.y+delta.z*delta.z < PADDED_CUTOFF_SQUARED ? 1<<j : 0);
}
#ifdef USE_PERIODIC
}
#endif
}
// Add any interacting atoms to the buffer.
int includeAtomFlags = BALLOT(interacts);
if (interacts) {
int index = neighborsInBuffer+__popc(includeAtomFlags&warpMask);
buffer[index] = atom2;
flagsBuffer[index] = interacts;
}
neighborsInBuffer += __popc(includeAtomFlags);
if (neighborsInBuffer > BUFFER_SIZE-TILE_SIZE) {
// Store the new tiles to memory.
#if MAX_BITS_FOR_PAIRS > 0
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
int tilesToStore = neighborsInBuffer/TILE_SIZE;
if (tilesToStore > 0) {
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = buffer[indexInWarp+j*TILE_SIZE];
}
buffer[indexInWarp] = buffer[indexInWarp+TILE_SIZE*tilesToStore];
neighborsInBuffer -= TILE_SIZE*tilesToStore;
}
}
}
}
// If we have a partially filled buffer, store it to memory.
#if MAX_BITS_FOR_PAIRS > 0
if (neighborsInBuffer > 32)
neighborsInBuffer = saveSinglePairs(x, buffer, flagsBuffer, neighborsInBuffer, maxSinglePairs, &interactionCount[1], singlePairs, sumBuffer+warpStart, pairStartIndex);
#endif
if (neighborsInBuffer > 0) {
int tilesToStore = (neighborsInBuffer+TILE_SIZE-1)/TILE_SIZE;
if (indexInWarp == 0)
tileStartIndex = atomicAdd(&interactionCount[0], tilesToStore);
int newTileStartIndex = tileStartIndex;
if (newTileStartIndex+tilesToStore <= maxTiles) {
if (indexInWarp < tilesToStore)
interactingTiles[newTileStartIndex+indexInWarp] = x;
for (int j = 0; j < tilesToStore; j++)
interactingAtoms[(newTileStartIndex+j)*TILE_SIZE+indexInWarp] = (indexInWarp+j*TILE_SIZE < neighborsInBuffer ? buffer[indexInWarp+j*TILE_SIZE] : NUM_ATOMS);
}
}
}
// Record the positions the neighbor list is based on.
for (int i = threadIdx.x+blockIdx.x*blockDim.x; i < NUM_ATOMS; i += blockDim.x*gridDim.x)
oldPositions[i] = posq[i];
}
|
6489aa052086899593be6a7a7685dbf49c500008.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
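    // Bilinear sampling: blend the four neighboring pixels with weights (1-alpha)(1-beta),
    // alpha(1-beta), (1-alpha)beta and alpha*beta; the fy/fx loops sweep this stencil over a
    // kernel_size x kernel_size window.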
if (bilinear) {
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
else {
int xN = max(min( int (floor(xf + 0.5)), dim_w - 1), 0);
int yN = max(min( int (floor(yf + 0.5)), dim_h - 1), 0);
output[index] = static_cast<float> ( DIM3_INDEX(input1, b, c, yN, xN) );
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
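    // Scatter the incoming gradient back to the four neighboring input pixels with the same
    // bilinear weights as the forward pass; atomicAdd is required because several output
    // locations can map onto the same input pixel.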
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
        float gamma = 1 - (xf - floor(xf)); // 1 - alpha (interpolation weight along x)
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
        float gamma = 1 - (yf - floor(yf)); // 1 - beta (interpolation weight along y)
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size,
bool bilinear) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_update_output<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size,
bilinear);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(hipGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size,
bool bilinear) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input1<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size,
bilinear
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
hipLaunchKernelGGL(( kernel_resample2d_backward_input2<float>), dim3((n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA() ,
//at::globalContext().getCurrentHIPStreamMasqueradingAsCUDA() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size,
bilinear
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(hipGetLastError());
}
| 6489aa052086899593be6a7a7685dbf49c500008.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#define DIM0(TENSOR) ((TENSOR).x)
#define DIM1(TENSOR) ((TENSOR).y)
#define DIM2(TENSOR) ((TENSOR).z)
#define DIM3(TENSOR) ((TENSOR).w)
#define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))])
template <typename scalar_t>
__global__ void kernel_resample2d_update_output(const int n,
const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t val = 0.0f;
int dim_b = DIM0(output_size);
int dim_c = DIM1(output_size);
int dim_h = DIM2(output_size);
int dim_w = DIM3(output_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - floor(xf); // alpha
scalar_t beta = yf - floor(yf); // beta
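    // Bilinear sampling: blend the four neighboring pixels with weights (1-alpha)(1-beta),
    // alpha(1-beta), (1-alpha)beta and alpha*beta; the fy/fx loops sweep this stencil over a
    // kernel_size x kernel_size window.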
if (bilinear) {
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
val += static_cast<float>((1. - alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xL + fx));
val += static_cast<float>((alpha)*(1. - beta) * DIM3_INDEX(input1, b, c, yT + fy, xR + fx));
val += static_cast<float>((1. - alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xL + fx));
val += static_cast<float>((alpha)*(beta) * DIM3_INDEX(input1, b, c, yB + fy, xR + fx));
}
}
output[index] = val;
}
else {
int xN = max(min( int (floor(xf + 0.5)), dim_w - 1), 0);
int yN = max(min( int (floor(yf + 0.5)), dim_h - 1), 0);
output[index] = static_cast<float> ( DIM3_INDEX(input1, b, c, yN, xN) );
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input1(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
int dim_b = DIM0(gradOutput_size);
int dim_c = DIM1(gradOutput_size);
int dim_h = DIM2(gradOutput_size);
int dim_w = DIM3(gradOutput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
scalar_t alpha = xf - int(xf); // alpha
scalar_t beta = yf - int(yf); // beta
int idim_h = DIM2(input1_size);
int idim_w = DIM3(input1_size);
int xL = max(min( int (floor(xf)), idim_w-1), 0);
int xR = max(min( int (floor(xf)+1), idim_w -1), 0);
int yT = max(min( int (floor(yf)), idim_h-1), 0);
int yB = max(min( int (floor(yf)+1), idim_h-1), 0);
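    // Scatter the incoming gradient back to the four neighboring input pixels with the same
    // bilinear weights as the forward pass; atomicAdd is required because several output
    // locations can map onto the same input pixel.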
for (int fy = 0; fy < kernel_size; fy += 1) {
for (int fx = 0; fx < kernel_size; fx += 1) {
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xL + fx)), (1-alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yT + fy), (xR + fx)), (alpha)*(1-beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xL + fx)), (1-alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
atomicAdd(&DIM3_INDEX(gradInput, b, c, (yB + fy), (xR + fx)), (alpha)*(beta) * DIM3_INDEX(gradOutput, b, c, y, x));
}
}
}
template <typename scalar_t>
__global__ void kernel_resample2d_backward_input2(
const int n, const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride,
const scalar_t* __restrict__ input2, const long4 input2_size, const long4 input2_stride,
const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride,
scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, int kernel_size, bool bilinear) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n) {
return;
}
scalar_t output = 0.0;
int kernel_rad = (kernel_size - 1)/2;
int dim_b = DIM0(gradInput_size);
int dim_c = DIM1(gradInput_size);
int dim_h = DIM2(gradInput_size);
int dim_w = DIM3(gradInput_size);
int dim_chw = dim_c * dim_h * dim_w;
int dim_hw = dim_h * dim_w;
int b = ( index / dim_chw ) % dim_b;
int c = ( index / dim_hw ) % dim_c;
int y = ( index / dim_w ) % dim_h;
int x = ( index ) % dim_w;
int odim_c = DIM1(gradOutput_size);
scalar_t dx = DIM3_INDEX(input2, b, 0, y, x);
scalar_t dy = DIM3_INDEX(input2, b, 1, y, x);
scalar_t xf = static_cast<scalar_t>(x) + dx;
scalar_t yf = static_cast<scalar_t>(y) + dy;
int xL = max(min( int (floor(xf)), dim_w-1), 0);
int xR = max(min( int (floor(xf)+1), dim_w -1), 0);
int yT = max(min( int (floor(yf)), dim_h-1), 0);
int yB = max(min( int (floor(yf)+1), dim_h-1), 0);
if (c % 2) {
        float gamma = 1 - (xf - floor(xf)); // 1 - alpha (interpolation weight along x)
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
}
}
}
}
else {
        float gamma = 1 - (yf - floor(yf)); // 1 - beta (interpolation weight along y)
for (int i = 0; i <= 2*kernel_rad; ++i) {
for (int j = 0; j <= 2*kernel_rad; ++j) {
for (int ch = 0; ch < odim_c; ++ch) {
output += (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i));
output -= (gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i));
output += (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i));
output -= (1-gamma) * DIM3_INDEX(gradOutput, b, ch, y, x) * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i));
}
}
}
}
gradInput[index] = output;
}
void resample2d_kernel_forward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& output,
int kernel_size,
bool bilinear) {
int n = output.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3));
const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3));
// TODO: when atomicAdd gets resolved, change to AT_DISPATCH_FLOATING_TYPES_AND_HALF
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_forward_kernel", ([&] {
kernel_resample2d_update_output<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
output.data<float>(),
output_size,
output_stride,
kernel_size,
bilinear);
// }));
// TODO: ATen-equivalent check
// THCudaCheck(cudaGetLastError());
}
void resample2d_kernel_backward(
at::Tensor& input1,
at::Tensor& input2,
at::Tensor& gradOutput,
at::Tensor& gradInput1,
at::Tensor& gradInput2,
int kernel_size,
bool bilinear) {
int n = gradOutput.numel();
const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3));
const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3));
const long4 input2_size = make_long4(input2.size(0), input2.size(1), input2.size(2), input2.size(3));
const long4 input2_stride = make_long4(input2.stride(0), input2.stride(1), input2.stride(2), input2.stride(3));
const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3));
const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3));
const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3));
const long4 gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3));
// AT_DISPATCH_FLOATING_TYPES(input1.type(), "resample_backward_input1", ([&] {
kernel_resample2d_backward_input1<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput1.data<float>(),
gradInput1_size,
gradInput1_stride,
kernel_size,
bilinear
);
// }));
const long4 gradInput2_size = make_long4(gradInput2.size(0), gradInput2.size(1), gradInput2.size(2), gradInput2.size(3));
const long4 gradInput2_stride = make_long4(gradInput2.stride(0), gradInput2.stride(1), gradInput2.stride(2), gradInput2.stride(3));
n = gradInput2.numel();
// AT_DISPATCH_FLOATING_TYPES(gradInput2.type(), "resample_backward_input2", ([&] {
kernel_resample2d_backward_input2<float><<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>(
//at::globalContext().getCurrentCUDAStream() >>>(
n,
input1.data<float>(),
input1_size,
input1_stride,
input2.data<float>(),
input2_size,
input2_stride,
gradOutput.data<float>(),
gradOutput_size,
gradOutput_stride,
gradInput2.data<float>(),
gradInput2_size,
gradInput2_stride,
kernel_size,
bilinear
);
// }));
// TODO: Use the ATen equivalent to get last error
// THCudaCheck(cudaGetLastError());
}
|
ca75546c829b87dcc730e869ae80284c42d893f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
__global__ void Convolution(int*,const int*,int*,int,int);
bool CheckAnswer(int*,int*,int*,int,int);
void printMatrix(int* mat,int,int);
#define TW 32
#define MaskWidth 5
#define MaskRadius 2
int main()
{
clock_t start_overhead,end_overhead;
start_overhead=clock();
/* No. of rows and column in the image*/
int nImageROW=800;
int nImageCOL=800;
/* Dynamically allocate memory to the input image, output image and the mask. Each matrix is represented as a 1D array*/
int* ImageIn=new int[nImageROW*nImageCOL];
int* ImageOut=new int[nImageROW*nImageCOL];
int* Mask=new int[MaskWidth*MaskWidth];
srand(time(NULL));
/* Populate the input image and the mask with random numbers*/
for(int i=0;i<nImageROW;i++)
for(int j=0;j<nImageCOL;j++)
ImageIn[i*nImageCOL+j]=rand()%4;
for(int i=0;i<MaskWidth;i++)
for(int j=0;j<MaskWidth;j++)
Mask[i*MaskWidth+j]=rand()%5;
int ImageSize=nImageROW*nImageCOL*sizeof(int);
int MaskSize=MaskWidth*MaskWidth*sizeof(int);
int* ImageIn_d, *ImageOut_d,*Mask_d;
/* Allocate memory on the device*/
hipMalloc((void**)&ImageIn_d,ImageSize);
hipMalloc((void**)&ImageOut_d,ImageSize);
hipMalloc((void**)&Mask_d,MaskSize);
/* Copy data from host to device*/
hipMemcpy(ImageIn_d,ImageIn,ImageSize,hipMemcpyHostToDevice);
hipMemcpy(Mask_d,Mask,MaskSize,hipMemcpyHostToDevice);
/* Invoke the convolution kernel*/
dim3 dimBlock(TW,TW);
dim3 dimGrid(ceil((float)nImageCOL/TW),ceil((float)nImageROW/TW));
	clock_t start,end; double total;
start=clock();
hipLaunchKernelGGL(( Convolution), dim3(dimGrid),dim3(dimBlock), 0, 0, ImageIn_d,Mask_d,ImageOut_d,nImageROW,nImageCOL);
	hipDeviceSynchronize(); // wait for the kernel to finish so the timing covers execution, not just the launch
	end=clock();
total=(double)(end - start) / CLOCKS_PER_SEC;
printf("Time taken on device: %lf\n",total);
/* Copy the convoluted image to host*/
hipMemcpy(ImageOut,ImageOut_d,ImageSize,hipMemcpyDeviceToHost);
end_overhead=clock();
if(CheckAnswer(ImageIn,Mask,ImageOut,nImageROW,nImageCOL))
printf("Solution is right!\n");
else
printf("Solution is wrong!\n");
printf("Time spent on overhead calculations: %lf\n",(double)(end_overhead - start_overhead) / CLOCKS_PER_SEC);
/* printMatrix(ImageIn,nImageROW,nImageCOL);
printMatrix(ImageOut,nImageROW,nImageCOL);
printMatrix(Mask,MaskWidth,MaskWidth); */
	hipFree(ImageIn_d);
	hipFree(ImageOut_d);
	hipFree(Mask_d);
	delete[] ImageIn;
	delete[] ImageOut;
	delete[] Mask;
}
__global__ void Convolution(int* ImageIn_d, const int* __restrict__ Mask_d,int* ImageOut_d, int nImageROW, int nImageCOL)
{
/* Store the thread dimensions on registers*/
int bx=blockIdx.x,by=blockIdx.y;
int tx=threadIdx.x,ty=threadIdx.y;
/* Declare a shared memory region for each block of threads*/
__shared__ int SharedMemBlock[TW][TW];
/* Find out the row and column for each thread*/
int row=by*blockDim.y+ty;
int col=bx*blockDim.x+tx;
SharedMemBlock[ty][tx]=ImageIn_d[row*nImageCOL+col];
__syncthreads();
int Pvalue=0;
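	/* For each mask offset, read the pixel from shared memory when it lies inside this block's
	   tile, and fall back to global memory for halo pixels outside the tile. */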
for(int i=-MaskRadius;i<=MaskRadius;i++)
for(int j=-MaskRadius;j<=MaskRadius;j++)
{
			int GlobalMemRow=row+i;
			int GlobalMemCol=col+j;
			int SharedMemRow=ty+i;
			int SharedMemCol=tx+j;
			int MaskRow=MaskRadius+i;
			int MaskCol=MaskRadius+j;
			if(GlobalMemRow<by*TW || GlobalMemRow>=(by+1)*TW||GlobalMemCol<bx*TW || GlobalMemCol>=(bx+1)*TW)
			{
				if(GlobalMemRow>=0 && GlobalMemRow<nImageROW && GlobalMemCol>=0 && GlobalMemCol<nImageCOL)
					Pvalue+=ImageIn_d[GlobalMemRow*nImageCOL+GlobalMemCol]*Mask_d[MaskRow*MaskWidth+MaskCol];
}
else
Pvalue+=SharedMemBlock[SharedMemRow][SharedMemCol]*Mask_d[MaskRow*MaskWidth+MaskCol];
}
ImageOut_d[row*nImageCOL+col]=Pvalue;
}
/* A function to verify correctness of solution*/
bool CheckAnswer(int* ImageIn, int* Mask, int* ImageOut,int nImageROW, int nImageCOL)
{
clock_t start,end;
start=clock();
for(int row=0;row<nImageROW;row++)
{
for(int col=0;col<nImageCOL;col++)
{
int Pvalue=0;
for(int MaskRow=-MaskRadius;MaskRow<=MaskRadius;MaskRow++)
{
for(int MaskCol=-MaskRadius;MaskCol<=MaskRadius;MaskCol++)
{
int imageRow=row+MaskRow;
int imageCol=col+MaskCol;
if(imageRow>=0 && imageRow<nImageROW && imageCol>=0 && imageCol<nImageCOL)
Pvalue+=ImageIn[imageRow*nImageCOL+imageCol]*Mask[(MaskRow+MaskRadius)*MaskWidth+(MaskCol+MaskRadius)];
}
}
if(Pvalue!=ImageOut[row*nImageCOL+col])
return false;
}
}
end=clock();
printf("Time taken on host: %lf\n",(double)(end - start) / CLOCKS_PER_SEC);
return true;
}
void printMatrix(int* mat,int nrow,int ncol)
{
for(int i=0;i<nrow;i++)
{
for(int j=0;j<ncol;j++)
printf("%d ",mat[i*ncol+j]);
printf("\n");
}
printf("\n\n");
}
| ca75546c829b87dcc730e869ae80284c42d893f0.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
__global__ void Convolution(int*,const int*,int*,int,int);
bool CheckAnswer(int*,int*,int*,int,int);
void printMatrix(int* mat,int,int);
#define TW 32
#define MaskWidth 5
#define MaskRadius 2
int main()
{
clock_t start_overhead,end_overhead;
start_overhead=clock();
/* No. of rows and column in the image*/
int nImageROW=800;
int nImageCOL=800;
/* Dynamically allocate memory to the input image, output image and the mask. Each matrix is represented as a 1D array*/
int* ImageIn=new int[nImageROW*nImageCOL];
int* ImageOut=new int[nImageROW*nImageCOL];
int* Mask=new int[MaskWidth*MaskWidth];
srand(time(NULL));
/* Populate the input image and the mask with random numbers*/
for(int i=0;i<nImageROW;i++)
for(int j=0;j<nImageCOL;j++)
ImageIn[i*nImageCOL+j]=rand()%4;
for(int i=0;i<MaskWidth;i++)
for(int j=0;j<MaskWidth;j++)
Mask[i*MaskWidth+j]=rand()%5;
int ImageSize=nImageROW*nImageCOL*sizeof(int);
int MaskSize=MaskWidth*MaskWidth*sizeof(int);
int* ImageIn_d, *ImageOut_d,*Mask_d;
/* Allocate memory on the device*/
cudaMalloc((void**)&ImageIn_d,ImageSize);
cudaMalloc((void**)&ImageOut_d,ImageSize);
cudaMalloc((void**)&Mask_d,MaskSize);
/* Copy data from host to device*/
cudaMemcpy(ImageIn_d,ImageIn,ImageSize,cudaMemcpyHostToDevice);
cudaMemcpy(Mask_d,Mask,MaskSize,cudaMemcpyHostToDevice);
/* Invoke the convolution kernel*/
dim3 dimBlock(TW,TW);
dim3 dimGrid(ceil((float)nImageCOL/TW),ceil((float)nImageROW/TW));
	clock_t start,end; double total;
start=clock();
Convolution<<<dimGrid,dimBlock>>>(ImageIn_d,Mask_d,ImageOut_d,nImageROW,nImageCOL);
	cudaDeviceSynchronize(); // wait for the kernel to finish so the timing covers execution, not just the launch
	end=clock();
total=(double)(end - start) / CLOCKS_PER_SEC;
printf("Time taken on device: %lf\n",total);
/* Copy the convoluted image to host*/
cudaMemcpy(ImageOut,ImageOut_d,ImageSize,cudaMemcpyDeviceToHost);
end_overhead=clock();
if(CheckAnswer(ImageIn,Mask,ImageOut,nImageROW,nImageCOL))
printf("Solution is right!\n");
else
printf("Solution is wrong!\n");
printf("Time spent on overhead calculations: %lf\n",(double)(end_overhead - start_overhead) / CLOCKS_PER_SEC);
/* printMatrix(ImageIn,nImageROW,nImageCOL);
printMatrix(ImageOut,nImageROW,nImageCOL);
printMatrix(Mask,MaskWidth,MaskWidth); */
	cudaFree(ImageIn_d);
	cudaFree(ImageOut_d);
	cudaFree(Mask_d);
	delete[] ImageIn;
	delete[] ImageOut;
	delete[] Mask;
}
__global__ void Convolution(int* ImageIn_d, const int* __restrict__ Mask_d,int* ImageOut_d, int nImageROW, int nImageCOL)
{
/* Store the thread dimensions on registers*/
int bx=blockIdx.x,by=blockIdx.y;
int tx=threadIdx.x,ty=threadIdx.y;
/* Declare a shared memory region for each block of threads*/
__shared__ int SharedMemBlock[TW][TW];
/* Find out the row and column for each thread*/
int row=by*blockDim.y+ty;
int col=bx*blockDim.x+tx;
SharedMemBlock[ty][tx]=ImageIn_d[row*nImageCOL+col];
__syncthreads();
int Pvalue=0;
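	/* For each mask offset, read the pixel from shared memory when it lies inside this block's
	   tile, and fall back to global memory for halo pixels outside the tile. */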
for(int i=-MaskRadius;i<=MaskRadius;i++)
for(int j=-MaskRadius;j<=MaskRadius;j++)
{
			int GlobalMemRow=row+i;
			int GlobalMemCol=col+j;
			int SharedMemRow=ty+i;
			int SharedMemCol=tx+j;
			int MaskRow=MaskRadius+i;
			int MaskCol=MaskRadius+j;
			if(GlobalMemRow<by*TW || GlobalMemRow>=(by+1)*TW||GlobalMemCol<bx*TW || GlobalMemCol>=(bx+1)*TW)
			{
				if(GlobalMemRow>=0 && GlobalMemRow<nImageROW && GlobalMemCol>=0 && GlobalMemCol<nImageCOL)
					Pvalue+=ImageIn_d[GlobalMemRow*nImageCOL+GlobalMemCol]*Mask_d[MaskRow*MaskWidth+MaskCol];
}
else
Pvalue+=SharedMemBlock[SharedMemRow][SharedMemCol]*Mask_d[MaskRow*MaskWidth+MaskCol];
}
ImageOut_d[row*nImageCOL+col]=Pvalue;
}
/* A function to verify correctness of solution*/
bool CheckAnswer(int* ImageIn, int* Mask, int* ImageOut,int nImageROW, int nImageCOL)
{
clock_t start,end;
start=clock();
for(int row=0;row<nImageROW;row++)
{
for(int col=0;col<nImageCOL;col++)
{
int Pvalue=0;
for(int MaskRow=-MaskRadius;MaskRow<=MaskRadius;MaskRow++)
{
for(int MaskCol=-MaskRadius;MaskCol<=MaskRadius;MaskCol++)
{
int imageRow=row+MaskRow;
int imageCol=col+MaskCol;
if(imageRow>=0 && imageRow<nImageROW && imageCol>=0 && imageCol<nImageCOL)
Pvalue+=ImageIn[imageRow*nImageCOL+imageCol]*Mask[(MaskRow+MaskRadius)*MaskWidth+(MaskCol+MaskRadius)];
}
}
if(Pvalue!=ImageOut[row*nImageCOL+col])
return false;
}
}
end=clock();
printf("Time taken on host: %lf\n",(double)(end - start) / CLOCKS_PER_SEC);
return true;
}
void printMatrix(int* mat,int nrow,int ncol)
{
for(int i=0;i<nrow;i++)
{
for(int j=0;j<ncol;j++)
printf("%d ",mat[i*ncol+j]);
printf("\n");
}
printf("\n\n");
}
|
20adace351600fd0409b5612c6ccb12046ddaded.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
/* ECE 277: GPU Programming 2021 WINTER quarter
/* Author and Instructor: Cheolhong An
/* Copyright 2019
/* University of California, San Diego
/*************************************************************************/
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define ACTIONS 4
#define NUM_AGENTS 512
#define COLS 46
#define ROWS 46
#define QSIZE COLS * ROWS * ACTIONS
#define THREADS 256
#define GAMMA 0.9
#define ALPHA 0.5
#define EPSILON 1.0
#define DELTA_EPS 0.01
#define EPS_CEIL 1.0
#define EPS_BOTTOM 0.0
short *d_action;
hiprandState_t *d_states;
bool *d_active;
float *d_qtable;
float epsilon;
////////////////////////// agent_init() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Init_agent(hiprandState_t *d_states, bool *d_active)
{
unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x;
if (agent_id < NUM_AGENTS) {
hiprand_init(clock() + agent_id, agent_id, 0, &d_states[agent_id]);
d_active[agent_id] = 1;
}
}
// occupancy
__global__ void Init_qtable(float *d_qtable)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int nx = gridDim.x * blockDim.x;
unsigned int tid = ix + iy * nx;
if (tid < QSIZE) {
d_qtable[tid] = 0.0f;
}
}
void agent_init()
{
// clear action + initQ table + self initialization
epsilon = EPSILON;
hipMalloc((void **)&d_action, NUM_AGENTS * sizeof(short));
hipMalloc((void **)&d_states, NUM_AGENTS * sizeof(hiprandState_t));
hipMalloc((void **)&d_active, NUM_AGENTS * sizeof(bool));
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS / block.x, 1, 1);
Init_agent << <grid, block >> > (d_states, d_active);
hipMalloc((void **)&d_qtable, QSIZE * sizeof(float));
dim3 qblock(THREADS, 1, 1);
dim3 qgrid(QSIZE / block.x + 1); // COLS 46 * ROWS 46 * ACTIONS 4
hipLaunchKernelGGL(( Init_qtable) , dim3(qgrid) , dim3(qblock) , 0, 0, d_qtable);
}
////////////////////////// agent_init_episode() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Init_episode(bool *d_active) {
// agent 1 alive, 0 dead;
unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x;
d_active[agent_id] = 1;
}
void agent_init_episode() {
// set all agents in active status
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS / block.x, 1, 1);
	hipLaunchKernelGGL(( Init_episode) , dim3(grid), dim3(block) , 0, 0, d_active);
}
////////////////////////// adjust_epsilon() //////////////////////////
float agent_adjustepsilon()
{
if (epsilon > EPS_CEIL) {
epsilon = EPS_CEIL;
}
else if (epsilon < EPS_BOTTOM) {
epsilon = EPS_BOTTOM;
}
else {
epsilon -= (float)DELTA_EPS;
}
return epsilon;
}
////////////////////////// agent_action() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Agent_action(int2 *cstate, short *d_action, hiprandState_t *d_states, float epsilon, float *d_qtable, bool *d_active) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int nx = gridDim.x * blockDim.x;
int tid = iy * nx + ix;
int agent_id = tid / ACTIONS;
float rand_state = hiprand_uniform(&d_states[agent_id]);
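	// Epsilon-greedy policy: with probability epsilon pick a random action (exploration),
	// otherwise take the action with the largest Q-value for this state (exploitation).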
if (rand_state < epsilon) {
short action = (short)(hiprand_uniform(&d_states[agent_id]) * ACTIONS);
		if (action == 4) action = 0; // hiprand_uniform returns values in (0, 1], so the scaled action can equal ACTIONS; wrap it back to 0 to keep the choice uniform
d_action[agent_id] = action;
}
else {
__shared__ float qval_cache[THREADS];
__shared__ short action_cache[THREADS];
int sid = threadIdx.x;
int aid = sid % ACTIONS;
action_cache[sid] = aid;
int x = cstate[agent_id].x, y = cstate[agent_id].y;
int qid = (y * COLS + x) * ACTIONS;
qval_cache[sid] = d_qtable[qid + aid];
__syncthreads();
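		// Tree reduction over the ACTIONS Q-values of this agent's 4-thread group; the lane
		// with aid == 0 ends up holding the maximum Q-value and the corresponding action.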
unsigned int stride = ACTIONS / 2;
#pragma unroll
while (stride != 0) {
if (aid < stride) {
if (qval_cache[sid] < qval_cache[sid + stride]) {
qval_cache[sid] = qval_cache[sid + stride];
action_cache[sid] = action_cache[sid + stride];
}
}
__syncthreads();
stride /= 2;
}
if (sid % ACTIONS == 0) {
d_action[agent_id] = action_cache[sid];
}
}
}
short* agent_action(int2* cstate) {
// do exploration or exploitation
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS * ACTIONS / block.x, 1, 1);
hipLaunchKernelGGL(( Agent_action) , dim3(grid), dim3(block) , 0, 0, cstate, d_action, d_states, epsilon, d_qtable, d_active);
return d_action;
}
////////////////////////// agent_update() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int nx = gridDim.x * blockDim.x;
int tid = iy * nx + ix;
int agent_id = tid / ACTIONS;
if (d_active[agent_id] == 1) {
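		// Q-learning update: Q(S,A) += ALPHA * (R + GAMMA * max_a Q(S',a) - Q(S,A));
		// a nonzero reward marks a terminal transition, so the bootstrap term is dropped (gamma_item = 0).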
unsigned int x0 = cstate[agent_id].x, y0 = cstate[agent_id].y;
unsigned int c_qid = (y0 * COLS + x0) * ACTIONS + (int)d_action[agent_id];
float gamma_item = 0;
if (rewards[agent_id] != 0) {
d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]);
}
else {
// memory shared
__shared__ float qval_cache[THREADS];
int sid = threadIdx.x;
int aid = sid % ACTIONS;
// next state
int x = nstate[agent_id].x, y = nstate[agent_id].y;
int qid = (y * COLS + x) * ACTIONS;
qval_cache[sid] = d_qtable[qid + aid];
// reduction, max qval
unsigned int stride = ACTIONS / 2;
#pragma unroll
while (stride != 0) {
if (aid < stride) {
if (qval_cache[sid] < qval_cache[sid + stride]) {
qval_cache[sid] = qval_cache[sid + stride];
}
}
__syncthreads();
stride /= 2;
}
if (sid % ACTIONS == 0) {
d_qtable[c_qid] += ALPHA * (rewards[agent_id] + GAMMA * qval_cache[sid] - d_qtable[c_qid]);
}
}
}
}
void agent_update(int2* cstate, int2* nstate, float *rewards)
{
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS * ACTIONS / block.x, 1, 1);
hipLaunchKernelGGL(( Agent_update) , dim3(grid), dim3(block) , 0, 0, cstate, nstate, rewards, d_qtable, d_action, d_active);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
/** CUDA Dynamic Parallelism
* @brief it will be called in __global__ Agent_action and Agent_update
* for agent_id to calculate (.x) greedy_action and (.y) max_qval
*/
// __inline__ __device__ void Get_qAction_qMaxVal(int2 *state, float *d_qtable, float2 *d_actval, unsigned int agent_id)
// {
// // exploitation (greedy policy)
// // located position on q_table
// unsigned int x = state[agent_id].x;
// unsigned int y = state[agent_id].y;
// // memory shared
// __shared__ float qval_cache[ACTIONS]; // 4 actions
// __shared__ short action_cache[ACTIONS];
// unsigned int aid = threadIdx.x; // action_id
// action_cache[aid] = (short)threadIdx.x;
// unsigned int q_id = (y * COLS + x) * ACTIONS;
// qval_cache[aid] = d_qtable[q_id + aid];
// __syncthreads();
// // reduction for getting the max val and action
// unsigned int stride = blockDim.x / 2; // 4 actions / 2
// #pragma unroll
// while (stride != 0) {
// if (aid < stride && qval_cache[aid] < qval_cache[aid + stride]) {
// // keep larger values in left cache
// qval_cache[aid] = qval_cache[aid + stride];
// action_cache[aid] = action_cache[aid + stride];
// }
// __syncthreads();
// stride /= 2;
// }
// // update: .x action; .y max_qval.
// d_actval[agent_id].x = action_cache[0];
// d_actval[agent_id].y = qval_cache[0];
// }
////////////////////////////////////////////////////////////////////////////////////////////////////////
// __global__ void Agent_action(int2 *cstate, short *d_action, hiprandState_t *d_states, float epsilon, float *d_qtable, bool *d_active) {
// unsigned int agent_id = blockIdx.x;
// if (d_active[agent_id] == 1)
// {
// // agent is alive
// float rand_state = hiprand_uniform(&d_states[agent_id]);
// if (rand_state < epsilon) {
// // exploration
// short action = (short)(hiprand_uniform(&d_states[agent_id]) * ACTIONS);
// if (action == 4) {
// // hiprand_uniform (0, 1] for keeping uniform make the case action==4 as action==0
// action = 0;
// }
// d_action[agent_id] = action;
// }
// else {
// // exploitation (greedy policy)
// // memory shared
// __shared__ float qval_cache[ACTIONS]; // 4 actions
// __shared__ short action_cache[ACTIONS];
// unsigned int aid = threadIdx.x;
// action_cache[aid] = (short)threadIdx.x;
// // located position on q_table
// unsigned int x = cstate[agent_id].x;
// unsigned int y = cstate[agent_id].y;
// unsigned int q_id = (y * COLS + x) * ACTIONS;
// // qval_cache[action_id] = d_qtable[q_id + action_id];
// qval_cache[aid] = d_qtable[q_id + aid];
// __syncthreads();
// // reduction for getting the max val and action
// unsigned int stride = blockDim.x / 2;
// #pragma unroll
// while (stride != 0) {
// if (aid < stride && qval_cache[aid] < qval_cache[aid + stride]) {
// qval_cache[aid] = qval_cache[aid + stride];
// action_cache[aid] = action_cache[aid + stride];
// }
// __syncthreads();
// stride /= 2;
// }
// // action = action_cache[0];
// d_action[agent_id] = action_cache[0];
// }
// }
// }
// short* agent_action(int2* cstate) {
// // do exploration or exploitation
// // dim3 block(ACTIONS, 1, 1);
// // dim3 grid(NUM_AGENTS, 1, 1);
// hipLaunchKernelGGL(( Agent_action) , dim3(grid), dim3(block) , 0, 0, cstate, d_action, d_states, epsilon, d_qtable, d_active);
// return d_action;
// }
// __global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active)
// {
// // observe next state S' and R
// unsigned int agent_id = blockIdx.x;
// if (d_active[agent_id] == 1) {
// // agent active
// float gamma_item = 0; // if agent is inactive, the gamma_item == 0
// if (rewards[agent_id] == 0) {
// // agent still active
// // memory shared
// __shared__ float qval_cache[ACTIONS];
// unsigned int action_id = threadIdx.x;
// unsigned int x1 = nstate[agent_id].x;
// unsigned int y1 = nstate[agent_id].y;
// unsigned int n_qid = (y1 * COLS + x1) * ACTIONS; // next state (n+1)
// qval_cache[action_id] = d_qtable[n_qid + action_id];
// __syncthreads();
// // reduction
// unsigned int i = blockDim.x / 2;
// #pragma unroll
// while (i != 0) {
// if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) {
// qval_cache[action_id] = qval_cache[action_id + i];
// }
// __syncthreads();
// i /= 2;
// }
// float best_next_qval = qval_cache[0];
// gamma_item = GAMMA * best_next_qval;
// }
// // update q_table of current state (n) <- max val of next state (n+1)
// // Q(S, A) <- Q(S, A) + alpha[R + gamma * max Q(S', a) - Q(S, A)]
// unsigned int x0 = cstate[agent_id].x;
// unsigned int y0 = cstate[agent_id].y;
// unsigned int c_qid = (y0 * COLS + x0) * ACTIONS + (int)d_action[agent_id];
// d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]);
// }
// }
// void agent_update(int2* cstate, int2* nstate, float *rewards)
// {
// dim3 block(ACTIONS, 1, 1);
// dim3 grid(NUM_AGENTS, 1, 1);
// Agent_update <<<grid, block >>> (cstate, nstate, rewards, d_qtable, d_action, d_active);
// }
| 20adace351600fd0409b5612c6ccb12046ddaded.cu | /*************************************************************************
/* ECE 277: GPU Programming 2021 WINTER quarter
/* Author and Instructor: Cheolhong An
/* Copyright 2019
/* University of California, San Diego
/*************************************************************************/
#include <cuda_fp16.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <curand.h>
#include <curand_kernel.h>
#define ACTIONS 4
#define NUM_AGENTS 512
#define COLS 46
#define ROWS 46
#define QSIZE COLS * ROWS * ACTIONS
#define THREADS 256
#define GAMMA 0.9
#define ALPHA 0.5
#define EPSILON 1.0
#define DELTA_EPS 0.01
#define EPS_CEIL 1.0
#define EPS_BOTTOM 0.0
short *d_action;
curandState *d_states;
bool *d_active;
float *d_qtable;
float epsilon;
////////////////////////// agent_init() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Init_agent(curandState *d_states, bool *d_active)
{
unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x;
if (agent_id < NUM_AGENTS) {
curand_init(clock() + agent_id, agent_id, 0, &d_states[agent_id]);
d_active[agent_id] = 1;
}
}
// occupancy
__global__ void Init_qtable(float *d_qtable)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int nx = gridDim.x * blockDim.x;
unsigned int tid = ix + iy * nx;
if (tid < QSIZE) {
d_qtable[tid] = 0.0f;
}
}
void agent_init()
{
// clear action + initQ table + self initialization
epsilon = EPSILON;
cudaMalloc((void **)&d_action, NUM_AGENTS * sizeof(short));
cudaMalloc((void **)&d_states, NUM_AGENTS * sizeof(curandState));
cudaMalloc((void **)&d_active, NUM_AGENTS * sizeof(bool));
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS / block.x, 1, 1);
Init_agent << <grid, block >> > (d_states, d_active);
cudaMalloc((void **)&d_qtable, QSIZE * sizeof(float));
dim3 qblock(THREADS, 1, 1);
dim3 qgrid(QSIZE / block.x + 1); // COLS 46 * ROWS 46 * ACTIONS 4
Init_qtable <<< qgrid , qblock >>> (d_qtable);
}
////////////////////////// agent_init_episode() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Init_episode(bool *d_active) {
// agent 1 alive, 0 dead;
unsigned int agent_id = threadIdx.x + blockIdx.x * blockDim.x;
d_active[agent_id] = 1;
}
void agent_init_episode() {
// set all agents in active status
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS / block.x, 1, 1);
	Init_episode <<<grid, block >>> (d_active);
}
////////////////////////// adjust_epsilon() //////////////////////////
float agent_adjustepsilon()
{
if (epsilon > EPS_CEIL) {
epsilon = EPS_CEIL;
}
else if (epsilon < EPS_BOTTOM) {
epsilon = EPS_BOTTOM;
}
else {
epsilon -= (float)DELTA_EPS;
}
return epsilon;
}
////////////////////////// agent_action() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Agent_action(int2 *cstate, short *d_action, curandState *d_states, float epsilon, float *d_qtable, bool *d_active) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int nx = gridDim.x * blockDim.x;
int tid = iy * nx + ix;
int agent_id = tid / ACTIONS;
float rand_state = curand_uniform(&d_states[agent_id]);
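	// Epsilon-greedy policy: with probability epsilon pick a random action (exploration),
	// otherwise take the action with the largest Q-value for this state (exploitation).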
if (rand_state < epsilon) {
short action = (short)(curand_uniform(&d_states[agent_id]) * ACTIONS);
		if (action == 4) action = 0; // curand_uniform returns values in (0, 1], so the scaled action can equal ACTIONS; wrap it back to 0 to keep the choice uniform
d_action[agent_id] = action;
}
else {
__shared__ float qval_cache[THREADS];
__shared__ short action_cache[THREADS];
int sid = threadIdx.x;
int aid = sid % ACTIONS;
action_cache[sid] = aid;
int x = cstate[agent_id].x, y = cstate[agent_id].y;
int qid = (y * COLS + x) * ACTIONS;
qval_cache[sid] = d_qtable[qid + aid];
__syncthreads();
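		// Tree reduction over the ACTIONS Q-values of this agent's 4-thread group; the lane
		// with aid == 0 ends up holding the maximum Q-value and the corresponding action.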
unsigned int stride = ACTIONS / 2;
#pragma unroll
while (stride != 0) {
if (aid < stride) {
if (qval_cache[sid] < qval_cache[sid + stride]) {
qval_cache[sid] = qval_cache[sid + stride];
action_cache[sid] = action_cache[sid + stride];
}
}
__syncthreads();
stride /= 2;
}
if (sid % ACTIONS == 0) {
d_action[agent_id] = action_cache[sid];
}
}
}
short* agent_action(int2* cstate) {
// do exploration or exploitation
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS * ACTIONS / block.x, 1, 1);
Agent_action <<< grid, block >>> (cstate, d_action, d_states, epsilon, d_qtable, d_active);
return d_action;
}
////////////////////////// agent_update() //////////////////////////
// <<<NUM_AGENTS * ACTIONS / THREADS, THREADS >>>
__global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int nx = gridDim.x * blockDim.x;
int tid = iy * nx + ix;
int agent_id = tid / ACTIONS;
if (d_active[agent_id] == 1) {
unsigned int x0 = cstate[agent_id].x, y0 = cstate[agent_id].y;
unsigned int c_qid = (y0 * COLS + x0) * ACTIONS + (int)d_action[agent_id];
float gamma_item = 0;
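// a non-zero reward marks a terminal transition here, so the update below omits the discounted bootstrap term (gamma_item stays 0)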
if (rewards[agent_id] != 0) {
d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]);
}
else {
// shared memory
__shared__ float qval_cache[THREADS];
int sid = threadIdx.x;
int aid = sid % ACTIONS;
// next state
int x = nstate[agent_id].x, y = nstate[agent_id].y;
int qid = (y * COLS + x) * ACTIONS;
qval_cache[sid] = d_qtable[qid + aid];
// reduction, max qval
unsigned int stride = ACTIONS / 2;
#pragma unroll
while (stride != 0) {
if (aid < stride) {
if (qval_cache[sid] < qval_cache[sid + stride]) {
qval_cache[sid] = qval_cache[sid + stride];
}
}
__syncthreads();
stride /= 2;
}
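// Q-learning update: Q(S,A) += ALPHA * (R + GAMMA * max_a Q(S',a) - Q(S,A)), applied once per agent by the group's first thread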
if (sid % ACTIONS == 0) {
d_qtable[c_qid] += ALPHA * (rewards[agent_id] + GAMMA * qval_cache[sid] - d_qtable[c_qid]);
}
}
}
}
void agent_update(int2* cstate, int2* nstate, float *rewards)
{
dim3 block(THREADS, 1, 1);
dim3 grid(NUM_AGENTS * ACTIONS / block.x, 1, 1);
Agent_update <<< grid, block >>> (cstate, nstate, rewards, d_qtable, d_action, d_active);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
/** CUDA Dynamic Parallelism
 * @brief intended to be called from the __global__ Agent_action and Agent_update kernels
 * to compute, for a given agent_id, the greedy action (.x) and the maximum Q-value (.y)
*/
// __inline__ __device__ void Get_qAction_qMaxVal(int2 *state, float *d_qtable, float2 *d_actval, unsigned int agent_id)
// {
// // exploitation (greedy policy)
// // located position on q_table
// unsigned int x = state[agent_id].x;
// unsigned int y = state[agent_id].y;
// // memory shared
// __shared__ float qval_cache[ACTIONS]; // 4 actions
// __shared__ short action_cache[ACTIONS];
// unsigned int aid = threadIdx.x; // action_id
// action_cache[aid] = (short)threadIdx.x;
// unsigned int q_id = (y * COLS + x) * ACTIONS;
// qval_cache[aid] = d_qtable[q_id + aid];
// __syncthreads();
// // reduction for getting the max val and action
// unsigned int stride = blockDim.x / 2; // 4 actions / 2
// #pragma unroll
// while (stride != 0) {
// if (aid < stride && qval_cache[aid] < qval_cache[aid + stride]) {
// // keep larger values in left cache
// qval_cache[aid] = qval_cache[aid + stride];
// action_cache[aid] = action_cache[aid + stride];
// }
// __syncthreads();
// stride /= 2;
// }
// // update: .x action; .y max_qval.
// d_actval[agent_id].x = action_cache[0];
// d_actval[agent_id].y = qval_cache[0];
// }
////////////////////////////////////////////////////////////////////////////////////////////////////////
// __global__ void Agent_action(int2 *cstate, short *d_action, curandState *d_states, float epsilon, float *d_qtable, bool *d_active) {
// unsigned int agent_id = blockIdx.x;
// if (d_active[agent_id] == 1)
// {
// // agent is alive
// float rand_state = curand_uniform(&d_states[agent_id]);
// if (rand_state < epsilon) {
// // exploration
// short action = (short)(curand_uniform(&d_states[agent_id]) * ACTIONS);
// if (action == 4) {
// // curand_uniform returns values in (0, 1]; map the action==4 case back to action==0 to keep the distribution uniform
// action = 0;
// }
// d_action[agent_id] = action;
// }
// else {
// // exploitation (greedy policy)
// // memory shared
// __shared__ float qval_cache[ACTIONS]; // 4 actions
// __shared__ short action_cache[ACTIONS];
// unsigned int aid = threadIdx.x;
// action_cache[aid] = (short)threadIdx.x;
// // located position on q_table
// unsigned int x = cstate[agent_id].x;
// unsigned int y = cstate[agent_id].y;
// unsigned int q_id = (y * COLS + x) * ACTIONS;
// // qval_cache[action_id] = d_qtable[q_id + action_id];
// qval_cache[aid] = d_qtable[q_id + aid];
// __syncthreads();
// // reduction for getting the max val and action
// unsigned int stride = blockDim.x / 2;
// #pragma unroll
// while (stride != 0) {
// if (aid < stride && qval_cache[aid] < qval_cache[aid + stride]) {
// qval_cache[aid] = qval_cache[aid + stride];
// action_cache[aid] = action_cache[aid + stride];
// }
// __syncthreads();
// stride /= 2;
// }
// // action = action_cache[0];
// d_action[agent_id] = action_cache[0];
// }
// }
// }
// short* agent_action(int2* cstate) {
// // do exploration or exploitation
// // dim3 block(ACTIONS, 1, 1);
// // dim3 grid(NUM_AGENTS, 1, 1);
// Agent_action <<< grid, block >>> (cstate, d_action, d_states, epsilon, d_qtable, d_active);
// return d_action;
// }
// __global__ void Agent_update(int2* cstate, int2* nstate, float *rewards, float *d_qtable, short *d_action, bool *d_active)
// {
// // observe next state S' and R
// unsigned int agent_id = blockIdx.x;
// if (d_active[agent_id] == 1) {
// // agent active
// float gamma_item = 0; // if agent is inactive, the gamma_item == 0
// if (rewards[agent_id] == 0) {
// // agent still active
// // memory shared
// __shared__ float qval_cache[ACTIONS];
// unsigned int action_id = threadIdx.x;
// unsigned int x1 = nstate[agent_id].x;
// unsigned int y1 = nstate[agent_id].y;
// unsigned int n_qid = (y1 * COLS + x1) * ACTIONS; // next state (n+1)
// qval_cache[action_id] = d_qtable[n_qid + action_id];
// __syncthreads();
// // reduction
// unsigned int i = blockDim.x / 2;
// #pragma unroll
// while (i != 0) {
// if (action_id < i && qval_cache[action_id] < qval_cache[action_id + i]) {
// qval_cache[action_id] = qval_cache[action_id + i];
// }
// __syncthreads();
// i /= 2;
// }
// float best_next_qval = qval_cache[0];
// gamma_item = GAMMA * best_next_qval;
// }
// // update q_table of current state (n) <- max val of next state (n+1)
// // Q(S, A) <- Q(S, A) + alpha[R + gamma * max Q(S', a) - Q(S, A)]
// unsigned int x0 = cstate[agent_id].x;
// unsigned int y0 = cstate[agent_id].y;
// unsigned int c_qid = (y0 * COLS + x0) * ACTIONS + (int)d_action[agent_id];
// d_qtable[c_qid] += ALPHA * (rewards[agent_id] + gamma_item - d_qtable[c_qid]);
// }
// }
// void agent_update(int2* cstate, int2* nstate, float *rewards)
// {
// dim3 block(ACTIONS, 1, 1);
// dim3 grid(NUM_AGENTS, 1, 1);
// Agent_update <<<grid, block >>> (cstate, nstate, rewards, d_qtable, d_action, d_active);
// }
|
9c6dee9b23cea06b53c5a3be11bc983414d7ff42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
#ifdef OPENCV
#include "opencv2/highgui/highgui_c.h"
#endif
#include "http_stream.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
void forward_network_gpu(network net, network_state state)
{
//hipDeviceSynchronize();
//printf("\n");
state.workspace = net.workspace;
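// run each layer's forward pass in order, feeding the previous layer's GPU output as the next layer's input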
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu && state.train){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
//printf("%d - type: %d - ", i, l.type);
//start_timer();
l.forward_gpu(l, state);
//hipDeviceSynchronize();
//stop_timer_and_show();
if(net.wait_stream)
hipStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
//hipDeviceSynchronize();
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 1, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
save_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
//hipDeviceSynchronize();
//show_total_time();
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
#ifdef CUDNN_HALF
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
}
#endif
forward_network_gpu(net, state);
//hipStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
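// spawn one host thread per layer; each thread averages that layer's weights across the n replica networks and redistributes the result to every GPU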
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
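// data-parallel step: split the batch across n networks (one host thread per GPU), sum their losses, and periodically average weights via sync_nets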
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//hipDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//hipDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
if (net.gpu_index != cuda_get_device())
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
//state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom()
state.input = net.input_state_gpu;
cuda_push_array(state.input, input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
//cuda_free(state.input); // will be freed in the free_network()
return out;
}
| 9c6dee9b23cea06b53c5a3be11bc983414d7ff42.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "parser.h"
#include "crop_layer.h"
#include "connected_layer.h"
#include "rnn_layer.h"
#include "gru_layer.h"
#include "crnn_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "avgpool_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "cost_layer.h"
#include "local_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "blas.h"
}
#ifdef OPENCV
#include "opencv2/highgui/highgui_c.h"
#endif
#include "http_stream.h"
float * get_network_output_gpu_layer(network net, int i);
float * get_network_delta_gpu_layer(network net, int i);
float * get_network_output_gpu(network net);
void forward_network_gpu(network net, network_state state)
{
//cudaDeviceSynchronize();
//printf("\n");
state.workspace = net.workspace;
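// run each layer's forward pass in order, feeding the previous layer's GPU output as the next layer's input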
int i;
for(i = 0; i < net.n; ++i){
state.index = i;
layer l = net.layers[i];
if(l.delta_gpu && state.train){
fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
}
//printf("%d - type: %d - ", i, l.type);
//start_timer();
l.forward_gpu(l, state);
//cudaDeviceSynchronize();
//stop_timer_and_show();
if(net.wait_stream)
cudaStreamSynchronize(get_cuda_stream());
state.input = l.output_gpu;
//cudaDeviceSynchronize();
/*
cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) {
int j;
for (j = 0; j < l.out_c; ++j) {
image img = make_image(l.out_w, l.out_h, 3);
memcpy(img.data, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 1, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
memcpy(img.data + l.out_w*l.out_h * 2, l.output + l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float));
char buff[256];
sprintf(buff, "layer-%d slice-%d", i, j);
show_image(img, buff);
save_image(img, buff);
}
cvWaitKey(0); // wait press-key in console
cvDestroyAllWindows();
}
*/
}
//cudaDeviceSynchronize();
//show_total_time();
}
void backward_network_gpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
float * original_input = state.input;
float * original_delta = state.delta;
for(i = net.n-1; i >= 0; --i){
state.index = i;
layer l = net.layers[i];
if (l.stopbackward) break;
if(i == 0){
state.input = original_input;
state.delta = original_delta;
}else{
layer prev = net.layers[i-1];
state.input = prev.output_gpu;
state.delta = prev.delta_gpu;
}
l.backward_gpu(l, state);
}
}
void update_network_gpu(network net)
{
cuda_set_device(net.gpu_index);
int i;
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
for(i = 0; i < net.n; ++i){
layer l = net.layers[i];
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
}
void forward_backward_network_gpu(network net, float *x, float *y)
{
network_state state;
state.index = 0;
state.net = net;
int x_size = get_network_input_size(net)*net.batch;
int y_size = get_network_output_size(net)*net.batch;
if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch;
if(!*net.input_gpu){
*net.input_gpu = cuda_make_array(x, x_size);
*net.truth_gpu = cuda_make_array(y, y_size);
}else{
cuda_push_array(*net.input_gpu, x, x_size);
cuda_push_array(*net.truth_gpu, y, y_size);
}
state.input = *net.input_gpu;
state.delta = 0;
state.truth = *net.truth_gpu;
state.train = 1;
#ifdef CUDNN_HALF
int i;
for (i = 0; i < net.n; ++i) {
layer l = net.layers[i];
cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16);
}
#endif
forward_network_gpu(net, state);
//cudaStreamSynchronize(get_cuda_stream());
backward_network_gpu(net, state);
}
float train_network_datum_gpu(network net, float *x, float *y)
{
*net.seen += net.batch;
forward_backward_network_gpu(net, x, y);
float error = get_network_cost(net);
if (((*net.seen) / net.batch) % net.subdivisions == 0) update_network_gpu(net);
return error;
}
typedef struct {
network net;
data d;
float *err;
} train_args;
void *train_thread(void *ptr)
{
train_args args = *(train_args*)ptr;
free(ptr);
cuda_set_device(args.net.gpu_index);
*args.err = train_network(args.net, args.d);
return 0;
}
pthread_t train_network_in_thread(network net, data d, float *err)
{
pthread_t thread;
train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
ptr->net = net;
ptr->d = d;
ptr->err = err;
if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
return thread;
}
void pull_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void push_updates(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c);
if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
}
}
void update_layer(layer l, network net)
{
int update_batch = net.batch*net.subdivisions;
float rate = get_current_rate(net);
l.t = get_current_batch(net);
if(l.update_gpu){
l.update_gpu(l, update_batch, rate, net.momentum, net.decay);
}
}
void merge_weights(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1);
if (l.scales) {
axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1);
}
}
void scale_weights(layer l, float s)
{
if (l.type == CONVOLUTIONAL) {
scal_cpu(l.n, s, l.biases, 1);
scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1);
if (l.scales) {
scal_cpu(l.n, s, l.scales, 1);
}
} else if(l.type == CONNECTED) {
scal_cpu(l.outputs, s, l.biases, 1);
scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
}
}
void pull_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_pull_array(l.biases_gpu, l.biases, l.n);
cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void push_weights(layer l)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, l.biases, l.n);
cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c);
if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, l.biases, l.outputs);
cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
}
}
void distribute_weights(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.biases_gpu, base.biases, l.n);
cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c);
if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.biases_gpu, base.biases, l.outputs);
cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
}
}
void merge_updates(layer l, layer base)
{
if (l.type == CONVOLUTIONAL) {
axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1);
if (l.scale_updates) {
axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
}
} else if(l.type == CONNECTED) {
axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
}
}
void distribute_updates(layer l, layer base)
{
if(l.type == CONVOLUTIONAL){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c);
if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
} else if(l.type == CONNECTED){
cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
}
}
void sync_layer(network *nets, int n, int j)
{
//printf("Syncing layer %d\n", j);
int i;
network net = nets[0];
layer base = net.layers[j];
cuda_set_device(net.gpu_index);
pull_weights(base);
for (i = 1; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
pull_weights(l);
merge_weights(l, base);
}
scale_weights(base, 1./n);
for (i = 0; i < n; ++i) {
cuda_set_device(nets[i].gpu_index);
layer l = nets[i].layers[j];
distribute_weights(l, base);
}
//printf("Done syncing layer %d\n", j);
}
typedef struct{
network *nets;
int n;
int j;
} sync_args;
void *sync_layer_thread(void *ptr)
{
sync_args args = *(sync_args*)ptr;
sync_layer(args.nets, args.n, args.j);
free(ptr);
return 0;
}
pthread_t sync_layer_in_thread(network *nets, int n, int j)
{
pthread_t thread;
sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
ptr->nets = nets;
ptr->n = n;
ptr->j = j;
if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
return thread;
}
void sync_nets(network *nets, int n, int interval)
{
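// spawn one host thread per layer; each thread averages that layer's weights across the n replica networks and redistributes the result to every GPU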
int j;
int layers = nets[0].n;
pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
*nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions;
for (j = 0; j < n; ++j){
*nets[j].seen = *nets[0].seen;
}
for (j = 0; j < layers; ++j) {
threads[j] = sync_layer_in_thread(nets, n, j);
}
for (j = 0; j < layers; ++j) {
pthread_join(threads[j], 0);
}
free(threads);
}
float train_networks(network *nets, int n, data d, int interval)
{
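// data-parallel step: split the batch across n networks (one host thread per GPU), sum their losses, and periodically average weights via sync_nets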
int i;
int batch = nets[0].batch;
int subdivisions = nets[0].subdivisions;
assert(batch * subdivisions * n == d.X.rows);
pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
float *errors = (float *) calloc(n, sizeof(float));
float sum = 0;
for(i = 0; i < n; ++i){
data p = get_data_part(d, i, n);
threads[i] = train_network_in_thread(nets[i], p, errors + i);
}
for(i = 0; i < n; ++i){
pthread_join(threads[i], 0);
//printf("%f\n", errors[i]);
sum += errors[i];
}
//cudaDeviceSynchronize();
if (get_current_batch(nets[0]) % interval == 0) {
printf("Syncing... ");
fflush(stdout);
sync_nets(nets, n, interval);
printf("Done!\n");
}
//cudaDeviceSynchronize();
free(threads);
free(errors);
return (float)sum/(n);
}
float *get_network_output_layer_gpu(network net, int i)
{
layer l = net.layers[i];
if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
return l.output;
}
float *get_network_output_gpu(network net)
{
int i;
for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
return get_network_output_layer_gpu(net, i);
}
float *network_predict_gpu(network net, float *input)
{
if (net.gpu_index != cuda_get_device())
cuda_set_device(net.gpu_index);
int size = get_network_input_size(net) * net.batch;
network_state state;
state.index = 0;
state.net = net;
//state.input = cuda_make_array(input, size); // memory will be allocated in the parse_network_cfg_custom()
state.input = net.input_state_gpu;
cuda_push_array(state.input, input, size);
state.truth = 0;
state.train = 0;
state.delta = 0;
forward_network_gpu(net, state);
float *out = get_network_output_gpu(net);
//cuda_free(state.input); // will be freed in the free_network()
return out;
}
|
1f614bd9f978048846f0fc3a1f6a1867fb79a2fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
// You can change the dimensions; the program multiplies an M x N matrix by an N-element vector on both CPU and GPU and reports timings.
#define M 10
#define N 10
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void matrixAdd(int d_x[][N], int d_y[N], int d_z[][N]) {
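// note: despite the name, this kernel multiplies each matrix element by the vector element for its column (one thread per element)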
int idx = threadIdx.x;
int idy = threadIdx.y;
if (idx < M && idy < N) {
d_z[idx][idy] = d_x[idx][idy] * d_y[idy];
}
}
int main() {
int sizeM = (M * N) * sizeof(int);
int sizeV = N * sizeof(int);
int h_x[M][N], h_y[N], h_z[M][N];
int(*d_x)[N], (*d_y), (*d_z)[N];
int i = 0;
int j = 0;
//Initialize matrix
for (i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
h_x[i][j] = 1;
h_z[i][j] = 0;
}
}
//Initialize vector
for (i = 0; i < N; i++) {
h_y[i] = 2;
}
hipEvent_t startC, stopC;
float elapsed_time_msC;
hipEventCreate(&startC);
hipEventCreate(&stopC);
hipEventRecord(startC, 0);
for (i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
h_z[i][j] = h_x[i][j] * h_y[j];
}
}
hipEventRecord(stopC, 0);
hipEventSynchronize(stopC);
hipEventElapsedTime(&elapsed_time_msC, startC, stopC);
printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC);
CUDA_CALL(hipMalloc(&d_x, sizeM));
CUDA_CALL(hipMemcpy(d_x, h_x, sizeM, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&d_y, sizeV));
CUDA_CALL(hipMemcpy(d_y, h_y, sizeV, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&d_z, sizeM));
dim3 dimGrid(1, 1);
dim3 dimBlock(M, N);
hipEvent_t start, stop;
float elapsed_time_ms;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
matrixAdd << < dimGrid, dimBlock >> > (d_x, d_y, d_z);
CUDA_CALL(hipMemcpy(h_z, d_z, sizeM, hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);
hipFree(d_x);
hipFree(d_y);
hipFree(d_z);
printf("Output of Multiplication\n");
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
printf("%d\t", h_z[i][j]);
}
printf("\n");
}
printf("\n");
getchar();
} | 1f614bd9f978048846f0fc3a1f6a1867fb79a2fc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
// You can change the dimensions; the program multiplies an M x N matrix by an N-element vector on both CPU and GPU and reports timings.
#define M 10
#define N 10
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void matrixAdd(int d_x[][N], int d_y[N], int d_z[][N]) {
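// note: despite the name, this kernel multiplies each matrix element by the vector element for its column (one thread per element)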
int idx = threadIdx.x;
int idy = threadIdx.y;
if (idx < M && idy < N) {
d_z[idx][idy] = d_x[idx][idy] * d_y[idy];
}
}
int main() {
int sizeM = (M * N) * sizeof(int);
int sizeV = N * sizeof(int);
int h_x[M][N], h_y[N], h_z[M][N];
int(*d_x)[N], (*d_y), (*d_z)[N];
int i = 0;
int j = 0;
//Initialize matrix
for (i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
h_x[i][j] = 1;
h_z[i][j] = 0;
}
}
//Initialize vector
for (i = 0; i < N; i++) {
h_y[i] = 2;
}
cudaEvent_t startC, stopC;
float elapsed_time_msC;
cudaEventCreate(&startC);
cudaEventCreate(&stopC);
cudaEventRecord(startC, 0);
for (i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
h_z[i][j] = h_x[i][j] * h_y[j];
}
}
cudaEventRecord(stopC, 0);
cudaEventSynchronize(stopC);
cudaEventElapsedTime(&elapsed_time_msC, startC, stopC);
printf("Time to calculate results(CPU Time): %f ms.\n", elapsed_time_msC);
CUDA_CALL(cudaMalloc(&d_x, sizeM));
CUDA_CALL(cudaMemcpy(d_x, h_x, sizeM, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&d_y, sizeV));
CUDA_CALL(cudaMemcpy(d_y, h_y, sizeV, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&d_z, sizeM));
dim3 dimGrid(1, 1);
dim3 dimBlock(M, N);
cudaEvent_t start, stop;
float elapsed_time_ms;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
matrixAdd << < dimGrid, dimBlock >> > (d_x, d_y, d_z);
CUDA_CALL(cudaMemcpy(h_z, d_z, sizeM, cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time_ms, start, stop);
printf("Time to calculate results(GPU Time): %f ms.\n", elapsed_time_ms);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
printf("Output of Multiplication\n");
for (i = 0; i < M; i++) {
for (j = 0; j < N; j++) {
printf("%d\t", h_z[i][j]);
}
printf("\n");
}
printf("\n");
getchar();
} |
5b9f690222b9af6101c6b6e5c290db0ec25ab5d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 32
int size;
typedef struct
{
float **element;
} matrix;
long long wall_clock_time()
{
#ifdef __linux__
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
__device__ float getElement(matrix A, int row, int col) {
return A.element[row][col];
}
__device__ void setElement(matrix A, int row, int col, float value) {
A.element[row][col] = value;
}
__device__ matrix getSubMatrix(matrix A, int blockRow, int blockCol) {
int startingRow = BLOCK_SIZE * blockRow;
int startingCol = BLOCK_SIZE * blockCol;
// Allocate memory for sub matrix
matrix subA;
subA.element = (float**)malloc(sizeof(float*) * BLOCK_SIZE);
int row;
for (row = 0; row < BLOCK_SIZE; row++) {
// subA.element[row] = (float*)malloc(sizeof(float) * BLOCK_SIZE);
subA.element[row] = A.element[startingRow + row] + startingCol;
}
// int row, col;
// for (row = 0; row < BLOCK_SIZE; row++) {
// subA.element[row] = A.element[startingRow + row] + startingCol;
// // for (col = 0; col < BLOCK_SIZE; col++) {
// // printf("%f ", A.element[startingRow + row][startingCol + col]);
// // }
// // printf("\n");
// }
// int i, j;
// for (i = 0; i < BLOCK_SIZE; i++) {
// for (j = 0; j < BLOCK_SIZE; j++) {
// printf("%f ", subA.element[i][j]);
// }
// printf("\n");
// }
return subA;
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
void allocate_matrix(matrix* m)
{
int i;
hipError_t rc;
// allocate array for all the rows
rc = hipMallocManaged((void**)&(m->element), sizeof(float*) * size);
if (rc != hipSuccess)
{
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(rc));
exit(1);
}
// allocate an array for each row of the matrix
for (i = 0; i < size; i++)
{
rc = hipMallocManaged((void**)&(m->element[i]), sizeof(float) * size);
if (rc != hipSuccess)
{
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(rc));
exit(1);
}
}
}
/**
* Free the memory allocated for a matrix.
**/
void free_matrix(matrix* m) {
int i;
for (i = 0; i < size; i++)
hipFree(m->element[i]);
hipFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = rand() % 10;
}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
void init_matrix_zero(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = 0.0;
}
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Do the multiplication
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
/**
 * Each thread computes result element (i,j) of the product using shared-memory tiling.
*/
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int k;
float resultValue = 0;
// if (i >= size || j >= size)
// return;
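// tiled multiplication: each iteration stages one BLOCK_SIZE x BLOCK_SIZE tile of A and B in shared memory; with the bounds check above disabled this assumes size is a multiple of BLOCK_SIZE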
for (k = 0; k < (size / BLOCK_SIZE); k++) {
__shared__ float sharedA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sharedB[BLOCK_SIZE][BLOCK_SIZE];
sharedA[threadIdx.y][threadIdx.x] = a.element[i][k * BLOCK_SIZE + threadIdx.x];
sharedB[threadIdx.y][threadIdx.x] = b.element[k * BLOCK_SIZE + threadIdx.y][j];
__syncthreads();
int i;
for (i = 0; i < BLOCK_SIZE; i++) {
resultValue += sharedA[threadIdx.y][i] * sharedB[i][threadIdx.x];
}
__syncthreads();
}
result.element[i][j] = resultValue;
}
void print_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
{
printf("row %4d: ", i);
for (j = 0; j < size; j++)
printf("%6.2f ", m.element[i][j]);
printf("\n");
}
}
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
hipError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// print_matrix(a);
// printf("\n");
// print_matrix(b);
// printf("\n");
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result1);
// Perform CUDA matrix multiplication
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // a block of 32 x 32 CUDA threads
dim = (size % BLOCK_SIZE == 0) ? size / BLOCK_SIZE : size / BLOCK_SIZE + 1;
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
hipLaunchKernelGGL(( mm_kernel), dim3(grid), dim3(block), 0, 0, a, b, result2, size);
hipDeviceSynchronize();
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result2);
// was there any error?
rc = hipGetLastError();
if (rc != hipSuccess)
printf("Last CUDA error %s\n", hipGetErrorString(rc));
// Compare the results
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
printf("correct: %f, actual: %f\n", result1.element[i][j], result2.element[i][j]);
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
int main(int argc, char ** argv)
{
srand(0);
printf("Usage: %s <size>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
fprintf(stderr,"Sequential matrix multiplication of size %d\n", size);
// Multiply the matrices
work();
return 0;
}
| 5b9f690222b9af6101c6b6e5c290db0ec25ab5d0.cu | /**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#define BLOCK_SIZE 32
int size;
typedef struct
{
float **element;
} matrix;
long long wall_clock_time()
{
#ifdef __linux__
struct timespec tp;
clock_gettime(CLOCK_REALTIME, &tp);
return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
struct timeval tv;
gettimeofday(&tv, NULL);
return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}
__device__ float getElement(matrix A, int row, int col) {
return A.element[row][col];
}
__device__ void setElement(matrix A, int row, int col, float value) {
A.element[row][col] = value;
}
__device__ matrix getSubMatrix(matrix A, int blockRow, int blockCol) {
int startingRow = BLOCK_SIZE * blockRow;
int startingCol = BLOCK_SIZE * blockCol;
// Allocate memory for sub matrix
matrix subA;
subA.element = (float**)malloc(sizeof(float*) * BLOCK_SIZE);
int row;
for (row = 0; row < BLOCK_SIZE; row++) {
// subA.element[row] = (float*)malloc(sizeof(float) * BLOCK_SIZE);
subA.element[row] = A.element[startingRow + row] + startingCol;
}
// int row, col;
// for (row = 0; row < BLOCK_SIZE; row++) {
// subA.element[row] = A.element[startingRow + row] + startingCol;
// // for (col = 0; col < BLOCK_SIZE; col++) {
// // printf("%f ", A.element[startingRow + row][startingCol + col]);
// // }
// // printf("\n");
// }
// int i, j;
// for (i = 0; i < BLOCK_SIZE; i++) {
// for (j = 0; j < BLOCK_SIZE; j++) {
// printf("%f ", subA.element[i][j]);
// }
// printf("\n");
// }
return subA;
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
void allocate_matrix(matrix* m)
{
int i;
cudaError_t rc;
// allocate array for all the rows
rc = cudaMallocManaged((void**)&(m->element), sizeof(float*) * size);
if (rc != cudaSuccess)
{
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
// allocate an array for each row of the matrix
for (i = 0; i < size; i++)
{
rc = cudaMallocManaged((void**)&(m->element[i]), sizeof(float) * size);
if (rc != cudaSuccess)
{
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(rc));
exit(1);
}
}
}
/**
* Free the memory allocated for a matrix.
**/
void free_matrix(matrix* m) {
int i;
for (i = 0; i < size; i++)
cudaFree(m->element[i]);
cudaFree(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = rand() % 10;
}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
void init_matrix_zero(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
{
m.element[i][j] = 0.0;
}
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Do the multiplication
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
result.element[i][j] += a.element[i][k] * b.element[k][j];
}
/**
 * Each thread computes result element (i,j) of the product using shared-memory tiling.
*/
__global__ void mm_kernel(matrix a, matrix b, matrix result, int size)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
int k;
float resultValue = 0;
// if (i >= size || j >= size)
// return;
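// tiled multiplication: each iteration stages one BLOCK_SIZE x BLOCK_SIZE tile of A and B in shared memory; with the bounds check above disabled this assumes size is a multiple of BLOCK_SIZE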
for (k = 0; k < (size / BLOCK_SIZE); k++) {
__shared__ float sharedA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sharedB[BLOCK_SIZE][BLOCK_SIZE];
sharedA[threadIdx.y][threadIdx.x] = a.element[i][k * BLOCK_SIZE + threadIdx.x];
sharedB[threadIdx.y][threadIdx.x] = b.element[k * BLOCK_SIZE + threadIdx.y][j];
__syncthreads();
int i;
for (i = 0; i < BLOCK_SIZE; i++) {
resultValue += sharedA[threadIdx.y][i] * sharedB[i][threadIdx.x];
}
__syncthreads();
}
result.element[i][j] = resultValue;
}
void print_matrix(matrix m)
{
int i, j;
for (i = 0; i < size; i++)
{
printf("row %4d: ", i);
for (j = 0; j < size; j++)
printf("%6.2f ", m.element[i][j]);
printf("\n");
}
}
void work()
{
matrix a, b, result1, result2;
long long before, after;
int correct, i, j, dim;
cudaError_t rc;
// Allocate memory for matrices
allocate_matrix(&a);
allocate_matrix(&b);
allocate_matrix(&result1);
allocate_matrix(&result2);
// Initialize matrix elements
init_matrix(a);
init_matrix(b);
// print_matrix(a);
// printf("\n");
// print_matrix(b);
// printf("\n");
// Perform sequential matrix multiplication
before = wall_clock_time();
mm(a, b, result1);
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on CPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result1);
// Perform CUDA matrix multiplication
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // a block of 32 x 32 CUDA threads
dim = (size % BLOCK_SIZE == 0) ? size / BLOCK_SIZE : size / BLOCK_SIZE + 1;
dim3 grid(dim, dim); // a grid of CUDA thread blocks
before = wall_clock_time();
mm_kernel<<<grid, block>>>(a, b, result2, size);
cudaDeviceSynchronize();
after = wall_clock_time();
fprintf(stderr, "Matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
// print_matrix(result2);
// was there any error?
rc = cudaGetLastError();
if (rc != cudaSuccess)
printf("Last CUDA error %s\n", cudaGetErrorString(rc));
// Compare the results
correct = 1;
for (i = 0; correct && i < size; i++)
for (j = 0; j < size; j++)
if (result1.element[i][j] != result2.element[i][j]) {
printf("correct: %f, actual: %f\n", result1.element[i][j], result2.element[i][j]);
correct = 0;
break;
}
if (correct)
printf("The result matrices are identical!\n");
else
printf("Difference in result matrices at element (%d, %d)!\n", i, j);
free_matrix(&a);
free_matrix(&b);
free_matrix(&result1);
free_matrix(&result2);
}
int main(int argc, char ** argv)
{
srand(0);
printf("Usage: %s <size>\n", argv[0]);
if (argc >= 2)
size = atoi(argv[1]);
else
size = 1024;
fprintf(stderr,"Sequential matrix multiplication of size %d\n", size);
// Multiply the matrices
work();
return 0;
}
|
f46c22cc6b3e10d1c516e5115d2300ad9421cb3b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2017 Trevor Simonton
#include <hip/hip_fp16.h>
__global__ void Wombat4x8(
float *Wb,
float *Wa,
int *bwords,
int bwords_start_idx,
int *awords,
int awords_start_idx,
int *labels,
int hidden_size,
float alpha,
int max_exp,
int hs) {
int row = threadIdx.y;
int col = threadIdx.x;
int batch_index = blockIdx.x;
int a = 4;
int b = 8;
int awords_index = awords_start_idx + batch_index*a;
int labels_index = awords_start_idx + batch_index*a;
int bwords_index = bwords_start_idx + batch_index*b;
extern __shared__ float sw[];
float *As = &sw[0];
float *Bs = &sw[4 * hidden_size];
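// dynamic shared memory is split into a 4-row tile of A vectors and an 8-column tile of B vectors (a = 4, b = 8 per batch entry)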
// load in local sets of word vectors into As and Bs
for (int i = 0; i < hidden_size; i += b) {
if ((i+col) < hidden_size)
As[(hidden_size*row) + (i+col)] =
Wa[(hidden_size * awords[awords_index + row]) + (i+col)];
}
for (int i = 0; i < hidden_size; i += a) {
if ((i+row) < hidden_size)
Bs[(hidden_size*col) + (i+row)] =
Wb[(hidden_size * bwords[bwords_index + col]) + (i+row)];
}
__syncthreads();
// dot product of the staged A row and B column (the activation input)
float f = 0;
for (int i = 0; i < hidden_size; ++i) {
f += As[(hidden_size*row) + i] * Bs[col*hidden_size + i];
}
if (hs == 1) {
if (f >= max_exp) {
f = 0;
} else if (f <= -max_exp) {
f = 0;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (1.0f - labels[labels_index + row] - f) * alpha;
}
} else {
if (f > max_exp) {
f = (labels[labels_index + row] - 1) * alpha;
} else if (f < -max_exp) {
f = labels[labels_index + row] * alpha;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (labels[labels_index + row] - f) * alpha;
}
}
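// f now holds the scaled gradient of the logistic loss; apply symmetric updates to both embedding matrices, using warp shuffles to sum partial contributions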
for (int i = 0; i < hidden_size; i++) {
// calculate local update for this thread
float uA = f * Bs[col*hidden_size + i];
float uB = f * As[row*hidden_size + i];
// update column of B
uB += __shfl_down(uB, 16);
uB += __shfl_down(uB, 8);
if (row == 0) {
atomicAdd(
Wb + (hidden_size * bwords[bwords_index + col]) + i,
uB);
}
// update column of A
uA += __shfl_down(uA, 4, 8);
uA += __shfl_down(uA, 2, 8);
uA += __shfl_down(uA, 1, 8);
if (col == 0) {
atomicAdd(
Wa + (hidden_size * awords[awords_index + row]) + i,
uA);
}
}
}
__global__ void VectorTrain(
float *Wb,
float *Wa,
int *bwords,
int bwords_start_idx,
int *awords,
int awords_start_idx,
int *labels,
int hidden_size,
float alpha,
int max_exp,
int B_start,
int hs) {
int batch_index = blockIdx.x;
int awords_index = awords_start_idx + batch_index;
int labels_index = awords_start_idx + batch_index;
int bwords_index = bwords_start_idx + batch_index;
extern __shared__ float sv[];
float *A1s = &sv[0];
float *Bs = &sv[B_start];
float f = 0;
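// one warp (32 threads) per (target, context) pair: each lane owns hidden_size/32 elements, computes a partial dot product, and the warp reduces it with __shfl_down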
for (int i = 0; i < hidden_size / 32; i++) {
A1s[i+threadIdx.x*hidden_size/32] =
Wa[(hidden_size * awords[awords_index]) + i + threadIdx.x*hidden_size/32];
Bs[i+threadIdx.x*hidden_size/32] =
Wb[(hidden_size * bwords[bwords_index]) + i + threadIdx.x*hidden_size/32];
}
__syncthreads();
for (int i = 0; i < hidden_size / 32; i++) {
f += A1s[i + threadIdx.x*hidden_size/32]
* Bs[i + threadIdx.x*hidden_size/32];
}
#pragma unroll
for (int i = 16; i > 0; i /= 2) {
f += __shfl_down(f, i);
}
if (threadIdx.x == 0) {
if (hs == 1) {
if (f >= max_exp) {
f = 0;
} else if (f <= -max_exp) {
f = 0;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (1.0f - labels[labels_index] - f) * alpha;
}
} else {
if (f > max_exp) {
f = (labels[labels_index] - 1) * alpha;
} else if (f < -max_exp) {
f = labels[labels_index] * alpha;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (labels[labels_index] - f) * alpha;
}
}
}
f = __shfl(f, 0);
// Calculate and apply updates
for (int i = 0; i < hidden_size/32; i++) {
atomicAdd(
Wa + (hidden_size * awords[awords_index])
+ i+threadIdx.x*hidden_size/32,
f * Bs[i+threadIdx.x*hidden_size/32]);
atomicAdd(
Wb + (hidden_size * bwords[bwords_index])
+ i+threadIdx.x*hidden_size/32,
f * A1s[i+threadIdx.x*hidden_size/32]);
}
}
| f46c22cc6b3e10d1c516e5115d2300ad9421cb3b.cu | // Copyright 2017 Trevor Simonton
#include <cuda_fp16.h>
__global__ void Wombat4x8(
float *Wb,
float *Wa,
int *bwords,
int bwords_start_idx,
int *awords,
int awords_start_idx,
int *labels,
int hidden_size,
float alpha,
int max_exp,
int hs) {
int row = threadIdx.y;
int col = threadIdx.x;
int batch_index = blockIdx.x;
int a = 4;
int b = 8;
int awords_index = awords_start_idx + batch_index*a;
int labels_index = awords_start_idx + batch_index*a;
int bwords_index = bwords_start_idx + batch_index*b;
extern __shared__ float sw[];
float *As = &sw[0];
float *Bs = &sw[4 * hidden_size];
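// dynamic shared memory is split into a 4-row tile of A vectors and an 8-column tile of B vectors (a = 4, b = 8 per batch entry)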
// load in local sets of word vectors into As and Bs
for (int i = 0; i < hidden_size; i += b) {
if ((i+col) < hidden_size)
As[(hidden_size*row) + (i+col)] =
Wa[(hidden_size * awords[awords_index + row]) + (i+col)];
}
for (int i = 0; i < hidden_size; i += a) {
if ((i+row) < hidden_size)
Bs[(hidden_size*col) + (i+row)] =
Wb[(hidden_size * bwords[bwords_index + col]) + (i+row)];
}
__syncthreads();
// dot product of the staged A row and B column (the activation input)
float f = 0;
for (int i = 0; i < hidden_size; ++i) {
f += As[(hidden_size*row) + i] * Bs[col*hidden_size + i];
}
if (hs == 1) {
if (f >= max_exp) {
f = 0;
} else if (f <= -max_exp) {
f = 0;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (1.0f - labels[labels_index + row] - f) * alpha;
}
} else {
if (f > max_exp) {
f = (labels[labels_index + row] - 1) * alpha;
} else if (f < -max_exp) {
f = labels[labels_index + row] * alpha;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (labels[labels_index + row] - f) * alpha;
}
}
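// f now holds the scaled gradient of the logistic loss; apply symmetric updates to both embedding matrices, using warp shuffles to sum partial contributions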
for (int i = 0; i < hidden_size; i++) {
// calculate local update for this thread
float uA = f * Bs[col*hidden_size + i];
float uB = f * As[row*hidden_size + i];
// update column of B
uB += __shfl_down(uB, 16);
uB += __shfl_down(uB, 8);
if (row == 0) {
atomicAdd(
Wb + (hidden_size * bwords[bwords_index + col]) + i,
uB);
}
// update column of A
uA += __shfl_down(uA, 4, 8);
uA += __shfl_down(uA, 2, 8);
uA += __shfl_down(uA, 1, 8);
if (col == 0) {
atomicAdd(
Wa + (hidden_size * awords[awords_index + row]) + i,
uA);
}
}
}
__global__ void VectorTrain(
float *Wb,
float *Wa,
int *bwords,
int bwords_start_idx,
int *awords,
int awords_start_idx,
int *labels,
int hidden_size,
float alpha,
int max_exp,
int B_start,
int hs) {
int batch_index = blockIdx.x;
int awords_index = awords_start_idx + batch_index;
int labels_index = awords_start_idx + batch_index;
int bwords_index = bwords_start_idx + batch_index;
extern __shared__ float sv[];
float *A1s = &sv[0];
float *Bs = &sv[B_start];
float f = 0;
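// one warp (32 threads) per (target, context) pair: each lane owns hidden_size/32 elements, computes a partial dot product, and the warp reduces it with __shfl_down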
for (int i = 0; i < hidden_size / 32; i++) {
A1s[i+threadIdx.x*hidden_size/32] =
Wa[(hidden_size * awords[awords_index]) + i + threadIdx.x*hidden_size/32];
Bs[i+threadIdx.x*hidden_size/32] =
Wb[(hidden_size * bwords[bwords_index]) + i + threadIdx.x*hidden_size/32];
}
__syncthreads();
for (int i = 0; i < hidden_size / 32; i++) {
f += A1s[i + threadIdx.x*hidden_size/32]
* Bs[i + threadIdx.x*hidden_size/32];
}
#pragma unroll
for (int i = 16; i > 0; i /= 2) {
f += __shfl_down(f, i);
}
if (threadIdx.x == 0) {
if (hs == 1) {
if (f >= max_exp) {
f = 0;
} else if (f <= -max_exp) {
f = 0;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (1.0f - labels[labels_index] - f) * alpha;
}
} else {
if (f > max_exp) {
f = (labels[labels_index] - 1) * alpha;
} else if (f < -max_exp) {
f = labels[labels_index] * alpha;
} else {
f = exp(f);
f = f / (1.0f + f);
f = (labels[labels_index] - f) * alpha;
}
}
}
f = __shfl(f, 0);
// Calculate and apply updates
for (int i = 0; i < hidden_size/32; i++) {
atomicAdd(
Wa + (hidden_size * awords[awords_index])
+ i+threadIdx.x*hidden_size/32,
f * Bs[i+threadIdx.x*hidden_size/32]);
atomicAdd(
Wb + (hidden_size * bwords[bwords_index])
+ i+threadIdx.x*hidden_size/32,
f * A1s[i+threadIdx.x*hidden_size/32]);
}
}
|
9525823403b7e555ca3bcfaad499779d47427c84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/box_coder_kernel.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/box_coder.h"
namespace phi {
template <typename T>
__global__ void EncodeCenterSizeKernel(const T *prior_box_data,
const T *prior_box_var_data,
const T *target_box_data,
const int row,
const int col,
const int len,
const bool normalized,
const T prior_box_var_size,
const float *variance,
const int var_size,
T *output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
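// encode: express each target box as (dx, dy, dw, dh) offsets of its center and log size ratios relative to the matched prior box, optionally divided by per-prior or global variances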
const int row_idx = idx / col;
const int col_idx = idx % col;
T prior_box_width = prior_box_data[col_idx * len + 2] -
prior_box_data[col_idx * len] + (normalized == false);
T prior_box_height = prior_box_data[col_idx * len + 3] -
prior_box_data[col_idx * len + 1] +
(normalized == false);
T prior_box_center_x = prior_box_data[col_idx * len] + prior_box_width / 2;
T prior_box_center_y =
prior_box_data[col_idx * len + 1] + prior_box_height / 2;
T target_box_center_x =
(target_box_data[row_idx * len + 2] + target_box_data[row_idx * len]) /
2;
T target_box_center_y = (target_box_data[row_idx * len + 3] +
target_box_data[row_idx * len + 1]) /
2;
T target_box_width = target_box_data[row_idx * len + 2] -
target_box_data[row_idx * len] + (normalized == false);
T target_box_height = target_box_data[row_idx * len + 3] -
target_box_data[row_idx * len + 1] +
(normalized == false);
output[idx * len] =
(target_box_center_x - prior_box_center_x) / prior_box_width;
output[idx * len + 1] =
(target_box_center_y - prior_box_center_y) / prior_box_height;
output[idx * len + 2] = log(fabs(target_box_width / prior_box_width));
output[idx * len + 3] = log(fabs(target_box_height / prior_box_height));
if (prior_box_var_data) {
int prior_var_offset = col_idx * len;
output[idx * len] /= prior_box_var_data[prior_var_offset];
output[idx * len + 1] /= prior_box_var_data[prior_var_offset + 1];
output[idx * len + 2] /= prior_box_var_data[prior_var_offset + 2];
output[idx * len + 3] /= prior_box_var_data[prior_var_offset + 3];
} else if (var_size == 4) {
for (int k = 0; k < 4; ++k) {
output[idx * len + k] /= static_cast<T>(variance[k]);
}
}
}
}
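// The encoding computed above is the standard center-size box coding: with the
// prior width pw = xmax - xmin (+1 when boxes are not normalized) and the other
// extents defined analogously,
//   t_x = (g_cx - p_cx) / pw,   t_y = (g_cy - p_cy) / ph,
//   t_w = log(|gw / pw|),       t_h = log(|gh / ph|),
// optionally divided element-wise by the per-box prior variance
// (prior_box_var_data) or by the 4-element `variance` attribute.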
template <typename T>
__global__ void DecodeCenterSizeKernel(const T *prior_box_data,
const T *prior_box_var_data,
const T *target_box_data,
const int row,
const int col,
const int len,
const bool normalized,
const T prior_box_var_size,
const float *variance,
const int var_size,
const int axis,
T *output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
int prior_box_offset = 0;
if (idx < row * col) {
const int col_idx = idx % col;
const int row_idx = idx / col;
prior_box_offset = axis == 0 ? col_idx * len : row_idx * len;
T prior_box_width = prior_box_data[prior_box_offset + 2] -
prior_box_data[prior_box_offset] +
(normalized == false);
T prior_box_height = prior_box_data[prior_box_offset + 3] -
prior_box_data[prior_box_offset + 1] +
(normalized == false);
T prior_box_center_x =
prior_box_data[prior_box_offset] + prior_box_width / 2;
T prior_box_center_y =
prior_box_data[prior_box_offset + 1] + prior_box_height / 2;
T target_box_width, target_box_height;
T target_box_center_x, target_box_center_y;
T box_var_x = T(1), box_var_y = T(1);
T box_var_w = T(1), box_var_h = T(1);
if (prior_box_var_data) {
int prior_var_offset = axis == 0 ? col_idx * len : row_idx * len;
box_var_x = prior_box_var_data[prior_var_offset];
box_var_y = prior_box_var_data[prior_var_offset + 1];
box_var_w = prior_box_var_data[prior_var_offset + 2];
box_var_h = prior_box_var_data[prior_var_offset + 3];
} else if (var_size == 4) {
box_var_x = static_cast<T>(variance[0]);
box_var_y = static_cast<T>(variance[1]);
box_var_w = static_cast<T>(variance[2]);
box_var_h = static_cast<T>(variance[3]);
}
target_box_width =
exp(box_var_w * target_box_data[idx * len + 2]) * prior_box_width;
target_box_height =
exp(box_var_h * target_box_data[idx * len + 3]) * prior_box_height;
target_box_center_x =
box_var_x * target_box_data[idx * len] * prior_box_width +
prior_box_center_x;
target_box_center_y =
box_var_y * target_box_data[idx * len + 1] * prior_box_height +
prior_box_center_y;
output[idx * len] = target_box_center_x - target_box_width / 2;
output[idx * len + 1] = target_box_center_y - target_box_height / 2;
output[idx * len + 2] =
target_box_center_x + target_box_width / 2 - (normalized == false);
output[idx * len + 3] =
target_box_center_y + target_box_height / 2 - (normalized == false);
}
}
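// The decoding above inverts the center-size encoding:
//   gw   = exp(var_w * t_w) * pw,     gh   = exp(var_h * t_h) * ph,
//   g_cx = var_x * t_x * pw + p_cx,   g_cy = var_y * t_y * ph + p_cy,
// and the output corners are center +/- half extent, with 1 subtracted from the
// max corner when the boxes are not normalized.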
template <typename T, typename Context>
void BoxCoderKernel(const Context &dev_ctx,
const DenseTensor &prior_box,
const paddle::optional<DenseTensor> &prior_box_var,
const DenseTensor &target_box,
const std::string &code_type_str,
bool normalized,
int axis,
const std::vector<float> &variance,
DenseTensor *output_box) {
const T *prior_box_data = prior_box.template data<T>();
const T *target_box_data = target_box.template data<T>();
const T *prior_box_var_data = nullptr;
auto prior_box_var_size = 0;
if (prior_box_var) {
PADDLE_ENFORCE_EQ(variance.empty(),
true,
phi::errors::InvalidArgument(
"Input 'PriorBoxVar' and attribute 'variance'"
" of BoxCoder operator should not be used at the "
"same time."));
prior_box_var_data = prior_box_var->data<T>();
prior_box_var_size = prior_box_var->dims().size();
}
if (!(variance.empty())) {
PADDLE_ENFORCE_EQ(static_cast<int>(variance.size()),
4,
phi::errors::InvalidArgument(
"Size of attribute 'variance' in BoxCoder operator"
" should be 4. But received size is %d",
variance.size()));
}
if (target_box.lod().size()) {
PADDLE_ENFORCE_EQ(target_box.lod().size(),
1,
phi::errors::InvalidArgument(
"Input 'TargetBox' of BoxCoder operator only"
" supports LoD with one level."));
}
const int var_size = static_cast<int>(variance.size());
auto code_type = phi::funcs::GetBoxCodeType(code_type_str);
auto row = target_box.dims()[0];
auto col = prior_box.dims()[0];
if (code_type == phi::funcs::BoxCodeType::kDecodeCenterSize) {
col = target_box.dims()[1];
}
auto len = prior_box.dims()[1];
int block = 512;
int grid = (row * col + block - 1) / block;
int bytes = var_size * sizeof(float);
auto dev_var = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
float *dev_var_data = reinterpret_cast<float *>(dev_var->ptr());
auto cplace = phi::CPUPlace();
const auto gplace = dev_ctx.GetPlace();
memory_utils::Copy(
gplace, dev_var_data, cplace, &variance[0], bytes, dev_ctx.stream());
output_box->Resize({row, col, len});
dev_ctx.template Alloc<T>(output_box);
T *output = output_box->data<T>();
if (code_type == phi::funcs::BoxCodeType::kEncodeCenterSize) {
hipLaunchKernelGGL(( EncodeCenterSizeKernel<T>)
, dim3(grid), dim3(block), 0, dev_ctx.stream(), prior_box_data,
prior_box_var_data,
target_box_data,
row,
col,
len,
normalized,
prior_box_var_size,
dev_var_data,
var_size,
output);
} else if (code_type == phi::funcs::BoxCodeType::kDecodeCenterSize) {
hipLaunchKernelGGL(( DecodeCenterSizeKernel<T>)
, dim3(grid), dim3(block), 0, dev_ctx.stream(), prior_box_data,
prior_box_var_data,
target_box_data,
row,
col,
len,
normalized,
prior_box_var_size,
dev_var_data,
var_size,
axis,
output);
}
}
} // namespace phi
PD_REGISTER_KERNEL(
box_coder, GPU, ALL_LAYOUT, phi::BoxCoderKernel, float, double) {}
| 9525823403b7e555ca3bcfaad499779d47427c84.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/box_coder_kernel.h"
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/box_coder.h"
namespace phi {
template <typename T>
__global__ void EncodeCenterSizeKernel(const T *prior_box_data,
const T *prior_box_var_data,
const T *target_box_data,
const int row,
const int col,
const int len,
const bool normalized,
const T prior_box_var_size,
const float *variance,
const int var_size,
T *output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
const int row_idx = idx / col;
const int col_idx = idx % col;
T prior_box_width = prior_box_data[col_idx * len + 2] -
prior_box_data[col_idx * len] + (normalized == false);
T prior_box_height = prior_box_data[col_idx * len + 3] -
prior_box_data[col_idx * len + 1] +
(normalized == false);
T prior_box_center_x = prior_box_data[col_idx * len] + prior_box_width / 2;
T prior_box_center_y =
prior_box_data[col_idx * len + 1] + prior_box_height / 2;
T target_box_center_x =
(target_box_data[row_idx * len + 2] + target_box_data[row_idx * len]) /
2;
T target_box_center_y = (target_box_data[row_idx * len + 3] +
target_box_data[row_idx * len + 1]) /
2;
T target_box_width = target_box_data[row_idx * len + 2] -
target_box_data[row_idx * len] + (normalized == false);
T target_box_height = target_box_data[row_idx * len + 3] -
target_box_data[row_idx * len + 1] +
(normalized == false);
output[idx * len] =
(target_box_center_x - prior_box_center_x) / prior_box_width;
output[idx * len + 1] =
(target_box_center_y - prior_box_center_y) / prior_box_height;
output[idx * len + 2] = log(fabs(target_box_width / prior_box_width));
output[idx * len + 3] = log(fabs(target_box_height / prior_box_height));
if (prior_box_var_data) {
int prior_var_offset = col_idx * len;
output[idx * len] /= prior_box_var_data[prior_var_offset];
output[idx * len + 1] /= prior_box_var_data[prior_var_offset + 1];
output[idx * len + 2] /= prior_box_var_data[prior_var_offset + 2];
output[idx * len + 3] /= prior_box_var_data[prior_var_offset + 3];
} else if (var_size == 4) {
for (int k = 0; k < 4; ++k) {
output[idx * len + k] /= static_cast<T>(variance[k]);
}
}
}
}
template <typename T>
__global__ void DecodeCenterSizeKernel(const T *prior_box_data,
const T *prior_box_var_data,
const T *target_box_data,
const int row,
const int col,
const int len,
const bool normalized,
const T prior_box_var_size,
const float *variance,
const int var_size,
const int axis,
T *output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
int prior_box_offset = 0;
if (idx < row * col) {
const int col_idx = idx % col;
const int row_idx = idx / col;
prior_box_offset = axis == 0 ? col_idx * len : row_idx * len;
T prior_box_width = prior_box_data[prior_box_offset + 2] -
prior_box_data[prior_box_offset] +
(normalized == false);
T prior_box_height = prior_box_data[prior_box_offset + 3] -
prior_box_data[prior_box_offset + 1] +
(normalized == false);
T prior_box_center_x =
prior_box_data[prior_box_offset] + prior_box_width / 2;
T prior_box_center_y =
prior_box_data[prior_box_offset + 1] + prior_box_height / 2;
T target_box_width, target_box_height;
T target_box_center_x, target_box_center_y;
T box_var_x = T(1), box_var_y = T(1);
T box_var_w = T(1), box_var_h = T(1);
if (prior_box_var_data) {
int prior_var_offset = axis == 0 ? col_idx * len : row_idx * len;
box_var_x = prior_box_var_data[prior_var_offset];
box_var_y = prior_box_var_data[prior_var_offset + 1];
box_var_w = prior_box_var_data[prior_var_offset + 2];
box_var_h = prior_box_var_data[prior_var_offset + 3];
} else if (var_size == 4) {
box_var_x = static_cast<T>(variance[0]);
box_var_y = static_cast<T>(variance[1]);
box_var_w = static_cast<T>(variance[2]);
box_var_h = static_cast<T>(variance[3]);
}
target_box_width =
exp(box_var_w * target_box_data[idx * len + 2]) * prior_box_width;
target_box_height =
exp(box_var_h * target_box_data[idx * len + 3]) * prior_box_height;
target_box_center_x =
box_var_x * target_box_data[idx * len] * prior_box_width +
prior_box_center_x;
target_box_center_y =
box_var_y * target_box_data[idx * len + 1] * prior_box_height +
prior_box_center_y;
output[idx * len] = target_box_center_x - target_box_width / 2;
output[idx * len + 1] = target_box_center_y - target_box_height / 2;
output[idx * len + 2] =
target_box_center_x + target_box_width / 2 - (normalized == false);
output[idx * len + 3] =
target_box_center_y + target_box_height / 2 - (normalized == false);
}
}
template <typename T, typename Context>
void BoxCoderKernel(const Context &dev_ctx,
const DenseTensor &prior_box,
const paddle::optional<DenseTensor> &prior_box_var,
const DenseTensor &target_box,
const std::string &code_type_str,
bool normalized,
int axis,
const std::vector<float> &variance,
DenseTensor *output_box) {
const T *prior_box_data = prior_box.template data<T>();
const T *target_box_data = target_box.template data<T>();
const T *prior_box_var_data = nullptr;
auto prior_box_var_size = 0;
if (prior_box_var) {
PADDLE_ENFORCE_EQ(variance.empty(),
true,
phi::errors::InvalidArgument(
"Input 'PriorBoxVar' and attribute 'variance'"
" of BoxCoder operator should not be used at the "
"same time."));
prior_box_var_data = prior_box_var->data<T>();
prior_box_var_size = prior_box_var->dims().size();
}
if (!(variance.empty())) {
PADDLE_ENFORCE_EQ(static_cast<int>(variance.size()),
4,
phi::errors::InvalidArgument(
"Size of attribute 'variance' in BoxCoder operator"
" should be 4. But received size is %d",
variance.size()));
}
if (target_box.lod().size()) {
PADDLE_ENFORCE_EQ(target_box.lod().size(),
1,
phi::errors::InvalidArgument(
"Input 'TargetBox' of BoxCoder operator only"
" supports LoD with one level."));
}
const int var_size = static_cast<int>(variance.size());
auto code_type = phi::funcs::GetBoxCodeType(code_type_str);
auto row = target_box.dims()[0];
auto col = prior_box.dims()[0];
if (code_type == phi::funcs::BoxCodeType::kDecodeCenterSize) {
col = target_box.dims()[1];
}
auto len = prior_box.dims()[1];
int block = 512;
int grid = (row * col + block - 1) / block;
int bytes = var_size * sizeof(float);
auto dev_var = phi::memory_utils::Alloc(
dev_ctx.GetPlace(),
bytes,
phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream())));
float *dev_var_data = reinterpret_cast<float *>(dev_var->ptr());
auto cplace = phi::CPUPlace();
const auto gplace = dev_ctx.GetPlace();
memory_utils::Copy(
gplace, dev_var_data, cplace, &variance[0], bytes, dev_ctx.stream());
output_box->Resize({row, col, len});
dev_ctx.template Alloc<T>(output_box);
T *output = output_box->data<T>();
if (code_type == phi::funcs::BoxCodeType::kEncodeCenterSize) {
EncodeCenterSizeKernel<T>
<<<grid, block, 0, dev_ctx.stream()>>>(prior_box_data,
prior_box_var_data,
target_box_data,
row,
col,
len,
normalized,
prior_box_var_size,
dev_var_data,
var_size,
output);
} else if (code_type == phi::funcs::BoxCodeType::kDecodeCenterSize) {
DecodeCenterSizeKernel<T>
<<<grid, block, 0, dev_ctx.stream()>>>(prior_box_data,
prior_box_var_data,
target_box_data,
row,
col,
len,
normalized,
prior_box_var_size,
dev_var_data,
var_size,
axis,
output);
}
}
} // namespace phi
PD_REGISTER_KERNEL(
box_coder, GPU, ALL_LAYOUT, phi::BoxCoderKernel, float, double) {}
|
303905cb2df93f4130710b7d0516039f475ebacf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgemm_reduce.cu normal z -> d, Fri Sep 11 18:29:20 2015
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_dblas3
********************************************************************/
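/*
    Example call (an illustrative sketch, not part of the original source): for
    a tall-skinny update C(m x n) := alpha*A^T*B + beta*C with m = n = 64 and
    k = 10000, and device arrays dA, dB, dC already allocated with suitable
    leading dimensions,

        magmablas_dgemm_reduce( 64, 64, 10000,
                                alpha, dA, ldda,
                                       dB, lddb,
                                beta,  dC, lddc );

    The pointer and leading-dimension names above are placeholders.
*/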
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( dgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
| 303905cb2df93f4130710b7d0516039f475ebacf.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgemm_reduce.cu normal z -> d, Fri Sep 11 18:29:20 2015
*/
#include "common_magma.h"
#include "magma_templates.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_dgemm_reduce,
// because it depends on the CUDA architecture at runtime.
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void dgemm_reduce_kernel(
int m, int n, int k,
double alpha,
const double* __restrict__ dA, int lda,
const double* __restrict__ dB, int ldb,
double beta,
double * __restrict__ dC, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int tx = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n) {
dA += (blockIdx.x*BLK_M + threadIdx.y) * lda;
dB += (blockIdx.y*BLK_N + threadIdx.z) * ldb;
dC += blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
// was: sum[BLK_M][BLK_N+1][BLK_K+1];
// moved 3rd dimension to 1st dimension to make magma_sum_reduce_3d interface nicer.
__shared__ double sum[BLK_K][BLK_M+1][BLK_N+1];
double lsum;
/* w := v**H * C */
lsum = MAGMA_D_ZERO;
for( int j = tx; j < k; j += BLK_K )
lsum += MAGMA_D_CNJG( dA[j] )* dB[j];
sum[tx][threadIdx.y][threadIdx.z] = lsum;
magma_sum_reduce_3d< BLK_K, BLK_M+1, BLK_N+1 >( tx, threadIdx.y, threadIdx.z, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_D_EQUAL(beta, MAGMA_D_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[0][threadIdx.y][threadIdx.z];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[0][threadIdx.y][threadIdx.z];
}
}
#endif
}
//==============================================================================
/**
Purpose
-------
DGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
@ingroup magma_dblas3
********************************************************************/
extern "C" void
magmablas_dgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
double alpha,
magmaDouble_const_ptr dA, magma_int_t ldda,
magmaDouble_const_ptr dB, magma_int_t lddb,
double beta,
magmaDouble_ptr dC, magma_int_t lddc )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( k < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( lddb < k )
info = -8;
else if ( lddc < m )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 2
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N)); // == 4
dim3 blocks( magma_ceildiv( m, BLK_M ), magma_ceildiv( n, BLK_N ) );
dim3 threads( BLK_K, BLK_M, BLK_N );
dgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, dA, ldda, dB, lddb, beta, dC, lddc );
}
}
//==============================================================================
|
18e50116f8b951a92ad0bfb20078ad35033db1c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wb.h"
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
#define GPU_F __device__
#define CPU_F __host__
#define GCPU_F GPU_F CPU_F
template< typename T >
GCPU_F T clamp( T val, T mn, T mx ) {
if( val < mn ) return mn;
if( val > mx ) return mx;
return val;
}
GCPU_F int to1DIndex( int i, int j, int k, int width, int depth ) {
return ((i * width) + j) * depth + k;
}
__global__ void stencil(float *output, float *input, int width, int height, int depth) {
for( int i = 0; i < height; i++ ) {
for( int j = 0; j < width; j++ ) {
for( int k = 0; k < depth - 1; k++ ) {
auto res = input[ to1DIndex( i, j, k - 1, width, depth ) ]
+ input[ to1DIndex( i, j, k + 1, width, depth ) ]
+ input[ to1DIndex( i, j - 1, k, width, depth ) ]
+ input[ to1DIndex( i, j + 1, k, width, depth ) ]
+ input[ to1DIndex( i - 1, j, k, width, depth ) ]
+ input[ to1DIndex( i + 1, j, k, width, depth ) ];
output[ to1DIndex( i, j, k, width, depth ) ] = clamp( res, 0.0f, 255.0f );
}
}
}
}
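// Note on the kernel above: launch_stencil below starts it with a single thread
// (grid and block of 1), so the triple loop walks the whole
// height x width x depth volume serially. Each output element is the clamped
// sum of the six axis neighbours of the input element; as written, the loops
// also visit the boundary planes, where neighbour reads such as i-1 at i == 0
// or i+1 at i == height-1 index outside the input volume.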
static void launch_stencil(float *deviceOutputData, float *deviceInputData, int width, int height, int depth) {
hipLaunchKernelGGL(( stencil), dim3(1), dim3(1), 0, 0, deviceOutputData, deviceInputData, width, height, depth );
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
float *hostInputData;
float *hostOutputData;
float *deviceInputData;
float *deviceOutputData;
arg = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(arg, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = wbImage_getData(output);
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputData, width * height * depth * sizeof(float));
hipMalloc((void **)&deviceOutputData, width * height * depth * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputData, hostInputData, width * height * depth * sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputData, deviceOutputData, width * height * depth * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
hipFree(deviceInputData);
hipFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
| 18e50116f8b951a92ad0bfb20078ad35033db1c7.cu | #include "wb.h"
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
#define GPU_F __device__
#define CPU_F __host__
#define GCPU_F GPU_F CPU_F
template< typename T >
GCPU_F T clamp( T val, T mn, T mx ) {
if( val < mn ) return mn;
if( val > mx ) return mx;
return val;
}
GCPU_F int to1DIndex( int i, int j, int k, int width, int depth ) {
return ((i * width) + j) * depth + k;
}
__global__ void stencil(float *output, float *input, int width, int height, int depth) {
for( int i = 0; i < height; i++ ) {
for( int j = 0; j < width; j++ ) {
for( int k = 0; k < depth - 1; k++ ) {
auto res = input[ to1DIndex( i, j, k - 1, width, depth ) ]
+ input[ to1DIndex( i, j, k + 1, width, depth ) ]
+ input[ to1DIndex( i, j - 1, k, width, depth ) ]
+ input[ to1DIndex( i, j + 1, k, width, depth ) ]
+ input[ to1DIndex( i - 1, j, k, width, depth ) ]
+ input[ to1DIndex( i + 1, j, k, width, depth ) ];
output[ to1DIndex( i, j, k, width, depth ) ] = clamp( res, 0.0f, 255.0f );
}
}
}
}
static void launch_stencil(float *deviceOutputData, float *deviceInputData, int width, int height, int depth) {
stencil<<<1, 1>>>( deviceOutputData, deviceInputData, width, height, depth );
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int width;
int height;
int depth;
char *inputFile;
wbImage_t input;
wbImage_t output;
float *hostInputData;
float *hostOutputData;
float *deviceInputData;
float *deviceOutputData;
arg = wbArg_read(argc, argv);
inputFile = wbArg_getInputFile(arg, 0);
input = wbImport(inputFile);
width = wbImage_getWidth(input);
height = wbImage_getHeight(input);
depth = wbImage_getChannels(input);
output = wbImage_new(width, height, depth);
hostInputData = wbImage_getData(input);
hostOutputData = wbImage_getData(output);
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputData, width * height * depth * sizeof(float));
cudaMalloc((void **)&deviceOutputData, width * height * depth * sizeof(float));
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputData, hostInputData, width * height * depth * sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
launch_stencil(deviceOutputData, deviceInputData, width, height, depth);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputData, deviceOutputData, width * height * depth * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
cudaFree(deviceInputData);
cudaFree(deviceOutputData);
wbImage_delete(output);
wbImage_delete(input);
return 0;
}
|
e9430c3d4a0f7b8333671bee85770573715a6c7e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
/**
* collection communication: all_gather.
* @param send_count the count of elements will be sent.
* @param send_tensors the send tensors of multi GPUs.
* @param recv_tensors the recv tensors of multi GPUs.
 * @param resource_manager all gpus' device resources (NCCL communicators and streams).
*/
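/* Note: with ncclAllGather each of the total_gpu_count ranks contributes
 * send_count elements, so every tensor in recv_tensors must provide room for
 * total_gpu_count * send_count elements, laid out in rank order. */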
template <typename Type>
void SparseEmbeddingFunctors::all_gather(size_t send_count, const Tensors2<Type> &send_tensors,
Tensors2<Type> &recv_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
// need to know the Type
ncclDataType_t type;
switch (sizeof(Type)) {
case 2:
type = ncclHalf;
break;
case 4:
type = ncclFloat;
break;
default:
CK_THROW_(Error_t::WrongInput, "Error: Type not support by now");
}
// for multi GPUs, use NCCL to do All-Gather
if (total_gpu_count > 1) {
CK_NCCL_THROW_(ncclGroupStart());
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
CK_NCCL_THROW_(ncclAllGather(send_tensors[id].get_ptr(), // send buff
recv_tensors[id].get_ptr(), // recv buff
send_count, type, local_gpu->get_nccl(),
local_gpu->get_stream()));
}
CK_NCCL_THROW_(ncclGroupEnd());
}
// for single GPU, just do memcpyD2D
else { // total_gpu_count == 1
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context(local_gpu->get_device_id());
CK_CUDA_THROW_(hipMemcpyAsync(recv_tensors[0].get_ptr(), send_tensors[0].get_ptr(),
send_count * sizeof(Type), hipMemcpyDeviceToDevice,
local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::all_gather<float>(size_t send_count,
const Tensors2<float> &send_tensors,
Tensors2<float> &recv_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::all_gather<__half>(size_t send_count,
const Tensors2<__half> &send_tensors,
Tensors2<__half> &recv_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR | e9430c3d4a0f7b8333671bee85770573715a6c7e.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
/**
* collection communication: all_gather.
* @param send_count the count of elements will be sent.
* @param send_tensors the send tensors of multi GPUs.
* @param recv_tensors the recv tensors of multi GPUs.
 * @param resource_manager all gpus' device resources (NCCL communicators and streams).
*/
template <typename Type>
void SparseEmbeddingFunctors::all_gather(size_t send_count, const Tensors2<Type> &send_tensors,
Tensors2<Type> &recv_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
size_t total_gpu_count = resource_manager.get_global_gpu_count();
// need to know the Type
ncclDataType_t type;
switch (sizeof(Type)) {
case 2:
type = ncclHalf;
break;
case 4:
type = ncclFloat;
break;
default:
CK_THROW_(Error_t::WrongInput, "Error: Type not support by now");
}
// for multi GPUs, use NCCL to do All-Gather
if (total_gpu_count > 1) {
CK_NCCL_THROW_(ncclGroupStart());
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
CK_NCCL_THROW_(ncclAllGather(send_tensors[id].get_ptr(), // send buff
recv_tensors[id].get_ptr(), // recv buff
send_count, type, local_gpu->get_nccl(),
local_gpu->get_stream()));
}
CK_NCCL_THROW_(ncclGroupEnd());
}
// for single GPU, just do memcpyD2D
else { // total_gpu_count == 1
const auto &local_gpu = resource_manager.get_local_gpu(0);
CudaDeviceContext context(local_gpu->get_device_id());
CK_CUDA_THROW_(cudaMemcpyAsync(recv_tensors[0].get_ptr(), send_tensors[0].get_ptr(),
send_count * sizeof(Type), cudaMemcpyDeviceToDevice,
local_gpu->get_stream()));
}
return;
}
template void SparseEmbeddingFunctors::all_gather<float>(size_t send_count,
const Tensors2<float> &send_tensors,
Tensors2<float> &recv_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::all_gather<__half>(size_t send_count,
const Tensors2<__half> &send_tensors,
Tensors2<__half> &recv_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR |
bc86fab7d44c1c85e7d372847333535491188e12.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsmagmaDoubleComplex.h"
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
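// The analyser above builds the in-degree array of the dependency graph: one
// atomicAdd per nonzero, so that afterwards d_graphInDegree[i] holds the number
// of nonzeros whose row index is i, i.e. the dependencies that must be
// eliminated before unknown i can be finalised in the sync-free substitution.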
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaDoubleComplex_ptr d_b,
magmaDoubleComplex_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0);
const magmaDoubleComplex coef = one / d_cscVal[pos];
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
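// The three MAGMA_CSC_SYNCFREE_OPT_WARP_* strategies used above differ only in
// how a warp is mapped onto the update of column global_x_id:
//   WARP_NNZ  - lanes are spread across the nonzeros of the column, each lane
//               updating all rhs right-hand sides of its row;
//   WARP_RHS  - nonzeros are visited sequentially and lanes are spread across
//               the right-hand sides;
//   WARP_AUTO - picks between the two per column, based on the column length
//               versus rhs.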
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((double)nnz / (double)num_threads);
hipMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
hipLaunchKernelGGL(( sptrsv_syncfree_analyser), dim3(num_blocks), dim3(num_threads) , 0, 0,
drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
hipMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), hipMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
hipMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), hipMemcpyDeviceToDevice);
// clear d_x for atomic operations
hipMemset(dx, 0, sizeof(magmaDoubleComplex) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((double)m /
(double)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
hipLaunchKernelGGL(( sptrsm_syncfree_executor), dim3(num_blocks), dim3(num_threads) , 0, 0,
dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
| bc86fab7d44c1c85e7d372847333535491188e12.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@precisions normal z -> c d s
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsmagmaDoubleComplex.h"
#include <cuda.h> // for CUDA_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaDoubleComplex_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaDoubleComplex_ptr d_b,
magmaDoubleComplex_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const magmaDoubleComplex one = MAGMA_Z_MAKE( 1.0, 0.0);
const magmaDoubleComplex coef = one / d_cscVal[pos];
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddmagmaDoubleComplex(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((double)nnz / (double)num_threads);
cudaMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
sptrsv_syncfree_analyser<<< num_blocks, num_threads >>>
(drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
cudaMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), cudaMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_zgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
cudaMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), cudaMemcpyDeviceToDevice);
// clear d_x for atomic operations
cudaMemset(dx, 0, sizeof(magmaDoubleComplex) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((double)m /
(double)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
sptrsm_syncfree_executor<<< num_blocks, num_threads >>>
(dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
|
376f2b465bd3272aadf1e005cd4f7c9763409343.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include "enum_header.h"
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// REMEMBER TO PUT __host__ __device__ IN FRONT OF CLASS METHODS
#define PI 3.14159265358979323846
#define CHECK(x) do {\
hipError_t err =(x);\
if (err !=hipSuccess){\
fprintf(stderr, "API error"\
"%s:%d Returned:%d\n", \
__FILE__, __LINE__, err);\
exit(1);\
}\
} while(0)
//double* two_dim_index(double* vector, int i, int j, double m, int b);
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
//int m_int= (int)m;
double* p;
//specify index layout here
p=&vector[b*(i)+(j)];
return p;
}
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){
int m_int = (int)m;
double* p;
//specify index layout here
//p=&matrix[(m_int)*b*(k)+(m_int)*(j)+(i)];
p=&matrix[i*b*num_assets+j*num_assets+k];
return p;
}
__device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
double f=0, x=0;
//x=(1/(sigma*sqrt(delta_t)))*(log(Xnew)-log(Xold)-(r-delta-0.5*sigma*sigma)*delta_t);
x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
//f= (1/(sigma*sqrt(delta_t)*Xnew))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); // this is the transition density
f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
return f;
}
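// densityGPU above evaluates the Gaussian transition density of the log-price
// under geometric Brownian motion: Xnew | Xold ~ N(Xold + (r - delta -
// 0.5*sigma^2)*delta_t, sigma^2*delta_t). The commented-out lines are the
// equivalent density expressed in the price (rather than log-price) variable.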
__global__ void init(unsigned int seed, hiprandState_t* states) {
int idx=blockDim.x*blockIdx.x + threadIdx.x;
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
idx, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[idx]);
}
__device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(h-Strike>0){
h=h-Strike;
}
else{
h=0;
}
return h;
}
__device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(Strike-h>0){
h=Strike-h;
}
else{
h=0;
}
return h;
}
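// GeometricPayOffCallV / GeometricPayOffPutV evaluate a geometric-average
// basket option on the log-prices X: with h = (prod_l exp(X[l]))^(1/num_assets)
// the call pays max(h - Strike, 0) and the put pays max(Strike - h, 0).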
__device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){//note: S_new used to be just S
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Beginning \n");}
//double density_product,
double sum, w_s;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("in function m =%i /n",m);}
for(int h=0; h<b; h++){ //h=k
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Outside loop, i=%i \n", h);}
sum=0;
w_s=1;
for(int kk=0; kk<num_assets; kk++){
//w_s*=densityGPU(*two_dim_indexGPU(S, i, kk, m, num_assets), *three_dim_indexGPU(X_device, (i+1), h, kk, m, b), sigma_device[kk], r, delta_device[kk], delta_t);
w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t);
}
/*
clock_t start_time =clock();
clock_t stop_time =clock();
int time=stop_time-start_time;
if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("result at i=%i , = %i\n",i, time);}
*/
/*
density_product=1;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("after first inside loop \n");}
for(int g=0; g<b; g++){ //g=l
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("inside second loop i=%i \n", g);}
for(int gg=0; gg<num_assets; gg++){
density_product*=densityGPU(*three_dim_indexGPU(X_device, i, g, gg, m, b), *three_dim_indexGPU(X_device, (i+1), h, gg, m, b), sigma_device[gg], r, delta_device[gg], delta_t);
}
sum+=(1/((double)b))*density_product;
}
*/
sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b);
w_s = (((double)b)*w_s)/sum;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("w_s=%f \n", w_s);}
//*two_dim_indexGPU(S_Weights, i, h, m, b)=w_s;
S_Weights[h]=w_s;
}
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("End \n");}
}
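// S_weights computes, for each of the b mesh states at date i+1, the
// likelihood-ratio weight
//   w_s[h] = b * prod_k density(S_new[k] -> X[i+1][h][k]) / weight_denominator[i][h],
// where weight_denominator was precomputed outside the kernel as the mixture
// density (1/b) * sum_l prod_k density(X[i][l][k] -> X[i+1][h][k]); the
// commented-out block above is the on-the-fly version of that denominator.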
__global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, hiprandState_t* states, double* results_dev, double* asset_amount_device){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 outside \n");}
if(idx<N){
//if(blockDim.x*blockIdx.x + threadIdx.x==N-1){printf("n-1 \n");}
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 inside \n");}
//GeometricPayOffPut thePayOff(strike);
//GeometricPayOffPut payoff(strike);
//enum Containers { vector, matrix };
//Containers Vector = vector;
//Containers Matrix = matrix;
double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z;
//srand((unsigned)time(NULL));
//std::random_device rd;
//std::default_random_engine generator;
//generator.seed( rd() );
//std::normal_distribution<double> distribution(0.0,1.0);
/// ARRAY CODE
//const int S_N=(m)*num_assets;
//const int S_W_N=(m)*b;
const int S_N= num_assets;
const int S_W_N= b;
double* S_new;
S_new= new double[S_N];
//double s_new[S_new_N];
//S_new=s_new;
//double* S_old;
//S_old=new double[S_N];
double* S_Weights;
S_Weights=new double[S_W_N];
//double s_weights[S_W_new_N];
//S_Weights=s_weights;
//double* S_new;
//double* S_old;
//double* S_Weights;
/*
double s_new[1];
//double s_old[1];
double s_weights[250];
S_new=s_new;
//S_old=s_old;
S_Weights=s_weights;
*/
//S_Weights=new double[250];
//S_new=new double[1];
//if(idx==0){printf("X[0][0][0]= %f \n",*three_dim_indexGPU(X_device,0,0,0,m,b));}
//if(idx==0){printf("before the loop");}
int i=0;
do {
if(i==0){
for(int ll=0; ll<num_assets; ll++){
//Z=boxmuller();
// NEED TO CHANGE THE RANDOM NUMBER GENERATOR
//Z=distribution(generator);
Z=hiprand_normal_double(&states[idx]);
S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, ll, m, num_assets)=S_i;
S_new[ll]=S_i;
}
}
else{
for(int jj=0; jj<num_assets; jj++){
//Z=boxmuller();
//Z=distribution(generator);
Z=hiprand_normal_double(&states[idx]);
//if(idx==0){printf("random number=%f /n", Z);}
//S_i=(*two_dim_indexGPU(S, (i-1), jj, m, num_assets)) + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, jj, m, num_assets)=S_i;
S_new[jj]=S_i;
}
}
//if(idx==0){printf("before the call, m =%i /n", m);}
if(i<m-1){
//S_weights(tempvec, S_Weights, X, S, m, b, sigma, delta, delta_t, asset_amount, r, i );
//S_weights(S_Weights, X_device, S, m, b, sigma_device, delta_device, delta_t, num_assets, r, i );
S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device);
}
double con_val=0; //continuation value variable
sum=0;
if(i==m-1){
C=0;//continuation value at the last time step
}
else{
for(int k=0; k<b; k++){
//weight= * two_dim_indexGPU(S_Weights, i, k, m, b);
weight= S_Weights[k];
//con_val=V[(m-1)-i-1][k];
con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b);
sum+=(weight) * (con_val);
}
//con_val=inner_product(b, first_vector, second_vector);
C=(1/(double)b)*sum; //continuation value
// C=(1/(double)b)*con_val;
}
//H=Payoff(S, strike, asset_amount, i)*exp(-r*delta_t*((i+1)));
//H=thePayOff(S, i, 0, m, num_assets, Vector, num_assets)*exp(-r*delta_t*((i+1)));
H= GeometricPayOffPutV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1)));
i=i+1;
/*for(int copy=0; copy<num_assets; copy++){
S_old[copy]=S_new[copy];
}*/
}while(H<C);//this will stop once H is less then the continuation value. at m-1, c=0 therefore m-1 is the max amount of loops.
v_0=H;
//if(idx==0){printf("result %i=%f", idx, v_0);}
results_dev[idx]=v_0;
delete[] S_new;
//delete[] S_old;
delete[] S_Weights;
//return v_0;
}
}
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations ){
//m=int(m);
//printf("at the start of pathestimator m=%f /n", m);
//printf("Ib serial X[0][0][0]= %f \n",*three_dim_index(X,0,0,0,m,b));
hipError_t error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
;
int N= Path_estimator_iterations;
double* sigma_host;
sigma_host =sigma;
double* delta_host;
delta_host =delta;
double* X0_host;
X0_host =X0;
double* asset_amount_host;
asset_amount_host =asset_amount;
int m_int=(int)m;
//printf("at the start of pathestimator m_int=%i /n", m_int);
int X_N=(m_int) * b * (num_assets);
int W_N=(m_int-1) * b;
int V_N=(m_int) * b;
int delta_N= num_assets;
int sigma_N=num_assets;
int X0_N=num_assets;
int asset_amount_N = num_assets;
double* X_device;
double* V_device;
double* weight_denominator_device;
double* sigma_device;
double* delta_device;
double* X0_device;
double* asset_amount_device;
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipMalloc((void**) &X_device, X_N*sizeof(double) );
hipMemcpy(X_device, X, X_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &V_device, V_N*sizeof(double) );
hipMemcpy(V_device, V, V_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &weight_denominator_device, W_N*sizeof(double) );
hipMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &X0_device, X0_N*sizeof(double) );
hipMemcpy(X0_device, X0_host, X0_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &sigma_device, sigma_N*sizeof(double) );
hipMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &delta_device, delta_N*sizeof(double) );
hipMemcpy(delta_device, delta_host, delta_N*sizeof(double), hipMemcpyHostToDevice);
hipMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) );
hipMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), hipMemcpyHostToDevice);
//dim3 gridDim((int)ceil(N/512.0));
//printf("the grid dim is:%i\n",(int)ceil(N/512.0));
//dim3 blockDim(512);
dim3 gridDim((int)ceil(N/512.0));
dim3 blockDim(512);
/*if(N>512){
gridDim()= ceil(N/521);
}
*/
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double* results;
results = new double[N];
double* results_dev;
hipMalloc((void**) &results_dev, N*sizeof(double) );
// CALL RANDOM SEEDING KERNEL HERE
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hiprandState_t* states;
hipMalloc((void**) &states, N * sizeof(hiprandState_t));
hipLaunchKernelGGL((init), dim3(gridDim), dim3(blockDim), 0, 0, time(0), states);
hipDeviceSynchronize();
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipDeviceSetLimit(hipLimitMallocHeapSize, 80000000*sizeof(double));
//size_t size;
//hipDeviceGetLimit(&size, hipLimitMallocHeapSize);
//printf("Heap size found to be %d\n",(int)size);
//printf("after");
hipLaunchKernelGGL((PathEstimatorKernel), dim3(gridDim), dim3(blockDim), 0, 0, X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device);
hipDeviceSynchronize();
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipMemcpy(results, results_dev, sizeof(double)*N, hipMemcpyDeviceToHost);
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double result=0;
for(int f=0; f<Path_estimator_iterations; f++){
result+=results[f];
//printf("result %i =%f\n", f, results[f]);
}
result=(1/double(N))*result;
delete[] results;
error = hipGetLastError();
if( error != hipSuccess )
{
std::cout << hipGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
hipFree(X_device);
hipFree(V_device);
hipFree(weight_denominator_device);
hipFree(sigma_device);
hipFree(delta_device);
hipFree(X0_device);
hipFree(results_dev);
hipFree(asset_amount_device);
hipDeviceReset();
return result;
//hipDeviceReset();
}
| 376f2b465bd3272aadf1e005cd4f7c9763409343.cu | #include <cuda.h>
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include "enum_header.h"
#include <unistd.h>
#include <stdio.h>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
// REMEMBER TO PUT __host__ __device__ IN FRONT OF CLASS METHODS
#define PI 3.14159265358979323846
#define CHECK(x) do {\
cudaError_t err =(x);\
if (err !=cudaSuccess){\
fprintf(stderr, "API error"\
"%s:%d Returned:%d\n", \
__FILE__, __LINE__, err);\
exit(1);\
} while(0)
//double* two_dim_index(double* vector, int i, int j, double m, int b);
double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets);
__device__ double* two_dim_indexGPU(double* vector, int i, int j, double m, int b){
//int m_int= (int)m;
double* p;
//specify index layout here
p=&vector[b*(i)+(j)];
return p;
}
__device__ double* three_dim_indexGPU(double* matrix, int i, int j, int k, double m, int b, int num_assets){
int m_int = (int)m;
double* p;
//specify index layout here
//p=&matrix[(m_int)*b*(k)+(m_int)*(j)+(i)];
p=&matrix[i*b*num_assets+j*num_assets+k];
return p;
}
__device__ double densityGPU(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
double f=0, x=0;
//x=(1/(sigma*sqrt(delta_t)))*(log(Xnew)-log(Xold)-(r-delta-0.5*sigma*sigma)*delta_t);
x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
//f= (1/(sigma*sqrt(delta_t)*Xnew))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); // this is the transition density
f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
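// Note: the active branch above evaluates a normal density on log-price increments,
// i.e. Xold/Xnew are assumed to already hold log-prices (the payoff functions below
// apply exp()); the commented lines are the lognormal-price variant with the 1/Xnew Jacobian.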
return f;
}
__global__ void init(unsigned int seed, curandState_t* states) {
int idx=blockDim.x*blockIdx.x + threadIdx.x;
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
idx, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[idx]);
}
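// Payoff helpers: value of a European call/put on the geometric mean of the asset
// prices; X holds log-prices, hence the exp() applied to each component below.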
__device__ double GeometricPayOffCallV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(h-Strike>0){
h=h-Strike;
}
else{
h=0;
}
return h;
}
__device__ double GeometricPayOffPutV(double* X, double m, int b, int num_assets, double Strike){
double h;
h=1;
for(int l=0; l<num_assets; l++){
// h*=exp(X[i][j][l]);
//h*= exp(*two_dim_indexGPU(X, i, l, m, b));
h*=exp(X[l]);
}
h=pow(h,1.0/(num_assets));
if(Strike-h>0){
h=Strike-h;
}
else{
h=0;
}
return h;
}
__device__ void S_weights(double* S_Weights, double* X_device, double* S_new, int m, int b, double* sigma_device, double* delta_device, double delta_t, int num_assets, double r , int i, double* weight_denominator_device ){//note: S_new used to be just S
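// For each of the b mesh nodes at time step i+1, compute a likelihood-ratio weight
// proportional to the product (over assets) of one-step transition densities from the
// current path point S_new to that node, normalised by the precomputed denominator
// weight_denominator_device (the average density into that node from all step-i nodes).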
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Beginning \n");}
//double density_product,
double sum, w_s;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("in function m =%i /n",m);}
for(int h=0; h<b; h++){ //h=k
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("Outside loop, i=%i \n", h);}
sum=0;
w_s=1;
for(int kk=0; kk<num_assets; kk++){
//w_s*=densityGPU(*two_dim_indexGPU(S, i, kk, m, num_assets), *three_dim_indexGPU(X_device, (i+1), h, kk, m, b), sigma_device[kk], r, delta_device[kk], delta_t);
w_s*=densityGPU(S_new[kk], *three_dim_indexGPU(X_device, (i+1), h, kk, m, b, num_assets), sigma_device[kk], r, delta_device[kk], delta_t);
}
/*
clock_t start_time =clock();
clock_t stop_time =clock();
int time=stop_time-start_time;
if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("result at i=%i , = %i\n",i, time);}
*/
/*
density_product=1;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("after first inside loop \n");}
for(int g=0; g<b; g++){ //g=l
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("inside second loop i=%i \n", g);}
for(int gg=0; gg<num_assets; gg++){
density_product*=densityGPU(*three_dim_indexGPU(X_device, i, g, gg, m, b), *three_dim_indexGPU(X_device, (i+1), h, gg, m, b), sigma_device[gg], r, delta_device[gg], delta_t);
}
sum+=(1/((double)b))*density_product;
}
*/
sum = *two_dim_indexGPU(weight_denominator_device, i, h, m-1, b);
w_s = (((double)b)*w_s)/sum;
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("w_s=%f \n", w_s);}
//*two_dim_indexGPU(S_Weights, i, h, m, b)=w_s;
S_Weights[h]=w_s;
}
//if(blockDim.x*blockIdx.x + threadIdx.x==0){printf("End \n");}
}
__global__ void PathEstimatorKernel(double* X_device, double* weight_denominator_device, double* V_device, double* delta_device, double* sigma_device, double* X0_device, int N, double strike, double r, double delta_t, int b, int m, int num_assets, curandState_t* states, double* results_dev, double* asset_amount_device){
int idx =blockDim.x*blockIdx.x + threadIdx.x;
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 outside \n");}
if(idx<N){
//if(blockDim.x*blockIdx.x + threadIdx.x==N-1){printf("n-1 \n");}
//if(blockDim.x*blockIdx.x + threadIdx.x==N+1){printf("n+1 inside \n");}
//GeometricPayOffPut thePayOff(strike);
//GeometricPayOffPut payoff(strike);
//enum Containers { vector, matrix };
//Containers Vector = vector;
//Containers Matrix = matrix;
double v_0, S_i, Z, C, H, sum, weight; //, w_s, sum_Z;
//srand((unsigned)time(NULL));
//std::random_device rd;
//std::default_random_engine generator;
//generator.seed( rd() );
//std::normal_distribution<double> distribution(0.0,1.0);
/// ARRAY CODE
//const int S_N=(m)*num_assets;
//const int S_W_N=(m)*b;
const int S_N= num_assets;
const int S_W_N= b;
double* S_new;
S_new= new double[S_N];
//double s_new[S_new_N];
//S_new=s_new;
//double* S_old;
//S_old=new double[S_N];
double* S_Weights;
S_Weights=new double[S_W_N];
//double s_weights[S_W_new_N];
//S_Weights=s_weights;
//double* S_new;
//double* S_old;
//double* S_Weights;
/*
double s_new[1];
//double s_old[1];
double s_weights[250];
S_new=s_new;
//S_old=s_old;
S_Weights=s_weights;
*/
//S_Weights=new double[250];
//S_new=new double[1];
//if(idx==0){printf("X[0][0][0]= %f \n",*three_dim_indexGPU(X_device,0,0,0,m,b));}
//if(idx==0){printf("before the loop");}
int i=0;
do {
if(i==0){
for(int ll=0; ll<num_assets; ll++){
//Z=boxmuller();
// NEED TO CHANGE THE RANDOM NUMBER GENERATOR
//Z=distribution(generator);
Z=curand_normal_double(&states[idx]);
S_i=X0_device[ll] + (r-delta_device[ll]-0.5*pow(sigma_device[ll], 2))*delta_t + sigma_device[ll]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, ll, m, num_assets)=S_i;
S_new[ll]=S_i;
}
}
else{
for(int jj=0; jj<num_assets; jj++){
//Z=boxmuller();
//Z=distribution(generator);
Z=curand_normal_double(&states[idx]);
//if(idx==0){printf("random number=%f /n", Z);}
//S_i=(*two_dim_indexGPU(S, (i-1), jj, m, num_assets)) + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
S_i=S_new[jj] + (r-delta_device[jj]-0.5*pow(sigma_device[jj], 2))*delta_t + sigma_device[jj]*sqrt(delta_t)*Z;
//tempnodevector.push_back(S_i);
//*two_dim_indexGPU(S, i, jj, m, num_assets)=S_i;
S_new[jj]=S_i;
}
}
//if(idx==0){printf("before the call, m =%i /n", m);}
if(i<m-1){
//S_weights(tempvec, S_Weights, X, S, m, b, sigma, delta, delta_t, asset_amount, r, i );
//S_weights(S_Weights, X_device, S, m, b, sigma_device, delta_device, delta_t, num_assets, r, i );
S_weights(S_Weights, X_device, S_new, m, b, sigma_device, delta_device, delta_t, num_assets, r, i, weight_denominator_device);
}
double con_val=0; //continuation value variable
sum=0;
if(i==m-1){
C=0;//continuation value at the last time step
}
else{
for(int k=0; k<b; k++){
//weight= * two_dim_indexGPU(S_Weights, i, k, m, b);
weight= S_Weights[k];
//con_val=V[(m-1)-i-1][k];
con_val= *two_dim_indexGPU(V_device, (m-1-i-1), k, m, b);
sum+=(weight) * (con_val);
}
//con_val=inner_product(b, first_vector, second_vector);
C=(1/(double)b)*sum; //continuation value
// C=(1/(double)b)*con_val;
}
//H=Payoff(S, strike, asset_amount, i)*exp(-r*delta_t*((i+1)));
//H=thePayOff(S, i, 0, m, num_assets, Vector, num_assets)*exp(-r*delta_t*((i+1)));
H= GeometricPayOffPutV(S_new, m, num_assets, num_assets, strike)*exp(-r*delta_t*((i+1)));
i=i+1;
/*for(int copy=0; copy<num_assets; copy++){
S_old[copy]=S_new[copy];
}*/
}while(H<C);//this stops once the exercise payoff H is at least the continuation value C; at the final step i==m-1, C=0, so the loop always terminates.
v_0=H;
//if(idx==0){printf("result %i=%f", idx, v_0);}
results_dev[idx]=v_0;
delete[] S_new;
//delete[] S_old;
delete[] S_Weights;
//return v_0;
}
}
double PathEstimator(double strike, double r, double delta_t, int b, double m, double sigma[], double delta[], double X0[], double* X, double* weight_denominator, double* V, double asset_amount[], int num_assets, int Path_estimator_iterations ){
//m=int(m);
//printf("at the start of pathestimator m=%f /n", m);
//printf("Ib serial X[0][0][0]= %f \n",*three_dim_index(X,0,0,0,m,b));
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
;
int N= Path_estimator_iterations;
double* sigma_host;
sigma_host =sigma;
double* delta_host;
delta_host =delta;
double* X0_host;
X0_host =X0;
double* asset_amount_host;
asset_amount_host =asset_amount;
int m_int=(int)m;
//printf("at the start of pathestimator m_int=%i /n", m_int);
int X_N=(m_int) * b * (num_assets);
int W_N=(m_int-1) * b;
int V_N=(m_int) * b;
int delta_N= num_assets;
int sigma_N=num_assets;
int X0_N=num_assets;
int asset_amount_N = num_assets;
double* X_device;
double* V_device;
double* weight_denominator_device;
double* sigma_device;
double* delta_device;
double* X0_device;
double* asset_amount_device;
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaMalloc((void**) &X_device, X_N*sizeof(double) );
cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &V_device, V_N*sizeof(double) );
cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &weight_denominator_device, W_N*sizeof(double) );
cudaMemcpy(weight_denominator_device, weight_denominator, W_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &X0_device, X0_N*sizeof(double) );
cudaMemcpy(X0_device, X0_host, X0_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &sigma_device, sigma_N*sizeof(double) );
cudaMemcpy(sigma_device, sigma_host, sigma_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &delta_device, delta_N*sizeof(double) );
cudaMemcpy(delta_device, delta_host, delta_N*sizeof(double), cudaMemcpyHostToDevice);
cudaMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) );
cudaMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), cudaMemcpyHostToDevice);
//dim3 gridDim((int)ceil(N/512.0));
//printf("the grid dim is:%i\n",(int)ceil(N/512.0));
//dim3 blockDim(512);
dim3 gridDim((int)ceil(N/512.0));
dim3 blockDim(512);
/*if(N>512){
gridDim()= ceil(N/521);
}
*/
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double* results;
results = new double[N];
double* results_dev;
cudaMalloc((void**) &results_dev, N*sizeof(double) );
// CALL RANDOM SEEDING KERNEL HERE
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
curandState_t* states;
cudaMalloc((void**) &states, N * sizeof(curandState_t));
init<<<gridDim, blockDim>>>(time(0), states);
cudaDeviceSynchronize();
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 80000000*sizeof(double));
//size_t size;
//cudaDeviceGetLimit(&size, cudaLimitMallocHeapSize);
//printf("Heap size found to be %d\n",(int)size);
//printf("after");
PathEstimatorKernel<<<gridDim, blockDim>>>(X_device, weight_denominator_device, V_device, delta_device, sigma_device, X0_device, N, strike, r, delta_t, b, m_int, num_assets, states, results_dev, asset_amount_device);
cudaDeviceSynchronize();
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaMemcpy(results, results_dev, sizeof(double)*N, cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
double result=0;
for(int f=0; f<Path_estimator_iterations; f++){
result+=results[f];
//printf("result %i =%f\n", f, results[f]);
}
result=(1/double(N))*result;
delete[] results;
error = cudaGetLastError();
if( error != cudaSuccess )
{
std::cout << cudaGetErrorString(error) << std::endl;
printf("found at line %d\n", __LINE__);
exit(1);
}
cudaFree(X_device);
cudaFree(V_device);
cudaFree(weight_denominator_device);
cudaFree(sigma_device);
cudaFree(delta_device);
cudaFree(X0_device);
cudaFree(results_dev);
cudaFree(asset_amount_device);
cudaDeviceReset();
return result;
//cudaDeviceReset();
}
|
14e95ef48a443d3c76c73fc745c2a7df9ac44690.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//MIT License
//
//Copyright (c) 2018 TU Braunschweig, Algorithms Group
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files (the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "operations.cuh"
#include "../subintervals.hpp"
#include <algcuda/device.hpp>
#include <algcuda/exit.cuh>
#include "run_kernel.cuh"
namespace circlepacking {
namespace lane_packing {
void find_critical_intervals_end_configuration();
}
}
using namespace circlepacking;
using namespace circlepacking::lane_packing;
using algcuda::Atomic_ordered_double;
constexpr int la_subintervals_2d = 4096;
constexpr int r1_subintervals_2d = 1024;
constexpr int r2_subintervals_2d = 1024;
constexpr int la_parallel_2d = 256;
constexpr int r1_parallel_2d = 256;
constexpr int r2_parallel_2d = 256;
constexpr int la_subintervals_3d = 1024;
constexpr int r1_subintervals_3d = 256;
constexpr int r2_subintervals_3d = 128;
constexpr int r3_subintervals_3d = 128;
constexpr int la_parallel_3d = 256;
constexpr int r1_parallel_3d = 128;
constexpr int r2_parallel_3d = 16;
constexpr int r3_parallel_3d = 16;
static inline __device__ IV compute_r1_range(IV la) {
// any disk above this can simply be placed on its own (we assume the disks fit into the lane)
double ub_by_single_placement = lower_bound_radius_single_placement(la.get_lb());
double ub_by_lane_width = (0.5*(1.0-la)).get_ub();
// any disk below this would immediately open a new lane
double lb_by_lane_width = (0.25*(1.0-la)).get_lb();
return {lb_by_lane_width, ub_by_lane_width < ub_by_single_placement ? ub_by_lane_width : ub_by_single_placement};
}
static inline __device__ IV compute_r2_range(IV la, IV r1) {
double lb_lane_width_r1 = __dmul_rd(0.5, __dadd_rd(1.0, -__dadd_ru(la.get_ub(), __dmul_ru(2.0, r1.get_ub()))));
if(lb_lane_width_r1 < 0.0) { lb_lane_width_r1 = 0.0; }
return {lb_lane_width_r1, r1.get_ub()};
}
static __device__ bool handle_inner_outer_2d(IV la, IV r1, IV r2) {
IV a = collision_angle_inner_outer(la, r1, r2) + semicircle_angle_outer(r2);
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
IV rho = (1.0 - 2.0*r2).square() - la.square();
rho.tighten_lb(0.0);
double lb_alpha_reusable = (a - semicircle_angle_inner(la,r1)).get_lb();
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, lb_alpha_reusable), rho.get_lb());
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_disk_area = __dadd_rd(lb_area2, __dmul_rd(0.5, lb_area1));
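	// Criticality test: the pair is handled (non-critical) if half of disk 1's area plus
	// disk 2's area is at least critical_ratio times the used lane area minus the part
	// that remains reusable for later disks.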
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __device__ bool handle_outer_inner_2d(IV la, IV r1, IV r2) {
IV a = collision_angle_outer_inner(la, r1, r2) + semicircle_angle_inner(la, r2);
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
IV rho = 1.0 - (la + 2.0*r2).square();
rho.tighten_lb(0.0);
double lb_alpha_reusable = (a-semicircle_angle_outer(r1)).get_lb();
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, lb_alpha_reusable), rho.get_lb());
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_disk_area = __dadd_rd(lb_area2, __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __global__ void kernel_find_critical_intervals_end_configuration_2d(IV outer_la, Atomic_ordered_double* first_problematic_la, bool output_criticals) {
for(int la_offset = blockIdx.x; la_offset < la_subintervals_2d; la_offset += gridDim.x) {
IV la = get_subinterval(outer_la, la_offset, la_subintervals_2d);
IV r1_range = compute_r1_range(la);
for(int r1_offset = blockIdx.y; r1_offset < r1_subintervals_2d; r1_offset += gridDim.y) {
IV r1_ = get_subinterval(r1_range, r1_offset, r1_subintervals_2d);
IV r2_range = compute_r2_range(la, r1_);
for(int r2_offset = threadIdx.x; r2_offset < r2_subintervals_2d; r2_offset += blockDim.x) {
IV r2 = get_subinterval(r2_range, r2_offset, r2_subintervals_2d);
IV r1 = r1_;
r1.tighten_lb(r2.get_lb());
if(__dmul_ru(2.0, r1.get_ub()) >= __dadd_rd(1.0, -la.get_ub())) {
printf("End configuration (2 disks): Circle possibly does not fit - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
algcuda::trap();
}
if(!handle_inner_outer_2d(la, r1, r2)) {
if(output_criticals) {
printf("End configuration (2 disks): Critical interval with r1 on the inside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
if(!handle_outer_inner_2d(la, r1, r2)) {
if(output_criticals) {
printf("End configuration (2 disks): Critical interval with r1 on the outside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
}
}
}
}
static inline __device__ IV compute_r3_range_3d(IV la, IV r1, IV r2) {
double lb_lane_width_r2 = __dmul_rd(0.5, __dadd_rd(1.0, -__dadd_ru(la.get_ub(), __dmul_ru(2.0, r2.get_ub()))));
if(lb_lane_width_r2 < 0.0) { lb_lane_width_r2 = 0.0; }
return {lb_lane_width_r2, r2.get_ub()};
}
static inline __device__ bool handle_r1_inner_3d(IV la, IV r1, IV r2, IV r3) {
IV a12 = collision_angle_inner_outer(la, r1, r2);
IV a23 = collision_angle_outer_inner(la, r2, r3);
IV a13 = collision_angle_inner_inner(la, r1, r3);
if(definitely(a12 + a23 >= a13)) {
// we already handle this case as 2 times 2 semicircles
return true;
}
IV a3 = semicircle_angle_inner(la, r3);
IV a = a13+a3;
IV a2 = semicircle_angle_outer(r2);
IV reusable_alpha = a - (a12+a2);
IV rho = 1.0 - (la + 2.0*r3).square();
rho.tighten_lb(0.0);
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, reusable_alpha.get_lb()), rho.get_lb());
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_area3 = __dmul_rd(lb_pi, __dmul_rd(r3.get_lb(), r3.get_lb()));
double lb_disk_area = __dadd_rd(__dadd_rd(lb_area2, lb_area3), __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static inline __device__ bool handle_r1_outer_3d(IV la, IV r1, IV r2, IV r3) {
IV a12 = collision_angle_outer_inner(la, r1, r2);
IV a23 = collision_angle_inner_outer(la, r2, r3);
IV a13 = collision_angle_outer_outer(r1, r3);
if(definitely(a12+a23 >= a13)) {
// we already handle this case as 2 times 2 semicircles
return true;
}
IV a3 = semicircle_angle_outer(r3);
IV a = a13+a3;
IV a2 = semicircle_angle_inner(la, r2);
IV reusable_alpha = a - (a12+a2);
IV rho = (1.0 - 2.0*r3).square() - la.square();
rho.tighten_lb(0.0);
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, reusable_alpha.get_lb()), rho.get_lb());
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_area3 = __dmul_rd(lb_pi, __dmul_rd(r3.get_lb(), r3.get_lb()));
double lb_disk_area = __dadd_rd(__dadd_rd(lb_area2, lb_area3), __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __global__ void kernel_find_critical_intervals_end_configuration_3d(IV outer_la, Atomic_ordered_double* first_problematic_la, bool output_criticals) {
for(int la_offset = blockIdx.x; la_offset < la_subintervals_3d; la_offset += gridDim.x) {
IV la = get_subinterval(outer_la, la_offset, la_subintervals_3d);
IV r1_range = compute_r1_range(la);
for(int r1_offset = blockIdx.y; r1_offset < r1_subintervals_3d; r1_offset += gridDim.y) {
IV r1_ = get_subinterval(r1_range, r1_offset, r1_subintervals_3d);
IV r2_range = compute_r2_range(la, r1_);
for(int r2_offset = threadIdx.x; r2_offset < r2_subintervals_3d; r2_offset += blockDim.x) {
IV r2_ = get_subinterval(r2_range, r2_offset, r2_subintervals_3d);
IV r3_range = compute_r3_range_3d(la, r1_, r2_);
if(r3_range.empty()) {
continue;
}
for(int r3_offset = threadIdx.y; r3_offset < r3_subintervals_3d; r3_offset += blockDim.y) {
IV r3 = get_subinterval(r3_range, r3_offset, r3_subintervals_3d);
IV r2 = r2_;
r2.tighten_lb(r3.get_lb());
IV r1 = r1_;
r1.tighten_lb(r2.get_lb());
if(r2.empty() || r1.empty()) {
continue;
}
if(__dmul_ru(2.0, r1.get_ub()) >= __dadd_rd(1.0, -la.get_ub())) {
printf("End configuration (3 disks): Circle possibly does not fit - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
algcuda::trap();
}
if(!handle_r1_inner_3d(la, r1, r2, r3)) {
if(output_criticals) {
printf("End configuration (3 disks): Critical interval with r1 on the inside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
if(!handle_r1_outer_3d(la, r1, r2, r3)) {
if(output_criticals) {
printf("End configuration (3 disks): Critical interval with r1 on the outside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
}
}
}
}
}
static void find_critical_intervals_end_configuration_2d() {
const dim3 grid(la_parallel_2d, r1_parallel_2d);
std::cout << "End configuration (2 disks) ..." << std::endl;
run_kernel_until_no_criticals(&kernel_find_critical_intervals_end_configuration_2d, IV(lambda_min, lambda_max), grid, dim3(r2_parallel_2d));
}
static void find_critical_intervals_end_configuration_3d() {
const dim3 grid(la_parallel_3d, r1_parallel_3d);
const dim3 block(r2_parallel_3d, r3_parallel_3d);
std::cout << "End configuration (3 disks) ..." << std::endl;
run_kernel_until_no_criticals(&kernel_find_critical_intervals_end_configuration_3d, IV(lambda_min, lambda_max), grid, block);
}
void circlepacking::lane_packing::find_critical_intervals_end_configuration() {
find_critical_intervals_end_configuration_2d();
find_critical_intervals_end_configuration_3d();
}
| 14e95ef48a443d3c76c73fc745c2a7df9ac44690.cu | //MIT License
//
//Copyright (c) 2018 TU Braunschweig, Algorithms Group
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files (the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions:
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "operations.cuh"
#include "../subintervals.hpp"
#include <algcuda/device.hpp>
#include <algcuda/exit.cuh>
#include "run_kernel.cuh"
namespace circlepacking {
namespace lane_packing {
void find_critical_intervals_end_configuration();
}
}
using namespace circlepacking;
using namespace circlepacking::lane_packing;
using algcuda::Atomic_ordered_double;
constexpr int la_subintervals_2d = 4096;
constexpr int r1_subintervals_2d = 1024;
constexpr int r2_subintervals_2d = 1024;
constexpr int la_parallel_2d = 256;
constexpr int r1_parallel_2d = 256;
constexpr int r2_parallel_2d = 256;
constexpr int la_subintervals_3d = 1024;
constexpr int r1_subintervals_3d = 256;
constexpr int r2_subintervals_3d = 128;
constexpr int r3_subintervals_3d = 128;
constexpr int la_parallel_3d = 256;
constexpr int r1_parallel_3d = 128;
constexpr int r2_parallel_3d = 16;
constexpr int r3_parallel_3d = 16;
static inline __device__ IV compute_r1_range(IV la) {
// any disk above this can simply be placed on its own (we assume the disks fit into the lane)
double ub_by_single_placement = lower_bound_radius_single_placement(la.get_lb());
double ub_by_lane_width = (0.5*(1.0-la)).get_ub();
// any disk below this would immediately open a new lane
double lb_by_lane_width = (0.25*(1.0-la)).get_lb();
return {lb_by_lane_width, ub_by_lane_width < ub_by_single_placement ? ub_by_lane_width : ub_by_single_placement};
}
static inline __device__ IV compute_r2_range(IV la, IV r1) {
double lb_lane_width_r1 = __dmul_rd(0.5, __dadd_rd(1.0, -__dadd_ru(la.get_ub(), __dmul_ru(2.0, r1.get_ub()))));
if(lb_lane_width_r1 < 0.0) { lb_lane_width_r1 = 0.0; }
return {lb_lane_width_r1, r1.get_ub()};
}
static __device__ bool handle_inner_outer_2d(IV la, IV r1, IV r2) {
IV a = collision_angle_inner_outer(la, r1, r2) + semicircle_angle_outer(r2);
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
IV rho = (1.0 - 2.0*r2).square() - la.square();
rho.tighten_lb(0.0);
double lb_alpha_reusable = (a - semicircle_angle_inner(la,r1)).get_lb();
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, lb_alpha_reusable), rho.get_lb());
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_disk_area = __dadd_rd(lb_area2, __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __device__ bool handle_outer_inner_2d(IV la, IV r1, IV r2) {
IV a = collision_angle_outer_inner(la, r1, r2) + semicircle_angle_inner(la, r2);
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
IV rho = 1.0 - (la + 2.0*r2).square();
rho.tighten_lb(0.0);
double lb_alpha_reusable = (a-semicircle_angle_outer(r1)).get_lb();
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, lb_alpha_reusable), rho.get_lb());
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_disk_area = __dadd_rd(lb_area2, __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __global__ void kernel_find_critical_intervals_end_configuration_2d(IV outer_la, Atomic_ordered_double* first_problematic_la, bool output_criticals) {
for(int la_offset = blockIdx.x; la_offset < la_subintervals_2d; la_offset += gridDim.x) {
IV la = get_subinterval(outer_la, la_offset, la_subintervals_2d);
IV r1_range = compute_r1_range(la);
for(int r1_offset = blockIdx.y; r1_offset < r1_subintervals_2d; r1_offset += gridDim.y) {
IV r1_ = get_subinterval(r1_range, r1_offset, r1_subintervals_2d);
IV r2_range = compute_r2_range(la, r1_);
for(int r2_offset = threadIdx.x; r2_offset < r2_subintervals_2d; r2_offset += blockDim.x) {
IV r2 = get_subinterval(r2_range, r2_offset, r2_subintervals_2d);
IV r1 = r1_;
r1.tighten_lb(r2.get_lb());
if(__dmul_ru(2.0, r1.get_ub()) >= __dadd_rd(1.0, -la.get_ub())) {
printf("End configuration (2 disks): Circle possibly does not fit - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
algcuda::trap();
}
if(!handle_inner_outer_2d(la, r1, r2)) {
if(output_criticals) {
printf("End configuration (2 disks): Critical interval with r1 on the inside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
if(!handle_outer_inner_2d(la, r1, r2)) {
if(output_criticals) {
printf("End configuration (2 disks): Critical interval with r1 on the outside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
}
}
}
}
static inline __device__ IV compute_r3_range_3d(IV la, IV r1, IV r2) {
double lb_lane_width_r2 = __dmul_rd(0.5, __dadd_rd(1.0, -__dadd_ru(la.get_ub(), __dmul_ru(2.0, r2.get_ub()))));
if(lb_lane_width_r2 < 0.0) { lb_lane_width_r2 = 0.0; }
return {lb_lane_width_r2, r2.get_ub()};
}
static inline __device__ bool handle_r1_inner_3d(IV la, IV r1, IV r2, IV r3) {
IV a12 = collision_angle_inner_outer(la, r1, r2);
IV a23 = collision_angle_outer_inner(la, r2, r3);
IV a13 = collision_angle_inner_inner(la, r1, r3);
if(definitely(a12 + a23 >= a13)) {
// we already handle this case as 2 times 2 semicircles
return true;
}
IV a3 = semicircle_angle_inner(la, r3);
IV a = a13+a3;
IV a2 = semicircle_angle_outer(r2);
IV reusable_alpha = a - (a12+a2);
IV rho = 1.0 - (la + 2.0*r3).square();
rho.tighten_lb(0.0);
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, reusable_alpha.get_lb()), rho.get_lb());
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_area3 = __dmul_rd(lb_pi, __dmul_rd(r3.get_lb(), r3.get_lb()));
double lb_disk_area = __dadd_rd(__dadd_rd(lb_area2, lb_area3), __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static inline __device__ bool handle_r1_outer_3d(IV la, IV r1, IV r2, IV r3) {
IV a12 = collision_angle_outer_inner(la, r1, r2);
IV a23 = collision_angle_inner_outer(la, r2, r3);
IV a13 = collision_angle_outer_outer(r1, r3);
if(definitely(a12+a23 >= a13)) {
// we already handle this case as 2 times 2 semicircles
return true;
}
IV a3 = semicircle_angle_outer(r3);
IV a = a13+a3;
IV a2 = semicircle_angle_inner(la, r2);
IV reusable_alpha = a - (a12+a2);
IV rho = (1.0 - 2.0*r3).square() - la.square();
rho.tighten_lb(0.0);
double lb_area_reusable = __dmul_rd(__dmul_rd(0.5, reusable_alpha.get_lb()), rho.get_lb());
double ub_lane_container_area_ratio = __dadd_ru(1.0, -__dmul_rd(la.get_lb(), la.get_lb()));
double ub_area_used = __dmul_ru(ub_lane_container_area_ratio, __dmul_ru(0.5, a.get_ub()));
double lb_area1 = __dmul_rd(lb_pi, __dmul_rd(r1.get_lb(), r1.get_lb()));
double lb_area2 = __dmul_rd(lb_pi, __dmul_rd(r2.get_lb(), r2.get_lb()));
double lb_area3 = __dmul_rd(lb_pi, __dmul_rd(r3.get_lb(), r3.get_lb()));
double lb_disk_area = __dadd_rd(__dadd_rd(lb_area2, lb_area3), __dmul_rd(0.5, lb_area1));
return lb_disk_area >= __dmul_ru(critical_ratio, __dadd_ru(ub_area_used, -lb_area_reusable));
}
static __global__ void kernel_find_critical_intervals_end_configuration_3d(IV outer_la, Atomic_ordered_double* first_problematic_la, bool output_criticals) {
for(int la_offset = blockIdx.x; la_offset < la_subintervals_3d; la_offset += gridDim.x) {
IV la = get_subinterval(outer_la, la_offset, la_subintervals_3d);
IV r1_range = compute_r1_range(la);
for(int r1_offset = blockIdx.y; r1_offset < r1_subintervals_3d; r1_offset += gridDim.y) {
IV r1_ = get_subinterval(r1_range, r1_offset, r1_subintervals_3d);
IV r2_range = compute_r2_range(la, r1_);
for(int r2_offset = threadIdx.x; r2_offset < r2_subintervals_3d; r2_offset += blockDim.x) {
IV r2_ = get_subinterval(r2_range, r2_offset, r2_subintervals_3d);
IV r3_range = compute_r3_range_3d(la, r1_, r2_);
if(r3_range.empty()) {
continue;
}
for(int r3_offset = threadIdx.y; r3_offset < r3_subintervals_3d; r3_offset += blockDim.y) {
IV r3 = get_subinterval(r3_range, r3_offset, r3_subintervals_3d);
IV r2 = r2_;
r2.tighten_lb(r3.get_lb());
IV r1 = r1_;
r1.tighten_lb(r2.get_lb());
if(r2.empty() || r1.empty()) {
continue;
}
if(__dmul_ru(2.0, r1.get_ub()) >= __dadd_rd(1.0, -la.get_ub())) {
printf("End configuration (3 disks): Circle possibly does not fit - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
algcuda::trap();
}
if(!handle_r1_inner_3d(la, r1, r2, r3)) {
if(output_criticals) {
printf("End configuration (3 disks): Critical interval with r1 on the inside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
if(!handle_r1_outer_3d(la, r1, r2, r3)) {
if(output_criticals) {
printf("End configuration (3 disks): Critical interval with r1 on the outside - la = [%.19g,%.19g], r1 = [%.19g,%.19g], r2 = [%.19g,%.19g], r3 = [%.19g,%.19g]\n",
la.get_lb(), la.get_ub(), r1.get_lb(), r1.get_ub(), r2.get_lb(), r2.get_ub(), r3.get_lb(), r3.get_ub()
);
} else {
first_problematic_la->store_min(la.get_lb());
return;
}
}
}
}
}
}
}
static void find_critical_intervals_end_configuration_2d() {
const dim3 grid(la_parallel_2d, r1_parallel_2d);
std::cout << "End configuration (2 disks) ..." << std::endl;
run_kernel_until_no_criticals(&kernel_find_critical_intervals_end_configuration_2d, IV(lambda_min, lambda_max), grid, dim3(r2_parallel_2d));
}
static void find_critical_intervals_end_configuration_3d() {
const dim3 grid(la_parallel_3d, r1_parallel_3d);
const dim3 block(r2_parallel_3d, r3_parallel_3d);
std::cout << "End configuration (3 disks) ..." << std::endl;
run_kernel_until_no_criticals(&kernel_find_critical_intervals_end_configuration_3d, IV(lambda_min, lambda_max), grid, block);
}
void circlepacking::lane_packing::find_critical_intervals_end_configuration() {
find_critical_intervals_end_configuration_2d();
find_critical_intervals_end_configuration_3d();
}
|
7f1b097d433ab0f7c4471b81e703f9715d75aa8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int_tp num = bottom[0]->shape(0);
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (bottom[0] != top[0]) {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data());
} else {
// compute mean
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), bottom_data,
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
}
// subtract mean
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, -1, num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., top_data);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
caffe_gpu_mul(top[0]->count(), top[0]->gpu_data(), top[0]->gpu_data(),
temp_.mutable_gpu_data()); // (X-EX)^2
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), temp_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, Dtype(1.),
num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(),
Dtype(0.), variance_.mutable_gpu_data()); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
moving_average_fraction_, this->blobs_[0]->mutable_gpu_data());
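      // Fold the batch variance into the running estimate with Bessel's correction:
      // the batch variance was computed with a 1/m factor, so scale by m/(m-1) (for m > 1)
      // to make it unbiased before the exponential moving average.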
int_tp m = bottom[0]->count()/channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
caffe_gpu_axpby(variance_.count(), bias_correction_factor,
variance_.gpu_data(), moving_average_fraction_,
this->blobs_[1]->mutable_gpu_data());
}
// normalize variance
caffe_gpu_add_scalar(variance_.count(),
eps_, variance_.mutable_gpu_data());
caffe_gpu_sqrt(variance_.count(), variance_.gpu_data(),
variance_.mutable_gpu_data());
// replicate variance to input size
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data());
caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
// TODO(cdoersch): The caching is only needed
// because later in-place layers might clobber the data.
// Can we skip this if they won't?
caffe_copy(x_norm_.count(), top_data,
x_norm_.mutable_gpu_data());
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
greentea_copy<Dtype>(bottom[0]->count(), (cl_mem) bottom_data, 0,
(cl_mem) top_data, 0, &ctx);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
greentea_gpu_scale(this->device_->id(), variance_.count(), scale_factor,
(cl_mem) (this->blobs_[0]->gpu_data()), 0,
(cl_mem) (mean_.mutable_gpu_data()), 0);
greentea_gpu_scale(this->device_->id(), variance_.count(), scale_factor,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(cl_mem) (variance_.mutable_gpu_data()), 0);
} else {
// compute mean
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) bottom_data, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasTrans, num, channels_, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
}
// subtract mean
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num,
spatial_dim, 1, -1, (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 1.,
(cl_mem) top_data, 0);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
greentea_gpu_mul<Dtype>(this->device_->id(),
top[0]->count(), (cl_mem) (top[0]->gpu_data()), 0,
(cl_mem) (top[0]->gpu_data()), 0,
(cl_mem) (temp_.mutable_gpu_data()), 0); // (X-EX)^2
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) (temp_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasTrans, num, channels_, Dtype(1.),
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, Dtype(0.),
(cl_mem) (variance_.mutable_gpu_data()), 0); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
greentea_gpu_axpby(this->device_->id(), mean_.count(), Dtype(1),
(cl_mem) (mean_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[0]->mutable_gpu_data()), 0);
int_tp m = bottom[0]->count()/channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), variance_.count(),
bias_correction_factor,
(cl_mem) (variance_.gpu_data()), 0, moving_average_fraction_,
(cl_mem) (this->blobs_[1]->mutable_gpu_data()), 0);
}
// normalize variance
greentea_gpu_add_scalar<Dtype>(this->device_->id(), variance_.count(),
eps_, (cl_mem) (variance_.mutable_gpu_data()), 0);
greentea_gpu_sqrt<Dtype>(this->device_->id(), variance_.count(),
(cl_mem) (variance_.gpu_data()), 0,
(cl_mem) (variance_.mutable_gpu_data()), 0);
// replicate variance to input size
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (variance_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num,
spatial_dim, 1, 1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (temp_.mutable_gpu_data()), 0);
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()), 0,
(cl_mem) top_data, 0);
// TODO(cdoersch): The caching is only needed
// because later in-place layers might clobber the data.
// Can we skip this if they won't?
greentea_copy<Dtype>(x_norm_.count(), (cl_mem)top_data, 0,
(cl_mem) (x_norm_.mutable_gpu_data()), 0, &ctx);
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
caffe_copy(x_norm_.count(), top[0]->gpu_diff(),
x_norm_.mutable_gpu_diff());
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
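    // In index form, per channel (with m = num * spatial_dim elements per channel):
    //   dE/dx_i = ( dE/dy_i - (1/m)*sum_j dE/dy_j - y_i*(1/m)*sum_j(dE/dy_j * y_j) )
    //             / sqrt(var + eps)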
// sum(dE/dY \cdot Y)
caffe_gpu_mul<Dtype>(temp_.count(), top_data, top_diff, bottom_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
// sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_mul<Dtype>(temp_.count(), top_data, bottom_diff, bottom_diff);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
caffe_gpu_axpby<Dtype>(temp_.count(), Dtype(1), top_diff,
Dtype(-1. / (num * spatial_dim)), bottom_diff);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
caffe_gpu_div<Dtype>(temp_.count(), bottom_diff, temp_.gpu_data(),
bottom_diff);
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (x_norm_.mutable_gpu_diff()), 0, &ctx);
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) bottom_diff, 0);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
// sum(dE/dY \cdot Y)
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) bottom_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) bottom_diff, 0);
// sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) top_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num * channels_, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) bottom_diff, 0);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
greentea_gpu_axpby<Dtype>(this->device_->id(), temp_.count(), Dtype(1),
(cl_mem) top_diff, 0,
Dtype(-1. / (num * spatial_dim)),
(cl_mem) bottom_diff, 0);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) bottom_diff, 0,
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchNormLayer);
} // namespace caffe
| 7f1b097d433ab0f7c4471b81e703f9715d75aa8e.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/batch_norm_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template<typename Dtype>
void BatchNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int_tp num = bottom[0]->shape(0);
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (bottom[0] != top[0]) {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[0]->gpu_data(), mean_.mutable_gpu_data());
caffe_gpu_scale(variance_.count(), scale_factor,
this->blobs_[1]->gpu_data(), variance_.mutable_gpu_data());
} else {
// compute mean
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), bottom_data,
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
}
// subtract mean
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, -1, num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., top_data);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
caffe_gpu_mul(top[0]->count(), top[0]->gpu_data(), top[0]->gpu_data(),
temp_.mutable_gpu_data()); // (X-EX)^2
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), temp_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, Dtype(1.),
num_by_chans_.gpu_data(), batch_sum_multiplier_.gpu_data(),
Dtype(0.), variance_.mutable_gpu_data()); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
caffe_gpu_axpby(mean_.count(), Dtype(1), mean_.gpu_data(),
moving_average_fraction_, this->blobs_[0]->mutable_gpu_data());
int_tp m = bottom[0]->count()/channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
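// m/(m-1) is Bessel's correction: the batch statistic computed above is the
// biased (divide-by-m) variance, so the running average stores the unbiased
// estimate instead.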
caffe_gpu_axpby(variance_.count(), bias_correction_factor,
variance_.gpu_data(), moving_average_fraction_,
this->blobs_[1]->mutable_gpu_data());
}
// normalize variance
caffe_gpu_add_scalar(variance_.count(),
eps_, variance_.mutable_gpu_data());
caffe_gpu_sqrt(variance_.count(), variance_.gpu_data(),
variance_.mutable_gpu_data());
// replicate variance to input size
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), variance_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., temp_.mutable_gpu_data());
caffe_gpu_div(temp_.count(), top_data, temp_.gpu_data(), top_data);
// TODO(cdoersch): The caching is only needed
// because later in-place layers might clobber the data.
// Can we skip this if they won't?
caffe_copy(x_norm_.count(), top_data,
x_norm_.mutable_gpu_data());
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
greentea_copy<Dtype>(bottom[0]->count(), (cl_mem) bottom_data, 0,
(cl_mem) top_data, 0, &ctx);
}
if (use_global_stats_) {
// use the stored mean/variance estimates.
const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
0 : 1 / this->blobs_[2]->cpu_data()[0];
greentea_gpu_scale(this->device_->id(), variance_.count(), scale_factor,
(cl_mem) (this->blobs_[0]->gpu_data()), 0,
(cl_mem) (mean_.mutable_gpu_data()), 0);
greentea_gpu_scale(this->device_->id(), variance_.count(), scale_factor,
(cl_mem) (this->blobs_[1]->gpu_data()), 0,
(cl_mem) (variance_.mutable_gpu_data()), 0);
} else {
// compute mean
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) bottom_data, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasTrans, num, channels_, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
}
// subtract mean
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num,
spatial_dim, 1, -1, (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 1.,
(cl_mem) top_data, 0);
if (!use_global_stats_) {
// compute variance using var(X) = E((X-EX)^2)
greentea_gpu_mul<Dtype>(this->device_->id(),
top[0]->count(), (cl_mem) (top[0]->gpu_data()), 0,
(cl_mem) (top[0]->gpu_data()), 0,
(cl_mem) (temp_.mutable_gpu_data()), 0); // (X-EX)^2
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasNoTrans, channels_ * num, spatial_dim,
1. / (num * spatial_dim), (cl_mem) (temp_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemv<Dtype>(this->device_->id(),
CblasTrans, num, channels_, Dtype(1.),
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, Dtype(0.),
(cl_mem) (variance_.mutable_gpu_data()), 0); // E((X-EX)^2)
// compute and save moving average
this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
this->blobs_[2]->mutable_cpu_data()[0] += 1;
greentea_gpu_axpby(this->device_->id(), mean_.count(), Dtype(1),
(cl_mem) (mean_.gpu_data()), 0,
moving_average_fraction_,
(cl_mem) (this->blobs_[0]->mutable_gpu_data()), 0);
int_tp m = bottom[0]->count()/channels_;
Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
greentea_gpu_axpby<Dtype>(this->device_->id(), variance_.count(),
bias_correction_factor,
(cl_mem) (variance_.gpu_data()), 0, moving_average_fraction_,
(cl_mem) (this->blobs_[1]->mutable_gpu_data()), 0);
}
// normalize variance
greentea_gpu_add_scalar<Dtype>(this->device_->id(), variance_.count(),
eps_, (cl_mem) (variance_.mutable_gpu_data()), 0);
greentea_gpu_sqrt<Dtype>(this->device_->id(), variance_.count(),
(cl_mem) (variance_.gpu_data()), 0,
(cl_mem) (variance_.mutable_gpu_data()), 0);
// replicate variance to input size
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (variance_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num,
spatial_dim, 1, 1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (temp_.mutable_gpu_data()), 0);
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) (temp_.gpu_data()), 0,
(cl_mem) top_data, 0);
// TODO(cdoersch): The caching is only needed
// because later in-place layers might clobber the data.
// Can we skip this if they won't?
greentea_copy<Dtype>(x_norm_.count(), (cl_mem)top_data, 0,
(cl_mem) (x_norm_.mutable_gpu_data()), 0, &ctx);
#endif // USE_GREENTEA
}
}
template<typename Dtype>
void BatchNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff;
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
caffe_copy(x_norm_.count(), top[0]->gpu_diff(),
x_norm_.mutable_gpu_diff());
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
caffe_gpu_div(temp_.count(), top_diff, temp_.gpu_data(), bottom_diff);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are the Hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
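// For reference, a sketch of where this expression comes from (a derivation
// note, not executable code): for one channel with m = num * spatial_dim
// entries, write s = sqrt(var(X)+eps) and Y_i = (X_i - mean(X))/s. Then
// dY_j/dX_i = (delta_ij - 1/m - Y_i*Y_j/m) / s,
// and contracting with dE/dY_j over j gives
// dE/dX_i = (dE/dY_i - mean(dE/dY) - Y_i * mean(dE/dY \cdot Y)) / s,
// which is exactly what the mul/gemv/gemm/axpby/div calls below assemble.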
// sum(dE/dY \cdot Y)
caffe_gpu_mul<Dtype>(temp_.count(), top_data, top_diff, bottom_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
bottom_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 0., bottom_diff);
// sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_mul<Dtype>(temp_.count(), top_data, bottom_diff, bottom_diff);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
top_diff, spatial_sum_multiplier_.gpu_data(), 0.,
num_by_chans_.mutable_gpu_data());
caffe_gpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
num_by_chans_.gpu_data(),
batch_sum_multiplier_.gpu_data(), 0.,
mean_.mutable_gpu_data());
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
batch_sum_multiplier_.gpu_data(), mean_.gpu_data(),
0., num_by_chans_.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
spatial_dim, 1, 1., num_by_chans_.gpu_data(),
spatial_sum_multiplier_.gpu_data(), 1., bottom_diff);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
caffe_gpu_axpby<Dtype>(temp_.count(), Dtype(1), top_diff,
Dtype(-1. / (num * spatial_dim)), bottom_diff);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
caffe_gpu_div<Dtype>(temp_.count(), bottom_diff, temp_.gpu_data(),
bottom_diff);
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
if (bottom[0] != top[0]) {
top_diff = top[0]->gpu_diff();
} else {
greentea_copy<Dtype>(x_norm_.count(), (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (x_norm_.mutable_gpu_diff()), 0, &ctx);
top_diff = x_norm_.gpu_diff();
}
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (use_global_stats_) {
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_diff, 0, (cl_mem) (temp_.gpu_data()),
0, (cl_mem) bottom_diff, 0);
return;
}
const Dtype* top_data = x_norm_.gpu_data();
int_tp num = bottom[0]->shape()[0];
int_tp spatial_dim = bottom[0]->count() / (channels_ * bottom[0]->shape(0));
// if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
//
// dE(Y)/dX =
// (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
// ./ sqrt(var(X) + eps)
//
// where \cdot and ./ are the Hadamard product and elementwise division,
// respectively, dE/dY is the top diff, and mean/var/sum are all computed
// along all dimensions except the channels dimension. In the above
// equation, the operations allow for expansion (i.e. broadcast) along all
// dimensions except the channels dimension where required.
// sum(dE/dY \cdot Y)
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) top_diff, 0,
(cl_mem) bottom_diff, 0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) bottom_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
channels_ * num, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) bottom_diff, 0);
// sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_mul<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) top_data, 0, (cl_mem) bottom_diff, 0,
(cl_mem) bottom_diff, 0);
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, channels_ * num,
spatial_dim, 1., (cl_mem) top_diff, 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
0., (cl_mem) (num_by_chans_.mutable_gpu_data()),
0);
greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, num, channels_,
1., (cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0, 0.,
(cl_mem) (mean_.mutable_gpu_data()), 0);
// reshape (broadcast) the above to make
// sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num, channels_, 1, 1,
(cl_mem) (batch_sum_multiplier_.gpu_data()), 0,
(cl_mem) (mean_.gpu_data()), 0, 0.,
(cl_mem) (num_by_chans_.mutable_gpu_data()), 0);
greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
num * channels_, spatial_dim, 1, 1.,
(cl_mem) (num_by_chans_.gpu_data()), 0,
(cl_mem) (spatial_sum_multiplier_.gpu_data()), 0,
1., (cl_mem) bottom_diff, 0);
// dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
greentea_gpu_axpby<Dtype>(this->device_->id(), temp_.count(), Dtype(1),
(cl_mem) top_diff, 0,
Dtype(-1. / (num * spatial_dim)),
(cl_mem) bottom_diff, 0);
// note: temp_ still contains sqrt(var(X)+eps), computed during the forward
// pass.
greentea_gpu_div<Dtype>(this->device_->id(), temp_.count(),
(cl_mem) bottom_diff, 0,
(cl_mem) (temp_.gpu_data()), 0,
(cl_mem) bottom_diff, 0);
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(BatchNormLayer);
} // namespace caffe
|
4749e3247929dfcd5489b4afce81b0be875f95ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
comp = (+1.3657E-35f * +1.1692E-37f + (-1.2707E-36f / ceilf(floorf((+1.2151E-19f / (-0.0f - (-1.1254E-36f + (var_1 / var_2))))))));
if (comp < var_3 * (var_4 / (var_5 - var_6 - var_7))) {
comp = ldexpf(+1.2284E-36f * var_8, 2);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
hipDeviceSynchronize();
return 0;
}
| 4749e3247929dfcd5489b4afce81b0be875f95ef.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8) {
comp = (+1.3657E-35f * +1.1692E-37f + (-1.2707E-36f / ceilf(floorf((+1.2151E-19f / (-0.0f - (-1.1254E-36f + (var_1 / var_2))))))));
if (comp < var_3 * (var_4 / (var_5 - var_6 - var_7))) {
comp = ldexpf(+1.2284E-36f * var_8, 2);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9);
cudaDeviceSynchronize();
return 0;
}
|
db37ec45d39a73e2f6a09288136878bfdf9783d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void stencil_sync(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex+RADIUS];
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = in[gindex];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS];
}
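// The first RADIUS threads of the block also fetch the left and right halo
// cells, so after the barrier temp[0 .. BLOCK_SIZE + 2*RADIUS - 1] is fully
// populated. Note this addressing assumes in[] carries RADIUS extra padding
// elements on each side of the output range.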
////////////////////////////////// sync thread ////////////////////////////
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
} | db37ec45d39a73e2f6a09288136878bfdf9783d3.cu | #include "includes.h"
__global__ void stencil_sync(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex+RADIUS];
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = in[gindex];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE + RADIUS];
}
////////////////////////////////// sync thread ////////////////////////////
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result;
} |
0a6d07b3e47c4878191c0581260f444bfbf27cd7.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
#include <roctracer/roctx.h>
int csv = false;
template<typename T>
void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type,
const ncclRedOp_t op, int root, ncclComm_t* const comms,
const std::vector<int>& dList) {
// initialize data
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(buffer, 0, N * sizeof(T));
memset(result, 0, N * sizeof(T));
int nDev = 0;
ncclCommCount(comms[0], &nDev);
hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)*nDev);
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamCreate(s+i));
CUDACHECK(hipMemset(recvbuff[i], 0, N * sizeof(T)));
Randomize(sendbuff[i], N, i);
if(i == 0) {
CUDACHECK(hipMemcpy(result, sendbuff[i], N*sizeof(T), hipMemcpyDeviceToHost));
} else {
Accumulate<T>(result, sendbuff[i], N, op);
}
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], ::min(N, 1024 * 1024),
type, op, root, comms[i], s[i]);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
printf((csv) ? "%i,%i,%s,%s,%d," : "%12i %12i %6s %6s %4d",
(int) (n * sizeof(T)), n, TypeName(type).c_str(),
OperationName(op).c_str(), root);
// do out-of-place reduction first
roctxRangePushA("out of place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op,
root, comms[i], s[i]);
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
roctxRangePop();
roctxRangePushA("out of place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw;
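// nccl-tests convention: for Reduce the bandwidth correction factor is 1,
// so the reported bus bandwidth simply equals the algorithmic bandwidth.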
CUDACHECK(hipSetDevice(dList[root]));
double maxDelta = CheckDelta<T>(recvbuff[root], result, N);
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
roctxRangePop();
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
// now do in-place reduction
roctxRangePushA("in place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op,
root, comms[i], s[i]);
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
roctxRangePop();
roctxRangePushA("in place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw;
CUDACHECK(hipSetDevice(dList[root]));
double maxDelta = CheckDelta<T>(sendbuff[root], result, N);
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
roctxRangePop();
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms,
const std::vector<int>& dList) {
int nDev = 0;
ncclCommCount(comms[0], &nDev);
T** sendbuff = (T**)malloc(nDev * sizeof(T*));
T** recvbuff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipMalloc(sendbuff + i, N * sizeof(T)));
CUDACHECK(hipMalloc(recvbuff + i, N * sizeof(T)));
}
for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) {
// for (ncclRedOp_t op : { ncclSum }) {
for(int root=0; root<nDev; ++root) {
RunTest<T>(sendbuff, recvbuff, N, type, op, root, comms, dList);
}
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(hipSetDevice(dList[i]));
CUDACHECK(hipFree(sendbuff[i]));
CUDACHECK(hipFree(recvbuff[i]));
}
free(sendbuff);
free(recvbuff);
}
void usage() {
printf("Tests nccl Reduce with user supplied arguments.\n"
" Usage: reduce_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(hipGetDeviceCount(&nVis));
int N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%d", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
ncclCommInitAll(comms, nDev, dList.data());
if (!csv) {
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
hipDeviceProp_t prop;
ncclCommCuDevice(comms[g], &cudaDev);
ncclCommUserRank(comms[g], &rank);
CUDACHECK(hipGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %6s %4s out-of-place in-place\n", "", "", "", "", "");
printf("# %10s %12s %6s %6s %4s %7s %5s %5s %7s %7s %5s %5s %7s\n",
"bytes", "N", "type", "op", "root",
"time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res");
}
else {
printf("B,N,type,op,root,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n");
}
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i = 0; i < nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
exit(EXIT_SUCCESS);
}
| 0a6d07b3e47c4878191c0581260f444bfbf27cd7.cu | /*************************************************************************
* Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
************************************************************************/
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>
#include "nccl.h"
#include "test_utilities.h"
#include <nvToolsExt.h>
int csv = false;
template<typename T>
void RunTest(T** sendbuff, T** recvbuff, const int N, const ncclDataType_t type,
const ncclRedOp_t op, int root, ncclComm_t* const comms,
const std::vector<int>& dList) {
// initialize data
T* buffer = (T*)malloc(N * sizeof(T));
T* result = (T*)malloc(N * sizeof(T));
memset(buffer, 0, N * sizeof(T));
memset(result, 0, N * sizeof(T));
int nDev = 0;
ncclCommCount(comms[0], &nDev);
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)*nDev);
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamCreate(s+i));
CUDACHECK(cudaMemset(recvbuff[i], 0, N * sizeof(T)));
Randomize(sendbuff[i], N, i);
if(i == 0) {
CUDACHECK(cudaMemcpy(result, sendbuff[i], N*sizeof(T), cudaMemcpyDeviceToHost));
} else {
Accumulate<T>(result, sendbuff[i], N, op);
}
}
// warm up GPU
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], std::min(N, 1024 * 1024),
type, op, root, comms[i], s[i]);
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
printf((csv) ? "%i,%i,%s,%s,%d," : "%12i %12i %6s %6s %4d",
(int) (n * sizeof(T)), n, TypeName(type).c_str(),
OperationName(op).c_str(), root);
// do out-of-place reduction first
nvtxRangePushA("out of place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)recvbuff[i], n, type, op,
root, comms[i], s[i]);
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
nvtxRangePop();
nvtxRangePushA("out of place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw;
CUDACHECK(cudaSetDevice(dList[root]));
double maxDelta = CheckDelta<T>(recvbuff[root], result, N);
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
nvtxRangePop();
}
// for (int n = 0; n <= N; n = (n > 0) ? n << 1 : 1)
{
int n = N;
// now do in-place reduction
nvtxRangePushA("in place");
auto start = std::chrono::high_resolution_clock::now();
//for (int i=0; i<100; i++) {
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
ncclReduce((const void*)sendbuff[i], (void*)sendbuff[i], n, type, op,
root, comms[i], s[i]);
}
//}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamSynchronize(s[i]));
}
auto stop = std::chrono::high_resolution_clock::now();
nvtxRangePop();
nvtxRangePushA("in place bookkeeping");
double elapsedSec =
std::chrono::duration_cast<std::chrono::duration<double>>(
stop - start).count(); // / 100.0;
double algbw = (double)(n * sizeof(T)) / 1.0E9 / elapsedSec;
double busbw = algbw;
CUDACHECK(cudaSetDevice(dList[root]));
double maxDelta = CheckDelta<T>(sendbuff[root], result, N);
printf((csv)?"%f,%f,%f,%le,":" %7.3f %5.2f %5.2f %7.0le\n",
elapsedSec * 1.0E3, algbw, busbw, maxDelta);
nvtxRangePop();
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaStreamDestroy(s[i]));
}
free(s);
free(buffer);
free(result);
}
template<typename T>
void RunTests(const int N, const ncclDataType_t type, ncclComm_t* const comms,
const std::vector<int>& dList) {
int nDev = 0;
ncclCommCount(comms[0], &nDev);
T** sendbuff = (T**)malloc(nDev * sizeof(T*));
T** recvbuff = (T**)malloc(nDev * sizeof(T*));
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaMalloc(sendbuff + i, N * sizeof(T)));
CUDACHECK(cudaMalloc(recvbuff + i, N * sizeof(T)));
}
for (ncclRedOp_t op : { ncclSum, ncclProd, ncclMax, ncclMin }) {
// for (ncclRedOp_t op : { ncclSum }) {
for(int root=0; root<nDev; ++root) {
RunTest<T>(sendbuff, recvbuff, N, type, op, root, comms, dList);
}
}
for (int i = 0; i < nDev; ++i) {
CUDACHECK(cudaSetDevice(dList[i]));
CUDACHECK(cudaFree(sendbuff[i]));
CUDACHECK(cudaFree(recvbuff[i]));
}
free(sendbuff);
free(recvbuff);
}
void usage() {
printf("Tests nccl Reduce with user supplied arguments.\n"
" Usage: reduce_test <data size in bytes> [number of GPUs] "
"[GPU 0] [GPU 1] ...\n\n");
}
int main(int argc, char* argv[]) {
int nVis = 0;
CUDACHECK(cudaGetDeviceCount(&nVis));
int N = 0;
if (argc > 1) {
int t = sscanf(argv[1], "%d", &N);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
} else {
printf("Error: must specify at least data size in bytes!\n\n");
usage();
exit(EXIT_FAILURE);
}
int nDev = nVis;
if (argc > 2) {
int t = sscanf(argv[2], "%d", &nDev);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[1]);
usage();
exit(EXIT_FAILURE);
}
}
std::vector<int> dList(nDev);
for (int i = 0; i < nDev; ++i)
dList[i] = i % nVis;
if (argc > 3) {
if (argc - 3 != nDev) {
printf("Error: insufficient number of GPUs in list\n\n");
usage();
exit(EXIT_FAILURE);
}
for (int i = 0; i < nDev; ++i) {
int t = sscanf(argv[3 + i], "%d", dList.data() + i);
if (t == 0) {
printf("Error: %s is not an integer!\n\n", argv[2 + i]);
usage();
exit(EXIT_FAILURE);
}
}
}
ncclComm_t* comms = (ncclComm_t*)malloc(sizeof(ncclComm_t)*nDev);
ncclCommInitAll(comms, nDev, dList.data());
if (!csv) {
printf("# Using devices\n");
for (int g = 0; g < nDev; ++g) {
int cudaDev;
int rank;
cudaDeviceProp prop;
ncclCommCuDevice(comms[g], &cudaDev);
ncclCommUserRank(comms[g], &rank);
CUDACHECK(cudaGetDeviceProperties(&prop, cudaDev));
printf("# Rank %2d uses device %2d [0x%02x] %s\n", rank, cudaDev,
prop.pciBusID, prop.name);
}
printf("\n");
printf("# %10s %12s %6s %6s %4s out-of-place in-place\n", "", "", "", "", "");
printf("# %10s %12s %6s %6s %4s %7s %5s %5s %7s %7s %5s %5s %7s\n",
"bytes", "N", "type", "op", "root",
"time", "algbw", "busbw", "res", "time", "algbw", "busbw", "res");
}
else {
printf("B,N,type,op,root,oop_time,oop_algbw,oop_busbw,oop_res,ip_time,ip_algbw,ip_busbw,ip_res\n");
}
RunTests<char>(N / sizeof(char), ncclChar, comms, dList);
RunTests<int>(N / sizeof(int), ncclInt, comms, dList);
#ifdef CUDA_HAS_HALF
RunTests<half>(N / sizeof(half), ncclHalf, comms, dList);
#endif
RunTests<float>(N / sizeof(float), ncclFloat, comms, dList);
RunTests<double>(N / sizeof(double), ncclDouble, comms, dList);
RunTests<long long>(N / sizeof(long long), ncclInt64, comms, dList);
RunTests<unsigned long long>(N / sizeof(unsigned long long), ncclUint64, comms, dList);
printf("\n");
for(int i = 0; i < nDev; ++i)
ncclCommDestroy(comms[i]);
free(comms);
exit(EXIT_SUCCESS);
}
|
1e299230177bcabd8514908114b267b91a8c0e06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* PAVLE - Parallel Variable-Length Encoder for CUDA
*
* Copyright (C) 2009 Tjark Bringewat <[email protected]>, Ana Balevic <[email protected]>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php
*
* If you find this program useful, please contact me and reference PAVLE home page in your work.
*
*/
#ifndef _PACK_KERNELS_H_
#define _PACK_KERNELS_H_
#include <cstdint>
#include "parameters.h"
__global__ static void pack2(uint32_t* srcData, uint32_t* cindex, uint32_t* cindex2,
uint32_t* dstData, uint32_t original_num_block_elements) {
uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x;
// source index
uint32_t offset = tid * original_num_block_elements;//DPB,
uint32_t bitsize = cindex[tid];
// destination index
uint32_t pos = cindex2[tid];
uint32_t dword = pos / 32;
uint32_t bit = pos % 32;
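// Illustrative example (values not taken from the data): pos = 70 gives
// dword = 2 and bit = 6, so the top 26 bits of the first source dword are
// OR-ed into dstData[2] and its low 6 bits are carried over, via tmp, into
// dstData[3].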
uint32_t dw = srcData[offset]; // load the first dword from srcData[]
uint32_t tmp = dw >> bit; // cut off those bits that do not fit into the initial location in destData[]
atomicOr(&dstData[dword], tmp); // fill up this initial location
tmp = dw << 32-bit; // save the remaining bits that were cut off earlier in tmp
uint32_t i;
for (i = 1; i < bitsize / 32; i++) { // from now on, we have exclusive access to destData[]
dw = srcData[offset+i]; // load next dword from srcData[]
tmp |= dw >> bit; // fill up tmp
dstData[dword+i] = tmp; // write complete dword to destData[]
tmp = dw << 32-bit; // save the remaining bits in tmp (like before)
}
// exclusive access to dstData[] ends here
// the remaining block can, or rather should be further optimized
// write the remaining bits in tmp, UNLESS bit is 0 and bitsize is divisible by 32, in which case there is nothing left to write
if (bit != 0 || bitsize % 32 != 0)
atomicOr(&dstData[dword + i], tmp);
if (bitsize % 32 != 0) {
dw = srcData[offset+i];
atomicOr(&dstData[dword+i], dw >> bit);
atomicOr(&dstData[dword+i+1], dw << 32-bit);
}
}
#endif
| 1e299230177bcabd8514908114b267b91a8c0e06.cu | /*
* PAVLE - Parallel Variable-Length Encoder for CUDA
*
* Copyright (C) 2009 Tjark Bringewat <[email protected]>, Ana Balevic <[email protected]>
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* MIT License. Read the full licence: http://www.opensource.org/licenses/mit-license.php
*
* If you find this program useful, please contact me and reference PAVLE home page in your work.
*
*/
#ifndef _PACK_KERNELS_H_
#define _PACK_KERNELS_H_
#include <cstdint>
#include "parameters.h"
__global__ static void pack2(uint32_t* srcData, uint32_t* cindex, uint32_t* cindex2,
uint32_t* dstData, uint32_t original_num_block_elements) {
uint32_t tid = blockDim.x * blockIdx.x + threadIdx.x;
// source index
uint32_t offset = tid * original_num_block_elements;//DPB,
uint32_t bitsize = cindex[tid];
// destination index
uint32_t pos = cindex2[tid];
uint32_t dword = pos / 32;
uint32_t bit = pos % 32;
uint32_t dw = srcData[offset]; // load the first dword from srcData[]
uint32_t tmp = dw >> bit; // cut off those bits that do not fit into the initial location in destData[]
atomicOr(&dstData[dword], tmp); // fill up this initial location
tmp = dw << 32-bit; // save the remaining bits that were cut off earlier in tmp
uint32_t i;
for (i = 1; i < bitsize / 32; i++) { // from now on, we have exclusive access to destData[]
dw = srcData[offset+i]; // load next dword from srcData[]
tmp |= dw >> bit; // fill up tmp
dstData[dword+i] = tmp; // write complete dword to destData[]
tmp = dw << 32-bit; // save the remaining bits in tmp (like before)
}
// exclusive access to dstData[] ends here
// the remaining block can, or rather should be further optimized
// write the remaining bits in tmp, UNLESS bit is 0 and bitsize is divisible by 32, in which case there is nothing left to write
if (bit != 0 || bitsize % 32 != 0)
atomicOr(&dstData[dword + i], tmp);
if (bitsize % 32 != 0) {
dw = srcData[offset+i];
atomicOr(&dstData[dword+i], dw >> bit);
atomicOr(&dstData[dword+i+1], dw << 32-bit);
}
}
#endif
|
653be009b9ca5c8d605b1b05dd8c711bd21f99ad.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "core.h"
#include "common_coll.h"
#include "enqueue.h"
#include "collectives.h"
ncclResult_t ncclAllGatherFunc(const void* sendbuff, void* recvbuff, size_t count,
ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
size_t nbytes = count*ncclTypeSize(datatype);
INFO(NCCL_COLL,"AllGather: opCount %lx sendbuff %p recvbuff %p count %zi datatype %d op %d root %d comm %p [nranks=%d] stream %p", comm->opCount, sendbuff, recvbuff, count, datatype, op, root, comm, comm->nRanks, stream);
if (comm->nRanks == 1) {
if (sendbuff != recvbuff)
CUDACHECK(hipMemcpyAsync(recvbuff, sendbuff, nbytes, hipMemcpyDeviceToDevice, stream));
} else {
NCCLCHECK(transportSaveProxies(ALLGATHER_SUBSTEPS, ALLGATHER_BUFCHUNKS, comm->nRanks-1, comm->nRanks, nbytes*comm->nRanks, proxyPatternRing, comm));
NCCLCHECK(saveKernel(ncclCollAllGather, sendbuff, recvbuff, nbytes, ncclInt8, op, root, comm, stream, nbytes*comm->nRanks, 1));
}
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclAllGather, const void* sendbuff, void* recvbuff, size_t sendcount,
ncclDataType_t datatype, ncclComm_t comm, hipStream_t stream);
ncclResult_t ncclAllGather(const void* sendbuff, void* recvbuff, size_t sendcount,
ncclDataType_t datatype, ncclComm_t comm, hipStream_t stream) {
return ncclEnqueueCheck(ncclAllGatherFunc, "AllGather", sendbuff, recvbuff, sendcount, datatype,
ncclSum, 0, comm, stream);
}
| 653be009b9ca5c8d605b1b05dd8c711bd21f99ad.cu | /*************************************************************************
* Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "core.h"
#include "common_coll.h"
#include "enqueue.h"
#include "collectives.h"
ncclResult_t ncclAllGatherFunc(const void* sendbuff, void* recvbuff, size_t count,
ncclDataType_t datatype, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
size_t nbytes = count*ncclTypeSize(datatype);
INFO(NCCL_COLL,"AllGather: opCount %lx sendbuff %p recvbuff %p count %zi datatype %d op %d root %d comm %p [nranks=%d] stream %p", comm->opCount, sendbuff, recvbuff, count, datatype, op, root, comm, comm->nRanks, stream);
if (comm->nRanks == 1) {
if (sendbuff != recvbuff)
CUDACHECK(cudaMemcpyAsync(recvbuff, sendbuff, nbytes, cudaMemcpyDeviceToDevice, stream));
} else {
NCCLCHECK(transportSaveProxies(ALLGATHER_SUBSTEPS, ALLGATHER_BUFCHUNKS, comm->nRanks-1, comm->nRanks, nbytes*comm->nRanks, proxyPatternRing, comm));
NCCLCHECK(saveKernel(ncclCollAllGather, sendbuff, recvbuff, nbytes, ncclInt8, op, root, comm, stream, nbytes*comm->nRanks, 1));
}
return ncclSuccess;
}
NCCL_API(ncclResult_t, ncclAllGather, const void* sendbuff, void* recvbuff, size_t sendcount,
ncclDataType_t datatype, ncclComm_t comm, cudaStream_t stream);
ncclResult_t ncclAllGather(const void* sendbuff, void* recvbuff, size_t sendcount,
ncclDataType_t datatype, ncclComm_t comm, cudaStream_t stream) {
return ncclEnqueueCheck(ncclAllGatherFunc, "AllGather", sendbuff, recvbuff, sendcount, datatype,
ncclSum, 0, comm, stream);
}
|
99b96c28e0d824b6277f9008bf8270cfea0be9e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
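/* Note: __kernel_init__ performs no bounds check, so this launch writes
gridDim.x * blockDim.x elements; when size is not a multiple of
FORMA_MAX_BLOCKDIM_0 it relies on the device allocation being padded
accordingly (an assumption about the generated allocation, not something
enforced here). */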
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
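/* The generated kernels below all apply the same 5x5 weighted average whose
weights (2 4 5 4 2 / 4 9 12 9 4 / 5 12 15 12 5 / 4 9 12 9 4 / 2 4 5 4 2)
sum to 159, i.e. the normalized Gaussian smoothing mask commonly used in
Canny edge detection, applied repeatedly over the interior of the image. */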
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
float __temp_0__;
__temp_0__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-2))]);
float __temp_1__;
__temp_1__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-2))]);
float __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
float __temp_3__;
__temp_3__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
float __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
float __temp_5__;
__temp_5__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-2))]);
float __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
float __temp_7__;
__temp_7__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-2))]);
float __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
float __temp_9__;
__temp_9__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-1))]);
float __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
float __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
float __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
float __temp_13__;
__temp_13__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
float __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
float __temp_15__;
__temp_15__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
float __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
float __temp_17__;
__temp_17__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-1))]);
float __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
float __temp_19__;
__temp_19__ = (5 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
float __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
float __temp_21__;
__temp_21__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
float __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
float __temp_23__;
__temp_23__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
float __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
float __temp_25__;
__temp_25__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
float __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
float __temp_27__;
__temp_27__ = (5 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
float __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
float __temp_29__;
__temp_29__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(1))]);
float __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
float __temp_31__;
__temp_31__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
float __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
float __temp_33__;
__temp_33__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
float __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
float __temp_35__;
__temp_35__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
float __temp_36__;
__temp_36__ = (__temp_34__ + __temp_35__);
float __temp_37__;
__temp_37__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(1))]);
float __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
float __temp_39__;
__temp_39__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(2))]);
float __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
float __temp_41__;
__temp_41__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(2))]);
float __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
float __temp_43__;
__temp_43__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
float __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
float __temp_45__;
__temp_45__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(2))]);
float __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
float __temp_47__;
__temp_47__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(2))]);
float __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 159);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_49__;
}
}
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ __var_4__, int N, int M, float * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_2__ <= (M-3)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_3__ <= (N-3)){
float __temp_50__;
__temp_50__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-2))]);
float __temp_51__;
__temp_51__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-2))]);
float __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
float __temp_53__;
__temp_53__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-2))]);
float __temp_54__;
__temp_54__ = (__temp_52__ + __temp_53__);
float __temp_55__;
__temp_55__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-2))]);
float __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
float __temp_57__;
__temp_57__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-2))]);
float __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
float __temp_59__;
__temp_59__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-1))]);
float __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
float __temp_61__;
__temp_61__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-1))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (5 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__)]);
float __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
float __temp_71__;
__temp_71__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
float __temp_72__;
__temp_72__ = (__temp_70__ + __temp_71__);
float __temp_73__;
__temp_73__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
float __temp_74__;
__temp_74__ = (__temp_72__ + __temp_73__);
float __temp_75__;
__temp_75__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
float __temp_76__;
__temp_76__ = (__temp_74__ + __temp_75__);
float __temp_77__;
__temp_77__ = (5 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__)]);
float __temp_78__;
__temp_78__ = (__temp_76__ + __temp_77__);
float __temp_79__;
__temp_79__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(1))]);
float __temp_80__;
__temp_80__ = (__temp_78__ + __temp_79__);
float __temp_81__;
__temp_81__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(1))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(2))]);
float __temp_90__;
__temp_90__ = (__temp_88__ + __temp_89__);
float __temp_91__;
__temp_91__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(2))]);
float __temp_92__;
__temp_92__ = (__temp_90__ + __temp_91__);
float __temp_93__;
__temp_93__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(2))]);
float __temp_94__;
__temp_94__ = (__temp_92__ + __temp_93__);
float __temp_95__;
__temp_95__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(2))]);
float __temp_96__;
__temp_96__ = (__temp_94__ + __temp_95__);
float __temp_97__;
__temp_97__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(2))]);
float __temp_98__;
__temp_98__ = (__temp_96__ + __temp_97__);
float __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_99__;
}
}
}
__global__ void __kernel___forma_kernel__2__(float * __restrict__ __var_3__, int N, int M, float * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_4__ <= (M-3)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_5__ <= (N-3)){
float __temp_100__;
__temp_100__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-2))]);
float __temp_101__;
__temp_101__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-2))]);
float __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
float __temp_103__;
__temp_103__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-2))]);
float __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
float __temp_105__;
__temp_105__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-2))]);
float __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
float __temp_107__;
__temp_107__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-2))]);
float __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
float __temp_109__;
__temp_109__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-1))]);
float __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
float __temp_111__;
__temp_111__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
float __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
float __temp_113__;
__temp_113__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
float __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
float __temp_115__;
__temp_115__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-1))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (5 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__)]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (5 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__)]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(1))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(1))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(2))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(2))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(2))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(2))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(2))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_149__;
}
}
}
__global__ void __kernel___forma_kernel__3__(float * __restrict__ __var_2__, int N, int M, float * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_6__ <= (M-3)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_7__ <= (N-3)){
float __temp_150__;
__temp_150__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-2))]);
float __temp_151__;
__temp_151__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-2))]);
float __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
float __temp_153__;
__temp_153__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-2))]);
float __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
float __temp_155__;
__temp_155__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-2))]);
float __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
float __temp_157__;
__temp_157__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-2))]);
float __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
float __temp_159__;
__temp_159__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-1))]);
float __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
float __temp_161__;
__temp_161__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
float __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
float __temp_163__;
__temp_163__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
float __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
float __temp_165__;
__temp_165__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
float __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
float __temp_167__;
__temp_167__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-1))]);
float __temp_168__;
__temp_168__ = (__temp_166__ + __temp_167__);
float __temp_169__;
__temp_169__ = (5 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__)]);
float __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
float __temp_171__;
__temp_171__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
float __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
float __temp_173__;
__temp_173__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
float __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
float __temp_175__;
__temp_175__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
float __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
float __temp_177__;
__temp_177__ = (5 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__)]);
float __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
float __temp_179__;
__temp_179__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(1))]);
float __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
float __temp_181__;
__temp_181__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
float __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
float __temp_183__;
__temp_183__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
float __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
float __temp_185__;
__temp_185__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
float __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
float __temp_187__;
__temp_187__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(1))]);
float __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
float __temp_189__;
__temp_189__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(2))]);
float __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
float __temp_191__;
__temp_191__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(2))]);
float __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
float __temp_193__;
__temp_193__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(2))]);
float __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
float __temp_195__;
__temp_195__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(2))]);
float __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
float __temp_197__;
__temp_197__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(2))]);
float __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
float __temp_199__;
__temp_199__ = (__temp_198__ / 159);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_199__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
hipMalloc(&__var_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
float * __var_3__;
hipMalloc(&__var_3__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
float * __var_4__;
hipMalloc(&__var_4__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
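/* The 5x5 Gaussian smoothing pass is applied four times in sequence:
   input -> __var_4__ -> __var_3__ -> __var_2__ -> __var_1__, and the final
   buffer is copied out to __var_0__ below. The generated per-stage kernels
   __kernel___forma_kernel__{1,2,3}__ are structurally identical to
   __kernel___forma_kernel__0__, so kernel 0 is simply relaunched for every
   stage with different buffer arguments. */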
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
| 99b96c28e0d824b6277f9008bf8270cfea0be9e3.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
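/* Each stage below applies the classic 5x5 Gaussian smoothing stencil
   (weights 2 4 5 4 2 / 4 9 12 9 4 / 5 12 15 12 5 / 4 9 12 9 4 / 2 4 5 4 2,
   which sum to 159, hence the final division) to the interior points
   2 <= x <= M-3, 2 <= y <= N-3 of an N-by-M grid stored row-major with
   row stride M. */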
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_0__ <= (M-3)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_1__ <= (N-3)){
float __temp_0__;
__temp_0__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-2))]);
float __temp_1__;
__temp_1__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-2))]);
float __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
float __temp_3__;
__temp_3__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-2))]);
float __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
float __temp_5__;
__temp_5__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-2))]);
float __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
float __temp_7__;
__temp_7__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-2))]);
float __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
float __temp_9__;
__temp_9__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(-1))]);
float __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
float __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
float __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
float __temp_13__;
__temp_13__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
float __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
float __temp_15__;
__temp_15__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
float __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
float __temp_17__;
__temp_17__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(-1))]);
float __temp_18__;
__temp_18__ = (__temp_16__ + __temp_17__);
float __temp_19__;
__temp_19__ = (5 * input[__iter_0__+(-2)+(M-0)*(__iter_1__)]);
float __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
float __temp_21__;
__temp_21__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
float __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
float __temp_23__;
__temp_23__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
float __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
float __temp_25__;
__temp_25__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
float __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
float __temp_27__;
__temp_27__ = (5 * input[__iter_0__+(2)+(M-0)*(__iter_1__)]);
float __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
float __temp_29__;
__temp_29__ = (4 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(1))]);
float __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
float __temp_31__;
__temp_31__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
float __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
float __temp_33__;
__temp_33__ = (12 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
float __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
float __temp_35__;
__temp_35__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
float __temp_36__;
__temp_36__ = (__temp_34__ + __temp_35__);
float __temp_37__;
__temp_37__ = (4 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(1))]);
float __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
float __temp_39__;
__temp_39__ = (2 * input[__iter_0__+(-2)+(M-0)*(__iter_1__+(2))]);
float __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
float __temp_41__;
__temp_41__ = (4 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(2))]);
float __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
float __temp_43__;
__temp_43__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(2))]);
float __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
float __temp_45__;
__temp_45__ = (4 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(2))]);
float __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
float __temp_47__;
__temp_47__ = (2 * input[__iter_0__+(2)+(M-0)*(__iter_1__+(2))]);
float __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
float __temp_49__;
__temp_49__ = (__temp_48__ / 159);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_49__;
}
}
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ __var_4__, int N, int M, float * __restrict__ __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_2__ <= (M-3)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_3__ <= (N-3)){
float __temp_50__;
__temp_50__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-2))]);
float __temp_51__;
__temp_51__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-2))]);
float __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
float __temp_53__;
__temp_53__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-2))]);
float __temp_54__;
__temp_54__ = (__temp_52__ + __temp_53__);
float __temp_55__;
__temp_55__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-2))]);
float __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
float __temp_57__;
__temp_57__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-2))]);
float __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
float __temp_59__;
__temp_59__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(-1))]);
float __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
float __temp_61__;
__temp_61__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
float __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
float __temp_63__;
__temp_63__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
float __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
float __temp_65__;
__temp_65__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
float __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
float __temp_67__;
__temp_67__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(-1))]);
float __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
float __temp_69__;
__temp_69__ = (5 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__)]);
float __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
float __temp_71__;
__temp_71__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
float __temp_72__;
__temp_72__ = (__temp_70__ + __temp_71__);
float __temp_73__;
__temp_73__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
float __temp_74__;
__temp_74__ = (__temp_72__ + __temp_73__);
float __temp_75__;
__temp_75__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
float __temp_76__;
__temp_76__ = (__temp_74__ + __temp_75__);
float __temp_77__;
__temp_77__ = (5 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__)]);
float __temp_78__;
__temp_78__ = (__temp_76__ + __temp_77__);
float __temp_79__;
__temp_79__ = (4 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(1))]);
float __temp_80__;
__temp_80__ = (__temp_78__ + __temp_79__);
float __temp_81__;
__temp_81__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
float __temp_82__;
__temp_82__ = (__temp_80__ + __temp_81__);
float __temp_83__;
__temp_83__ = (12 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
float __temp_84__;
__temp_84__ = (__temp_82__ + __temp_83__);
float __temp_85__;
__temp_85__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
float __temp_86__;
__temp_86__ = (__temp_84__ + __temp_85__);
float __temp_87__;
__temp_87__ = (4 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(1))]);
float __temp_88__;
__temp_88__ = (__temp_86__ + __temp_87__);
float __temp_89__;
__temp_89__ = (2 * __var_4__[__iter_2__+(-2)+(M-0)*(__iter_3__+(2))]);
float __temp_90__;
__temp_90__ = (__temp_88__ + __temp_89__);
float __temp_91__;
__temp_91__ = (4 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(2))]);
float __temp_92__;
__temp_92__ = (__temp_90__ + __temp_91__);
float __temp_93__;
__temp_93__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(2))]);
float __temp_94__;
__temp_94__ = (__temp_92__ + __temp_93__);
float __temp_95__;
__temp_95__ = (4 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(2))]);
float __temp_96__;
__temp_96__ = (__temp_94__ + __temp_95__);
float __temp_97__;
__temp_97__ = (2 * __var_4__[__iter_2__+(2)+(M-0)*(__iter_3__+(2))]);
float __temp_98__;
__temp_98__ = (__temp_96__ + __temp_97__);
float __temp_99__;
__temp_99__ = (__temp_98__ / 159);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_99__;
}
}
}
__global__ void __kernel___forma_kernel__2__(float * __restrict__ __var_3__, int N, int M, float * __restrict__ __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_4__ <= (M-3)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_5__ <= (N-3)){
float __temp_100__;
__temp_100__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-2))]);
float __temp_101__;
__temp_101__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-2))]);
float __temp_102__;
__temp_102__ = (__temp_100__ + __temp_101__);
float __temp_103__;
__temp_103__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-2))]);
float __temp_104__;
__temp_104__ = (__temp_102__ + __temp_103__);
float __temp_105__;
__temp_105__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-2))]);
float __temp_106__;
__temp_106__ = (__temp_104__ + __temp_105__);
float __temp_107__;
__temp_107__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-2))]);
float __temp_108__;
__temp_108__ = (__temp_106__ + __temp_107__);
float __temp_109__;
__temp_109__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(-1))]);
float __temp_110__;
__temp_110__ = (__temp_108__ + __temp_109__);
float __temp_111__;
__temp_111__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
float __temp_112__;
__temp_112__ = (__temp_110__ + __temp_111__);
float __temp_113__;
__temp_113__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
float __temp_114__;
__temp_114__ = (__temp_112__ + __temp_113__);
float __temp_115__;
__temp_115__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
float __temp_116__;
__temp_116__ = (__temp_114__ + __temp_115__);
float __temp_117__;
__temp_117__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(-1))]);
float __temp_118__;
__temp_118__ = (__temp_116__ + __temp_117__);
float __temp_119__;
__temp_119__ = (5 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__)]);
float __temp_120__;
__temp_120__ = (__temp_118__ + __temp_119__);
float __temp_121__;
__temp_121__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
float __temp_122__;
__temp_122__ = (__temp_120__ + __temp_121__);
float __temp_123__;
__temp_123__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
float __temp_124__;
__temp_124__ = (__temp_122__ + __temp_123__);
float __temp_125__;
__temp_125__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
float __temp_126__;
__temp_126__ = (__temp_124__ + __temp_125__);
float __temp_127__;
__temp_127__ = (5 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__)]);
float __temp_128__;
__temp_128__ = (__temp_126__ + __temp_127__);
float __temp_129__;
__temp_129__ = (4 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(1))]);
float __temp_130__;
__temp_130__ = (__temp_128__ + __temp_129__);
float __temp_131__;
__temp_131__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
float __temp_132__;
__temp_132__ = (__temp_130__ + __temp_131__);
float __temp_133__;
__temp_133__ = (12 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
float __temp_134__;
__temp_134__ = (__temp_132__ + __temp_133__);
float __temp_135__;
__temp_135__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
float __temp_136__;
__temp_136__ = (__temp_134__ + __temp_135__);
float __temp_137__;
__temp_137__ = (4 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(1))]);
float __temp_138__;
__temp_138__ = (__temp_136__ + __temp_137__);
float __temp_139__;
__temp_139__ = (2 * __var_3__[__iter_4__+(-2)+(M-0)*(__iter_5__+(2))]);
float __temp_140__;
__temp_140__ = (__temp_138__ + __temp_139__);
float __temp_141__;
__temp_141__ = (4 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(2))]);
float __temp_142__;
__temp_142__ = (__temp_140__ + __temp_141__);
float __temp_143__;
__temp_143__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(2))]);
float __temp_144__;
__temp_144__ = (__temp_142__ + __temp_143__);
float __temp_145__;
__temp_145__ = (4 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(2))]);
float __temp_146__;
__temp_146__ = (__temp_144__ + __temp_145__);
float __temp_147__;
__temp_147__ = (2 * __var_3__[__iter_4__+(2)+(M-0)*(__iter_5__+(2))]);
float __temp_148__;
__temp_148__ = (__temp_146__ + __temp_147__);
float __temp_149__;
__temp_149__ = (__temp_148__ / 159);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_149__;
}
}
}
__global__ void __kernel___forma_kernel__3__(float * __restrict__ __var_2__, int N, int M, float * __restrict__ __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 2;
if(__iter_6__ <= (M-3)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 2;
if(__iter_7__ <= (N-3)){
float __temp_150__;
__temp_150__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-2))]);
float __temp_151__;
__temp_151__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-2))]);
float __temp_152__;
__temp_152__ = (__temp_150__ + __temp_151__);
float __temp_153__;
__temp_153__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-2))]);
float __temp_154__;
__temp_154__ = (__temp_152__ + __temp_153__);
float __temp_155__;
__temp_155__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-2))]);
float __temp_156__;
__temp_156__ = (__temp_154__ + __temp_155__);
float __temp_157__;
__temp_157__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-2))]);
float __temp_158__;
__temp_158__ = (__temp_156__ + __temp_157__);
float __temp_159__;
__temp_159__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(-1))]);
float __temp_160__;
__temp_160__ = (__temp_158__ + __temp_159__);
float __temp_161__;
__temp_161__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
float __temp_162__;
__temp_162__ = (__temp_160__ + __temp_161__);
float __temp_163__;
__temp_163__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
float __temp_164__;
__temp_164__ = (__temp_162__ + __temp_163__);
float __temp_165__;
__temp_165__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
float __temp_166__;
__temp_166__ = (__temp_164__ + __temp_165__);
float __temp_167__;
__temp_167__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(-1))]);
float __temp_168__;
__temp_168__ = (__temp_166__ + __temp_167__);
float __temp_169__;
__temp_169__ = (5 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__)]);
float __temp_170__;
__temp_170__ = (__temp_168__ + __temp_169__);
float __temp_171__;
__temp_171__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
float __temp_172__;
__temp_172__ = (__temp_170__ + __temp_171__);
float __temp_173__;
__temp_173__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
float __temp_174__;
__temp_174__ = (__temp_172__ + __temp_173__);
float __temp_175__;
__temp_175__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
float __temp_176__;
__temp_176__ = (__temp_174__ + __temp_175__);
float __temp_177__;
__temp_177__ = (5 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__)]);
float __temp_178__;
__temp_178__ = (__temp_176__ + __temp_177__);
float __temp_179__;
__temp_179__ = (4 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(1))]);
float __temp_180__;
__temp_180__ = (__temp_178__ + __temp_179__);
float __temp_181__;
__temp_181__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
float __temp_182__;
__temp_182__ = (__temp_180__ + __temp_181__);
float __temp_183__;
__temp_183__ = (12 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
float __temp_184__;
__temp_184__ = (__temp_182__ + __temp_183__);
float __temp_185__;
__temp_185__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
float __temp_186__;
__temp_186__ = (__temp_184__ + __temp_185__);
float __temp_187__;
__temp_187__ = (4 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(1))]);
float __temp_188__;
__temp_188__ = (__temp_186__ + __temp_187__);
float __temp_189__;
__temp_189__ = (2 * __var_2__[__iter_6__+(-2)+(M-0)*(__iter_7__+(2))]);
float __temp_190__;
__temp_190__ = (__temp_188__ + __temp_189__);
float __temp_191__;
__temp_191__ = (4 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(2))]);
float __temp_192__;
__temp_192__ = (__temp_190__ + __temp_191__);
float __temp_193__;
__temp_193__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(2))]);
float __temp_194__;
__temp_194__ = (__temp_192__ + __temp_193__);
float __temp_195__;
__temp_195__ = (4 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(2))]);
float __temp_196__;
__temp_196__ = (__temp_194__ + __temp_195__);
float __temp_197__;
__temp_197__ = (2 * __var_2__[__iter_6__+(2)+(M-0)*(__iter_7__+(2))]);
float __temp_198__;
__temp_198__ = (__temp_196__ + __temp_197__);
float __temp_199__;
__temp_199__ = (__temp_198__ / 159);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_199__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void gaussian(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N-0)*(M-0)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __var_2__;
cudaMalloc(&__var_2__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
float * __var_3__;
cudaMalloc(&__var_3__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
float * __var_4__;
cudaMalloc(&__var_4__,sizeof(float)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-3) - 2 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-3) - 2 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
1e219f6061e8b57ab452a56df1996fd231d2fca6.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Planar Complex GEMM
This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels supporting
the batched strided mode.
These kernels represent complex matrices by storing the real and imaginary parts of the matrix in
disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts
as either column-major or row-major layouts with a single leading dimension indicating the stride
between columns or rows.
The CUTLASS Library collects multiple template instantiations in a data structure and offers
a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures.
CUTLASS decouples matrix layout from complex transformation, so four transformations
are possible on the A and B operands:
n: column-major
c: column-major complex conjugate
t: row-major
h: row-major complex conjugate
The CUTLASS Library contains many kernel instances specialized for architecture, data type, tile
size, and alignment. This can result in long compile times.
To build strictly the planar complex kernels needed for general application, execute the following
CMake command in an empty build directory.
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex
This builds all planar complex GEMM variants for Volta and Turing architectures.
To build strictly the kernels needed for this example, an even narrower filter string may be
specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for
the 'CN' layout configuration (conjugate A operand with both A and B as column-major).
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_f16*cn
$ make 10_planar_complex
$ ./examples/10_planar_complex/10_planar_complex --m=2048 --n=1024 --k=512 --batch=10
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/gemm_planar_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
hipError_t error = hipSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::complex<float> alpha;
cutlass::complex<float> beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({1024, 1024, 1024}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.real());
cmd.get_cmd_line_argument("alpha_i", alpha.imag());
cmd.get_cmd_line_argument("beta", beta.real());
cmd.get_cmd_line_argument("beta_i", beta.imag());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "10_planar_complex example\n\n"
<< " This example uses the CUTLASS Library to execute Planar Complex GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m <int> GEMM M dimension\n"
<< " --n <int> GEMM N dimension\n"
<< " --k <int> GEMM K dimension\n"
<< " --batch <int> Number of GEMM operations executed in one batch\n"
<< " --alpha <f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i <f32> Epilogue scalar alpha (imaginary part)\n"
<< " --beta <f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i <f32> Epilogue scalar beta (imaginary part)\n\n"
<< " --iterations <int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/10_planar_complex/10_planar_complex --batch=7 --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --alpha_i=-2 --beta=0.707 --beta_i=-.707\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 4;
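// (each complex-valued multiply-add expands to 4 real-valued multiply-adds,
// hence the factor of 4 above)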
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance test environment for planar complex
class TestbedPlanarComplex {
public:
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = cutlass::half_t;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementCompute = float;
using ElementAccumulator = float;
//
// Data members
//
cutlass::library::Handle handle;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::DeviceAllocation<ElementA> tensor_A;
cutlass::DeviceAllocation<ElementB> tensor_B;
cutlass::DeviceAllocation<ElementC> tensor_C;
cutlass::DeviceAllocation<ElementC> tensor_D;
cutlass::DeviceAllocation<ElementC> tensor_D_ref;
//
// Methods
//
TestbedPlanarComplex(
Options const &options
):
problem_size(options.problem_size), batch_count(options.batch_count) {
// Allocate device memory for batched strided GEMM
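// Planar complex storage: each batch stores its real part (m*k, k*n, or m*n
// elements) immediately followed by its imaginary part, hence the trailing
// factor of 2 in every allocation below and in the batch strides used later.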
tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2);
tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2);
tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
}
void initialize() {
uint64_t seed = 1073;
// Use small integers to simplify correctness checking
int scope_max = 6;
int scope_min = -6;
cutlass::reference::device::BlockFillRandomUniform(
tensor_A.get(), tensor_A.size(), seed, ElementA(scope_max), ElementA(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_B.get(), tensor_B.size(), seed * 2019, ElementB(scope_max), ElementB(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_C.get(), tensor_C.size(), seed * 2020, ElementC(scope_max), ElementC(scope_min), 0);
}
Result profile(Options const &options) {
Result result;
initialize();
ElementA *ptr_A = tensor_A.get();
ElementB *ptr_B = tensor_B.get();
ElementC *ptr_C = tensor_C.get();
ElementC *ptr_D = tensor_D.get();
int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2;
int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2;
int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2;
int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2;
int lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0);
int ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0);
int ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k();
int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n();
int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n();
int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n();
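// The real and imaginary planes of each operand share the same leading
// dimension here, which is why the same lda/ldb/ldc/ldd value is passed twice
// per operand to gemm_planar_complex() below.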
//
// Construct events
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
//
// Execute the planar complex GEMM kernel via the CUTLASS Library's
// dispatch routines.
//
// Note, for planar complex GEMM kernels, all numeric type arguments
// specify the data type of the base real types. These are understood to
// apply to planar complex representations of matrices in memory and to complex<T>
// structures for scalars.
//
// See tools/library/include/cutlass/library/handle.h for more details.
//
result.status = handle.gemm_planar_complex(
problem_size.m(), // GEMM M dimension
problem_size.n(), // GEMM N dimension
problem_size.k(), // GEMM K dimension
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars
&options.alpha, // Pointer to alpha scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix
cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand
ptr_A, // Pointer to real part of A matrix
ptr_A + imag_stride_A, // Pointer to imaginary part of A matrix
lda, // Leading dimension of real part of A matrix
lda, // Leading dimension of imaginary part of A matrix
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix
cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand
ptr_B, // Pointer to real part of B matrix
ptr_B + imag_stride_B, // Pointer to imaginary part of B matrix
ldb, // Leading dimension of real part of B matrix
ldb, // Leading dimension of imaginary part of B matrix
&options.beta, // Pointer to beta scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices
ptr_C, // Pointer to real part of C matrix
ptr_C + imag_stride_C, // Pointer to imaginary part of C matrix
ldc, // Leading dimension of real part of C matrix
ldc, // Leading dimension of imaginary part of C matrix
ptr_D, // Pointer to real part of D matrix
ptr_D + imag_stride_D, // Pointer to imaginary part of D matrix
ldd, // Leading dimension of real part of D matrix
ldd, // Leading dimension of imaginary part of D matrix
batch_count, // Number of batched elements
batch_stride_A, // Stride between batches of real parts of A matrix
batch_stride_A, // Stride between batches of imaginary parts of A matrix
batch_stride_B, // Stride between batches of real parts of B matrix
batch_stride_B, // Stride between batches of imaginary parts of B matrix
batch_stride_C, // Stride between batches of real parts of C matrix
batch_stride_C, // Stride between batches of imaginary parts of C matrix
batch_stride_D, // Stride between batches of real parts of D matrix
batch_stride_D // Stride between batches of imaginary parts of D matrix
);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS internal error - configuration not supported" << std::endl;
return result;
}
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
if (handle.get_last_operation()) {
std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl;
}
//
// Compute reference in device code
//
if (options.reference_check) {
result.passed = true;
for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) {
cutlass::reference::device::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
options.alpha,
{tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A},
cutlass::ComplexTransform::kConjugate,
{tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B},
cutlass::ComplexTransform::kNone,
options.beta,
{tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C},
{tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D}
);
ElementC epsilon = 0.1_hf;
ElementC nonzero_floor = 0.1_hf;
result.passed = cutlass::reference::device::BlockCompareRelativelyEqual(
tensor_D.get() + idx * batch_stride_D,
tensor_D_ref.get() + idx * batch_stride_D,
batch_stride_D,
epsilon,
nonzero_floor
);
}
if (result.passed) {
std::cout << "Reference check passed." << std::endl;
}
else {
std::cerr << "Error - reference check failed." << std::endl;
}
}
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
// Volta Tensor Core operations are first available in CUDA 10.1 Toolkit.
//
// Turing Tensor Core operations are first available in CUDA 10.2 Toolkit.
//
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cerr << "Volta Tensor Core operations must be run on a machine with compute capability at least 70."
<< std::endl;
// Returning zero so this test passes on older architectures even though its actions are no-op.
return 0;
}
else if (props.major == 7 && props.minor <= 2) {
//
// If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are no-op.
return 0;
}
}
else if (props.major == 7 && props.minor >= 5) {
//
// If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are no-op.
return 0;
}
}
else {
// NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond.
//
// fall through
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
TestbedPlanarComplex testbed(options);
Result result = testbed.profile(options);
return result.passed ? 0 : -1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| 1e219f6061e8b57ab452a56df1996fd231d2fca6.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Planar Complex GEMM
This example demonstrates the CUTLASS Library's exposure of planar complex GEMM kernels supporting
the batched strided mode.
These kernels represent complex matrices by storing the real and imaginary parts of the matrix in
disjoint regions in memory. These real-valued matrices are stored using existing cuBLAS layouts
as either column-major or row-major layouts with a single leading dimension indicating the stride
between columns or rows.
The CUTLASS Library collects multiple template instantiations in a data structure and offers
a BLAS-like dispatch API to invoke the appropriate kernel on the Volta or Turing architectures.
CUTLASS decouples matrix layout from complex transformation, so four transformations
are possible on the A and B operands:
n: column-major
c: column-major complex conjugate
t: row-major
h: row-major complex conjugate
The CUTLASS Library contains many kernel instances specialized for architecture, data type, tile
size, and alignment. This can result in long compile times.
To build strictly the planar complex kernels needed for general application, execute the following
CMake command in an empty build directory.
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_*gemm_planar_complex
This builds all planar complex GEMM variants for Volta and Turing architectures.
To build strictly the kernels needed for this example, an even narrower filter string may be
specified as follows. This only builds planar complex GEMMs targeting Tensor Cores for
the 'CN' layout configuration (conjugate A operand with both A and B as column-major).
$ cmake .. -DCUTLASS_NVCC_ARCHS="70;75;80" \
-DCUTLASS_LIBRARY_KERNELS=cutlass_tensorop_f16_s*gemm_planar_complex_f16*cn
$ make 10_planar_complex
$ ./examples/10_planar_complex/10_planar_complex --m=2048 --n=1024 --k=512 --batch=10
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/distribution.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/tensor_view_io.h"
#include "cutlass/util/host_tensor_planar_complex.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/gemm_planar_complex.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::complex<float> alpha;
cutlass::complex<float> beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({1024, 1024, 1024}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch", batch_count);
cmd.get_cmd_line_argument("alpha", alpha.real());
cmd.get_cmd_line_argument("alpha_i", alpha.imag());
cmd.get_cmd_line_argument("beta", beta.real());
cmd.get_cmd_line_argument("beta_i", beta.imag());
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "10_planar_complex example\n\n"
<< " This example uses the CUTLASS Library to execute Planar Complex GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m <int> GEMM M dimension\n"
<< " --n <int> GEMM N dimension\n"
<< " --k <int> GEMM K dimension\n"
<< " --batch <int> Number of GEMM operations executed in one batch\n"
<< " --alpha <f32> Epilogue scalar alpha (real part)\n"
<< " --alpha_i <f32> Epilogue scalar alpha (imaginary part)\n"
<< " --beta <f32> Epilogue scalar beta (real part)\n\n"
<< " --beta_i <f32> Epilogue scalar beta (imaginary part)\n\n"
<< " --iterations <int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/10_planar_complex/10_planar_complex --batch=7 --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --alpha_i=-2 --beta=0.707 --beta_i=-.707\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count * 4;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
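// Worked example: the default problem (m = n = k = 1024, batch_count = 1) performs
// 4 * 1024^3 ~= 4.29e9 real-valued multiply-adds, i.e. roughly 8.59 GFLOP for a single
// batched GEMM call.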
};
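// Illustrative sketch of the planar complex layout described in the header comment:
// each complex matrix is stored as two disjoint real-valued planes separated by an
// imaginary stride. The helper below is an assumption added for exposition only and is
// unused; the example itself simply passes ptr and ptr + imag_stride to the library
// call in TestbedPlanarComplex::profile().
template <typename Element>
Element const *planar_imag_ptr(
Element const *real_base,    // pointer to the real-valued plane
int64_t imag_stride,         // element offset from the real plane to the imaginary plane
int row, int col, int ld) {  // column-major coordinates and leading dimension
return real_base + imag_stride + int64_t(col) * ld + row;
}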
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Performance test environment for planar complex
class TestbedPlanarComplex {
public:
using ElementA = cutlass::half_t;
using LayoutA = cutlass::layout::ColumnMajor;
using ElementB = cutlass::half_t;
using LayoutB = cutlass::layout::ColumnMajor;
using ElementC = cutlass::half_t;
using LayoutC = cutlass::layout::ColumnMajor;
using ElementCompute = float;
using ElementAccumulator = float;
//
// Data members
//
cutlass::library::Handle handle;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
cutlass::DeviceAllocation<ElementA> tensor_A;
cutlass::DeviceAllocation<ElementB> tensor_B;
cutlass::DeviceAllocation<ElementC> tensor_C;
cutlass::DeviceAllocation<ElementC> tensor_D;
cutlass::DeviceAllocation<ElementC> tensor_D_ref;
//
// Methods
//
TestbedPlanarComplex(
Options const &options
):
problem_size(options.problem_size), batch_count(options.batch_count) {
// Allocate device memory for batched strided GEMM
tensor_A.reset(int64_t(problem_size.m()) * problem_size.k() * batch_count * 2);
tensor_B.reset(int64_t(problem_size.k()) * problem_size.n() * batch_count * 2);
tensor_C.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
tensor_D_ref.reset(int64_t(problem_size.m()) * problem_size.n() * batch_count * 2);
}
void initialize() {
uint64_t seed = 1073;
// Use small integers to simplify correctness checking
int scope_max = 6;
int scope_min = -6;
cutlass::reference::device::BlockFillRandomUniform(
tensor_A.get(), tensor_A.size(), seed, ElementA(scope_max), ElementA(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_B.get(), tensor_B.size(), seed * 2019, ElementB(scope_max), ElementB(scope_min), 0);
cutlass::reference::device::BlockFillRandomUniform(
tensor_C.get(), tensor_C.size(), seed * 2020, ElementC(scope_max), ElementC(scope_min), 0);
}
Result profile(Options const &options) {
Result result;
initialize();
ElementA *ptr_A = tensor_A.get();
ElementB *ptr_B = tensor_B.get();
ElementC *ptr_C = tensor_C.get();
ElementC *ptr_D = tensor_D.get();
int64_t batch_stride_A = int64_t(problem_size.m()) * problem_size.k() * 2;
int64_t batch_stride_B = int64_t(problem_size.k()) * problem_size.n() * 2;
int64_t batch_stride_C = int64_t(problem_size.m()) * problem_size.n() * 2;
int64_t batch_stride_D = int64_t(problem_size.m()) * problem_size.n() * 2;
int lda = LayoutA::packed({problem_size.m(), problem_size.k()}).stride(0);
int ldb = LayoutB::packed({problem_size.k(), problem_size.n()}).stride(0);
int ldc = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int ldd = LayoutC::packed({problem_size.m(), problem_size.n()}).stride(0);
int64_t imag_stride_A = int64_t(problem_size.m()) * problem_size.k();
int64_t imag_stride_B = int64_t(problem_size.k()) * problem_size.n();
int64_t imag_stride_C = int64_t(problem_size.m()) * problem_size.n();
int64_t imag_stride_D = int64_t(problem_size.m()) * problem_size.n();
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
//
// Execute the planar complex GEMM kernel via the CUTLASS Library's
// dispatch routines.
//
// Note, for planar complex GEMM kernels, all numeric type arguments
// specify the data type of the base real types. These are understood to
// apply to planar complex representations of matrices in memory and to complex<T>
// structures for scalars.
//
// See tools/library/include/cutlass/library/handle.h for more details.
//
result.status = handle.gemm_planar_complex(
problem_size.m(), // GEMM M dimension
problem_size.n(), // GEMM N dimension
problem_size.k(), // GEMM K dimension
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued accumulation
cutlass::library::NumericTypeID::kF32, // Base data type of complex-valued alpha/beta scalars
&options.alpha, // Pointer to alpha scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued A matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of A matrix
cutlass::library::ComplexTransform::kConjugate, // Complex transformation on A matrix operand
ptr_A, // Pointer to real part of A matrix
ptr_A + imag_stride_A, // Pointer to imaginary part of A matrix
lda, // Leading dimension of real part of A matrix
lda, // Leading dimension of imaginary part of A matrix
cutlass::library::NumericTypeID::kF16, // Base data type of complex-valued B matrix
cutlass::library::LayoutTypeID::kColumnMajor, // Layout of B matrix
cutlass::library::ComplexTransform::kNone, // Complex transformation on B matrix operand
ptr_B, // Pointer to real part of B matrix
ptr_B + imag_stride_B, // Pointer to imaginary part of B matrix
ldb, // Leading dimension of real part of B matrix
ldb, // Leading dimension of imaginary part of B matrix
&options.beta, // Pointer to beta scalar, of type complex<T>
cutlass::library::NumericTypeID::kF16, // Base data type of complex valued C and D matrices
ptr_C, // Pointer to real part of C matrix
ptr_C + imag_stride_C, // Pointer to imaginary part of C matrix
ldc, // Leading dimension of real part of C matrix
ldc, // Leading dimension of imaginary part of C matrix
ptr_D, // Pointer to real part of D matrix
ptr_D + imag_stride_D, // Pointer to imaginary part of D matrix
ldd, // Leading dimension of real part of D matrix
ldd, // Leading dimension of imaginary part of D matrix
batch_count, // Number of batched elements
batch_stride_A, // Stride between batches of real parts of A matrix
batch_stride_A, // Stride between batches of imaginary parts of A matrix
batch_stride_B, // Stride between batches of real parts of B matrix
batch_stride_B, // Stride between batches of imaginary parts of B matrix
batch_stride_C, // Stride between batches of real parts of C matrix
batch_stride_C, // Stride between batches of imaginary parts of C matrix
batch_stride_D, // Stride between batches of real parts of D matrix
batch_stride_D // Stride between batches of imaginary parts of D matrix
);
if (result.status != cutlass::Status::kSuccess) {
std::cerr << "CUTLASS internal error - configuration not supported" << std::endl;
return result;
}
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return result;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
if (handle.get_last_operation()) {
std::cout << "Recently executed '" << handle.get_last_operation()->description().name << "'" << std::endl;
}
//
// Compute reference in device code
//
if (options.reference_check) {
result.passed = true;
for (int64_t idx = 0; result.passed && idx < int64_t(batch_count); ++idx) {
cutlass::reference::device::GemmPlanarComplex<
ElementA, LayoutA,
ElementB, LayoutB,
ElementC, LayoutC,
ElementAccumulator
>(
problem_size,
options.alpha,
{tensor_A.get() + idx * batch_stride_A, lda, imag_stride_A},
cutlass::ComplexTransform::kConjugate,
{tensor_B.get() + idx * batch_stride_B, ldb, imag_stride_B},
cutlass::ComplexTransform::kNone,
options.beta,
{tensor_C.get() + idx * batch_stride_C, ldc, imag_stride_C},
{tensor_D_ref.get() + idx * batch_stride_D, ldd, imag_stride_D}
);
ElementC epsilon = 0.1_hf;
ElementC nonzero_floor = 0.1_hf;
result.passed = cutlass::reference::device::BlockCompareRelativelyEqual(
tensor_D.get() + idx * batch_stride_D,
tensor_D_ref.get() + idx * batch_stride_D,
batch_stride_D,
epsilon,
nonzero_floor
);
}
if (result.passed) {
std::cout << "Reference check passed." << std::endl;
}
else {
std::cerr << "Error - reference check failed." << std::endl;
}
}
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
return result;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char const **args) {
//
// This example uses mma.sync to directly access Tensor Cores to achieve peak performance.
//
// Volta Tensor Core operations are first available in CUDA 10.1 Toolkit.
//
// Turing Tensor Core operations are first available in CUDA 10.2 Toolkit.
//
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major < 7) {
std::cerr << "Volta Tensor Core operations must be run on a machine with compute capability at least 70."
<< std::endl;
// Returning zero so this test passes on older architectures even though its actions are no-op.
return 0;
}
else if (props.major == 7 && props.minor <= 2) {
//
// If running on the Volta architecture, at least CUDA 10.1 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA 10.1 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are no-op.
return 0;
}
}
else if (props.major == 7 && props.minor >= 5) {
//
// If running on the Turing architecture, at least CUDA 10.2 Toolkit is required to run this example.
//
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) {
std::cerr << "Turing Tensor Core operations must be compiled with CUDA 10.2 Toolkit or later." << std::endl;
// Returning zero so this test passes on older Toolkits even though its actions are no-op.
return 0;
}
}
else {
// NVIDIA Ampere Architecture GPUs (SM80 and later) are fully supported on CUDA 11 Toolkit and beyond.
//
// fall through
}
//
// Parse options
//
Options options;
options.parse(argc, args);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
// Execute one problem size
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
TestbedPlanarComplex testbed(options);
Result result = testbed.profile(options);
return result.passed ? 0 : -1;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
2026cfeffaf05bc8d6870b5305151570384a9d0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "utilities.cuh"
/**
* Multi-GPU based 2D multi-slice projection and backprojection
* Author: Rui Liu
* Date: Sep. 18, 2016
*/
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <hip/hip_vector_types.h>
#include "multiSlices_ker.cuh"
typedef unsigned char byte;
#define BLKX 32
#define BLKY 8
#define BLKZ 1
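// Layout note added for exposition: projections of size (SLN, DNU, PN) and volumes of
// size (SLN, XN, YN) are stored with the slice index varying fastest, i.e. element
// (s, u, p) of a projection lives at index (p * DNU + u) * SLN + s. The hypothetical
// helper below only illustrates the convention relied upon by the split/combine
// routines in namespace DD2; it is not part of the original interface.
static inline int projectionIndex(int s, int u, int p, int SLN, int DNU)
{
return (p * DNU + u) * SLN + s; // slice fastest, then detector channel, then view
}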
namespace DD2
{
struct CosSinFunctor
{
__host__ __device__ float2 operator()(float ang)
{
return make_float2(cos(ang),sin(ang));
}
};
// Split the projection data
void splitProjection(
thrust::host_vector<thrust::host_vector<float> >& subProj,
thrust::host_vector<thrust::host_vector<float2> >& subCossin,
float* proj, thrust::host_vector<float2>& cossin,
const int SLN, const int DNU, const int PN,
thrust::host_vector<int> sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
subProj[i].resize(sSLN[i] * DNU * PN);
subCossin[i].resize(sSLN[i] * PN);
int curPos = sSLN[i];
for(int p = 0; p != DNU * PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subProj[i][subPos] = proj[totPos];
}
}
for(int p = 0; p != PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subCossin[i][subPos] = cossin[totPos];
}
}
psum += sSLN[i];
}
}
void combineProjection(
thrust::host_vector<thrust::host_vector<float> >& subProj,
float* proj, const int SLN, const int DNU, const int PN,
std::vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
int curPos = sSLN[i];
for(int p = 0; p < DNU * PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
proj[totPos] = subProj[i][subPos];
}
}
psum += sSLN[i];
}
}
void combineVolume(
thrust::host_vector<thrust::host_vector<float> >& subVol,
float* vol, const int SLN, const int XN, const int YN,
thrust::host_vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
//omp_set_num_threads();
for(int i = 0; i < gpuNum; ++i)
{
int curPos = sSLN[i];
#pragma omp parallel for
for(int p = 0; p < XN * YN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
vol[totPos] = subVol[i][subPos];
}
}
psum += sSLN[i];
}
}
// Split the volume
void splitVolume(
std::vector<std::vector<float> >& subVol,
thrust::host_vector<thrust::host_vector<float2> >& subCossin,
float* vol,
thrust::host_vector<float2> cossin,
const int SLN, const int XN, const int YN, const int PN,
std::vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
subVol[i].resize(sSLN[i] * XN * YN);
int curPos = sSLN[i];
for(int p = 0; p != XN * YN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subVol[i][subPos] = vol[totPos];
}
}
for(int p = 0; p != PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subCossin[i][subPos] = cossin[totPos];
}
}
psum += sSLN[i];
}
}
// Copy the volume from the original layout into two zero-padded copies (ZXY and ZYX order) used to build the summed area tables
template<typename Ta, typename Tb>
__global__ void naive_copyToTwoVolumes(Ta* in_ZXY,
Tb* out_ZXY, Tb* out_ZYX,
int XN, int YN, int ZN)
{
int idz = threadIdx.x + blockIdx.x * blockDim.x;
int idx = threadIdx.y + blockIdx.y * blockDim.y;
int idy = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < XN && idy < YN && idz < ZN)
{
int i = (idy * XN + idx) * ZN + idz;
int ni = (idy * (XN + 1) + (idx + 1)) * ZN + idz;
int nj = (idx * (YN + 1) + (idy + 1)) * ZN + idz;
out_ZXY[ni] = in_ZXY[i];
out_ZYX[nj] = in_ZXY[i];
}
}
__global__ void horizontalIntegral(float* prj, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < DNV && pIdx < PN)
{
int headPrt = pIdx * DNU * DNV + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * DNV] = prj[headPrt + ii * DNV] + prj[headPrt + (ii - 1) * DNV];
}
}
}
__global__ void addOneSidedZeroBoarder(const float* prj_in, float* prj_out, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int idu = threadIdx.y + blockIdx.y * blockDim.y;
int pn = threadIdx.z + blockIdx.z * blockDim.z;
if (idu < DNU && idv < DNV && pn < PN)
{
int i = (pn * DNU + idu) * DNV + idv;
int ni = (pn * (DNU + 1) + (idu + 1)) * (DNV + 1) + idv + 1;
prj_out[ni] = prj_in[i];
}
}
__global__ void verticalIntegral2(float* prj, int ZN, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
{
int currentHead = idx * ZN;
for (int ii = 1; ii < ZN; ++ii)
{
prj[currentHead + ii] = prj[currentHead + ii] + prj[currentHead + ii - 1];
}
}
}
__global__ void heorizontalIntegral2(float* prj, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < DNV && pIdx < PN)
{
int headPrt = pIdx * DNU * DNV + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * DNV] = prj[headPrt + ii * DNV] + prj[headPrt + (ii - 1) * DNV];
}
}
}
__global__ void addOneSidedZeroBoarder_multiSlice_Fan(const float* prj_in, float* prj_out, int DNU, int SLN, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int idu = threadIdx.y + blockIdx.y * blockDim.y;
int pn = threadIdx.z + blockIdx.z * blockDim.z;
if (idu < DNU && idv < SLN && pn < PN)
{
int i = (pn * DNU + idu) * SLN + idv;
int ni = (pn * (DNU + 1) + (idu + 1)) * SLN + idv;
prj_out[ni] = prj_in[i];
}
}
__global__ void heorizontalIntegral_multiSlice_Fan(float* prj, int DNU, int SLN, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < SLN && pIdx < PN)
{
int headPrt = pIdx * DNU * SLN + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * SLN] = prj[headPrt + ii * SLN] + prj[headPrt + (ii - 1) * SLN];
}
}
}
}
__global__ void MultiSlices_DDPROJ_ker(
hipTextureObject_t volTex1,
hipTextureObject_t volTex2,
float* proj,
float2 s, // source position
const float2* __restrict__ cossin,
const float* __restrict__ xds,
const float* __restrict__ yds,
const float* __restrict__ bxds,
const float* __restrict__ byds,
float2 objCntIdx,
float dx,
int XN, int YN, int SLN,
int DNU, int PN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && detIdU < DNU && angIdx < PN)
{
float2 dir = cossin[angIdx * SLN + slnIdx]; // cossin;
float2 cursour = make_float2(
s.x * dir.x - s.y * dir.y,
s.x * dir.y + s.y * dir.x); // current source position;
s = dir;
float2 curDet = make_float2(
xds[detIdU] * s.x - yds[detIdU] * s.y,
xds[detIdU] * s.y + yds[detIdU] * s.x);
float2 curDetL = make_float2(
bxds[detIdU] * s.x - byds[detIdU] * s.y,
bxds[detIdU] * s.y + byds[detIdU] * s.x);
float2 curDetR = make_float2(
bxds[detIdU+1] * s.x - byds[detIdU+1] * s.y,
bxds[detIdU+1] * s.y + byds[detIdU+1] * s.x);
dir = normalize(curDet - cursour);
float factL = 0;
float factR = 0;
float constVal = 0;
float obj = 0;
float realL = 0;
float realR = 0;
float intersectLength = 0;
float invdx = 1.0f / dx;
float summ;
if(fabsf(s.x) <= fabsf(s.y))
{
summ = 0;
factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x);
factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x);
constVal = dx / fabsf(dir.x);
#pragma unroll
for (int ii = 0; ii < XN; ++ii)
{
obj = (ii - objCntIdx.x) * dx;
realL = (obj - curDetL.x) * factL + curDetL.y;
realR = (obj - curDetR.x) * factR + curDetR.y;
intersectLength = realR - realL;
realL = realL * invdx + objCntIdx.y + 1;
realR = realR * invdx + objCntIdx.y + 1;
summ += (tex3D<float>(volTex2, slnIdx + 0.5f, realR, ii + 0.5) - tex3D<float>(volTex2, slnIdx + 0.5, realL, ii + 0.5)) / intersectLength;
}
__syncthreads();
proj[(angIdx * DNU + detIdU) * SLN + slnIdx] = summ * constVal;
}
else
{
summ = 0;
factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y);
factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y);
constVal = dx / fabsf(dir.y);
#pragma unroll
for (int ii = 0; ii < YN; ++ii)
{
obj = (ii - objCntIdx.y) * dx;
realL = (obj - curDetL.y) * factL + curDetL.x;
realR = (obj - curDetR.y) * factR + curDetR.x;
intersectLength = realR - realL;
realL = realL * invdx + objCntIdx.x + 1;
realR = realR * invdx + objCntIdx.x + 1;
summ += (tex3D<float>(volTex1, slnIdx + 0.5f, realR, ii + 0.5) - tex3D<float>(volTex1, slnIdx + 0.5, realL, ii + 0.5)) / intersectLength;
}
__syncthreads();
proj[(angIdx * DNU + detIdU) * SLN + slnIdx] = summ * constVal;
}
}
}
void MultiSlices_DDPROJ(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN * PN
int PN, // # of view angles
byte* mask,
int* startidx,
const int gpuNum)
{
// Apply the mask to the image volume
for(int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for(int z = 0; z != SLN; ++z)
{
hvol[i * SLN + z] *= v;
}
}
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries<float>(DNU + 1, xds, bxds);
DD3Boundaries<float>(DNU + 1, yds, byds);
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
std::vector<int> startIdx(startidx, startidx + gpuNum);
std::vector<int> endIdx(gpuNum);
std::copy(startIdx.begin() + 1,
startIdx.end(), endIdx.begin());
endIdx[gpuNum - 1] = SLN;
std::vector<int> sSLN(gpuNum);
//Split the volumes
std::vector<std::vector<float> > subVol(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
for(int i = 0; i != gpuNum; ++i)
{
//subVol[i].resize(sSLN[i] * XN * YN);
sSLN[i] = endIdx[i] - startIdx[i];
subCossin[i].resize(sSLN[i] * PN);
}
thrust::host_vector<float2> cossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
cossin.begin(),[=](float ang){
return make_float2(cosf(ang), sinf(ang));
});
DD2::splitVolume(subVol, subCossin, hvol, cossin,
SLN, XN, YN, PN, sSLN, gpuNum);
// Generate multiple streams
std::vector<hipStream_t> stream(gpuNum);
std::vector<int> siz(gpuNum, 0);
std::vector<int> nsiz_ZXY(gpuNum, 0);
std::vector<int> nsiz_ZYX(gpuNum, 0);
thrust::host_vector<thrust::device_vector<float> > SATZXY(gpuNum);
thrust::host_vector<thrust::device_vector<float> > SATZYX(gpuNum);
thrust::host_vector<hipExtent> volumeSize1(gpuNum);
thrust::host_vector<hipExtent> volumeSize2(gpuNum);
thrust::host_vector<hipChannelFormatDesc> channelDesc1(gpuNum);
thrust::host_vector<hipChannelFormatDesc> channelDesc2(gpuNum);
thrust::host_vector<hipArray*> d_volumeArray1(gpuNum);
thrust::host_vector<hipArray*> d_volumeArray2(gpuNum);
thrust::host_vector<hipMemcpy3DParms> copyParams1(gpuNum);
thrust::host_vector<hipMemcpy3DParms> copyParams2(gpuNum);
thrust::host_vector<hipResourceDesc> resDesc1(gpuNum);
thrust::host_vector<hipResourceDesc> resDesc2(gpuNum);
thrust::host_vector<hipTextureDesc> texDesc1(gpuNum);
thrust::host_vector<hipTextureDesc> texDesc2(gpuNum);
thrust::host_vector<hipTextureObject_t> texObj1(gpuNum);
thrust::host_vector<hipTextureObject_t> texObj2(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bxds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_byds(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > d_cossin(gpuNum);
thrust::host_vector<thrust::host_vector<float> > h_prj(gpuNum);
dim3 pblk(BLKX,BLKY,BLKZ);
thrust::host_vector<dim3> pgid(gpuNum);
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0 ; i < gpuNum; ++i)
{
hipSetDevice(i);
hipStreamCreate(&stream[i]); // streams are used by every kernel launch below and destroyed at the end of this function, but were never created
siz[i] = XN * YN * sSLN[i];
nsiz_ZXY[i] = sSLN[i] * (XN + 1) * YN;
nsiz_ZYX[i] = sSLN[i] * (YN + 1) * XN;
SATZXY[i].resize(nsiz_ZXY[i]);
SATZYX[i].resize(nsiz_ZYX[i]);
thrust::device_vector<float> vol = subVol[i];
dim3 blk(64,16,1);
dim3 gid(
(sSLN[i] + blk.x - 1) / blk.x,
(XN + blk.y - 1) / blk.y,
(YN + blk.z - 1) / blk.z);
hipLaunchKernelGGL(( DD2::naive_copyToTwoVolumes), dim3(gid),dim3(blk),0,stream[i],
(thrust::raw_pointer_cast(&vol[0])),
(thrust::raw_pointer_cast(&SATZXY[i][0])),
(thrust::raw_pointer_cast(&SATZYX[i][0])),
XN,YN,sSLN[i]);
vol.clear();
blk.x = 64;
blk.y = 16;
blk.z = 1;
gid.x = (sSLN[i] + blk.x - 1) / blk.x;
gid.y = (YN + blk.y - 1) / blk.y;
gid.z = 1;
hipLaunchKernelGGL(( DD2::horizontalIntegral), dim3(gid), dim3(blk), 0, stream[i],
thrust::raw_pointer_cast(&SATZXY[i][0]),
XN + 1, sSLN[i], YN);
blk.x = 64;
blk.y = 16;
blk.z = 1;
gid.x = (sSLN[i] + blk.x - 1) / blk.x;
gid.y = (XN + blk.y - 1) / blk.y;
gid.z = 1;
hipLaunchKernelGGL(( DD2::horizontalIntegral), dim3(gid), dim3(blk), 0, stream[i],
thrust::raw_pointer_cast(&SATZYX[i][0]),
YN + 1, sSLN[i], XN);
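// At this point SATZXY / SATZYX hold running sums of the zero-padded volume along the
// X and Y directions respectively; the projection kernel below integrates the volume
// over each detector footprint by differencing two fetches from these tables and
// dividing by the footprint length.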
volumeSize1[i].width = sSLN[i];
volumeSize1[i].height = XN + 1;
volumeSize1[i].depth = YN;
volumeSize2[i].width = sSLN[i];
volumeSize2[i].height = YN + 1;
volumeSize2[i].depth = XN;
channelDesc1[i] = hipCreateChannelDesc<float>();
channelDesc2[i] = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_volumeArray1[i], &channelDesc1[i], volumeSize1[i]);
hipMalloc3DArray(&d_volumeArray2[i], &channelDesc2[i], volumeSize2[i]);
copyParams1[i].srcPtr = make_hipPitchedPtr((void*)
thrust::raw_pointer_cast(&SATZXY[i][0]),
volumeSize1[i].width * sizeof(float),
volumeSize1[i].width, volumeSize1[i].height);
copyParams1[i].dstArray = d_volumeArray1[i];
copyParams1[i].extent = volumeSize1[i];
copyParams1[i].kind = hipMemcpyDeviceToDevice;
copyParams2[i].srcPtr = make_hipPitchedPtr((void*)
thrust::raw_pointer_cast(&SATZYX[i][0]),
volumeSize2[i].width * sizeof(float),
volumeSize2[i].width, volumeSize2[i].height);
copyParams2[i].dstArray = d_volumeArray2[i];
copyParams2[i].extent = volumeSize2[i];
copyParams2[i].kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(&copyParams1[i]);
hipMemcpy3D(&copyParams2[i]);
SATZXY[i].clear();
SATZYX[i].clear();
memset(&resDesc1[i], 0, sizeof(resDesc1[i]));
memset(&resDesc2[i], 0, sizeof(resDesc2[i]));
resDesc1[i].resType = hipResourceTypeArray;
resDesc2[i].resType = hipResourceTypeArray;
resDesc1[i].res.array.array = d_volumeArray1[i];
resDesc2[i].res.array.array = d_volumeArray2[i];
memset(&texDesc1[i], 0, sizeof(texDesc1[i]));
memset(&texDesc2[i], 0, sizeof(texDesc2[i]));
texDesc1[i].addressMode[0] = hipAddressModeClamp;
texDesc1[i].addressMode[1] = hipAddressModeClamp;
texDesc1[i].addressMode[2] = hipAddressModeClamp;
texDesc2[i].addressMode[0] = hipAddressModeClamp;
texDesc2[i].addressMode[1] = hipAddressModeClamp;
texDesc2[i].addressMode[2] = hipAddressModeClamp;
texDesc1[i].filterMode = hipFilterModeLinear;
texDesc2[i].filterMode = hipFilterModeLinear;
texDesc1[i].readMode = hipReadModeElementType;
texDesc2[i].readMode = hipReadModeElementType;
texDesc1[i].normalizedCoords = false;
texDesc2[i].normalizedCoords = false;
hipCreateTextureObject(&texObj1[i], &resDesc1[i], &texDesc1[i], nullptr);
hipCreateTextureObject(&texObj2[i], &resDesc2[i], &texDesc2[i], nullptr);
d_prj[i].resize(sSLN[i] * DNU * PN);
h_prj[i].resize(sSLN[i] * DNU * PN);
d_xds[i].resize(DNU);
thrust::copy(xds,xds+DNU, d_xds[i].begin());
d_yds[i].resize(DNU);
thrust::copy(yds,yds+DNU, d_yds[i].begin());
d_bxds[i].resize(DNU + 1);
thrust::copy(bxds,bxds+DNU+1, d_bxds[i].begin());
d_byds[i].resize(DNU + 1);
thrust::copy(byds,byds+DNU+1, d_byds[i].begin());
d_cossin[i].resize(sSLN[i] * PN);
thrust::copy(subCossin[i].begin(), subCossin[i].end(), d_cossin[i].begin());
pgid[i].x = (sSLN[i] + pblk.x - 1) / pblk.x;
pgid[i].y = (DNU + pblk.y - 1) / pblk.y;
pgid[i].z = (PN + pblk.z - 1) / pblk.z;
}
#pragma omp barrier
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
hipLaunchKernelGGL(( MultiSlices_DDPROJ_ker), dim3(pgid[i]),dim3(pblk), 0, stream[i],
texObj1[i],texObj2[i],
thrust::raw_pointer_cast(&d_prj[i][0]),
make_float2(x0,y0),
thrust::raw_pointer_cast(&d_cossin[i][0]),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_bxds[i][0]),
thrust::raw_pointer_cast(&d_byds[i][0]),
make_float2(objCntIdxX, objCntIdxY),
dx,XN,YN,sSLN[i],DNU,PN);
h_prj[i] = d_prj[i];
}
#pragma omp barrier
DD2::combineProjection(h_prj,hprj, SLN, DNU, PN, sSLN, gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
subVol[i].clear();
subCossin[i].clear();
hipStreamDestroy(stream[i]);
hipFreeArray(d_volumeArray1[i]);
hipFreeArray(d_volumeArray2[i]);
hipDestroyTextureObject(texObj1[i]);
hipDestroyTextureObject(texObj2[i]);
d_prj[i].clear();
d_xds[i].clear();
d_yds[i].clear();
d_bxds[i].clear();
d_byds[i].clear();
d_cossin[i].clear();
h_prj[i].clear();
}
#pragma omp barrier
//Clear the data
delete[] bxds;
delete[] byds;
startIdx.clear();
endIdx.clear();
sSLN.clear();
subVol.clear();
subCossin.clear();
cossin.clear();
stream.clear();
siz.clear();
nsiz_ZXY.clear();
nsiz_ZYX.clear();
SATZXY.clear();
SATZYX.clear();
volumeSize1.clear();
volumeSize2.clear();
channelDesc1.clear();
channelDesc2.clear();
d_volumeArray1.clear();
d_volumeArray2.clear();
copyParams1.clear();
copyParams2.clear();
resDesc1.clear();
resDesc2.clear();
texDesc1.clear();
texDesc2.clear();
texObj1.clear();
texObj2.clear();
d_prj.clear();
d_xds.clear();
d_yds.clear();
d_bxds.clear();
d_byds.clear();
d_cossin.clear();
h_prj.clear();
pgid.clear();
}
__global__ void MultiSlices_DDBACK_ker(
hipTextureObject_t prjTexObj,
float* vol,
const byte* __restrict__ msk,
const float2* __restrict__ cossin,
float2 s,
float S2D,
float2 curvox, // imgCenter index
float dx, float dbeta, float detCntIdx,
int2 VN, int SLN, int PN)
{
int3 id;
id.z = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
id.x = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
id.y = threadIdx.z + __umul24(blockIdx.z, blockDim.z);
if(id.z < SLN && id.x < VN.x && id.y < VN.y)
{
if(msk[id.y * VN.x + id.x] != 1)
{
return;
}
curvox = make_float2((id.x - curvox.x) * dx, (id.y - curvox.y) * dx);
float2 cursour;
float idxL, idxR;
float cosVal;
float summ = 0;
float2 cossinT;
float inv_sid = 1.0f / sqrtf(s.x * s.x + s.y * s.y);
float2 dir;
float l_square;
float l;
float alpha;
float deltaAlpha;
//S2D /= ddv;
dbeta = 1.0 / dbeta;
float ddv;
for(int angIdx = 0; angIdx < PN; ++angIdx)
{
cossinT = cossin[angIdx * SLN + id.z];
cursour = make_float2(
s.x * cossinT.x - s.y * cossinT.y,
s.x * cossinT.y + s.y * cossinT.x);
dir = curvox - cursour;
l_square = dir.x * dir.x + dir.y * dir.y;
l = rsqrtf(l_square); // 1 / sqrt(l_square);
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * inv_sid * l);
if(fabsf(cursour.x) > fabsf(cursour.y))
{
ddv = dir.x;
}
else
{
ddv = dir.y;
}
deltaAlpha = ddv / l_square * dx * 0.5;
cosVal = dx / ddv * sqrtf(l_square);
idxL = (alpha - deltaAlpha) * dbeta + detCntIdx + 1.0;
idxR = (alpha + deltaAlpha) * dbeta + detCntIdx + 1.0;
summ += (tex3D<float>(prjTexObj,id.z + 0.5, idxR, angIdx + 0.5) -
tex3D<float>(prjTexObj,id.z + 0.5, idxL, angIdx + 0.5)) * cosVal;
}
__syncthreads();
vol[(id.y * VN.x + id.x) * SLN + id.z] = summ;
}
}
void MultiSlices_DDBACK(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SIZE SHOULD BE (SLN x PN)
int PN, // # of view angles
byte* mask,
int* startidx,
const int gpuNum)
{
std::vector<int> startIdx(startidx, startidx + gpuNum);
std::vector<int> endIdx(startIdx.size());
std::copy(startIdx.begin() + 1, startIdx.end(), endIdx.begin());
endIdx[gpuNum - 1] = SLN;
std::vector<int> sSLN(startIdx.size());// = endIdx - startIdx;
for(int i = 0; i < gpuNum; ++i)
{
sSLN[i] = endIdx[i] - startIdx[i];
}
startIdx.clear();
endIdx.clear();
const float2 objCntIdx(
make_float2((XN - 1.0) * 0.5 - imgXCenter / dx,
(YN - 1.0) * 0.5 - imgYCenter / dx));
const float2 sour(make_float2(x0, y0));
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
const float dbeta = atanf(
(sqrt(powf(xds[1] - xds[0],2.0) + powf(yds[1] - yds[0],2.0)))
/ S2D * 0.5f) * 2.0f;
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries(DNU+1, xds, bxds);
DD3Boundaries(DNU+1, yds, byds);
//Calculate the most left angle
const float detCntIdx = fabsf(atanf(bxds[0] / (y0 - byds[0]))) / dbeta - 0.5f;
delete[] bxds;
delete[] byds;
/////////////////////////////////////////////////////////////////////////////
thrust::host_vector<float2> h_cossin(SLN * PN);
thrust::transform(h_angs, h_angs + PN * SLN,
h_cossin.begin(), [=](float ang)
{return make_float2(cosf(ang),sinf(ang));});
thrust::host_vector<thrust::host_vector<float> > subProj(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > d_msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::host_vector<float> > h_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_subProj(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > d_subCossin(gpuNum);
thrust::host_vector<hipArray*> d_prjArray(gpuNum);
thrust::host_vector<hipTextureObject_t> texObj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prjSAT(gpuNum);
thrust::host_vector<hipStream_t> stream(gpuNum);
thrust::host_vector<int> siz(gpuNum);
thrust::host_vector<int> nsiz(gpuNum);
thrust::host_vector<hipExtent> prjSize(gpuNum);
thrust::host_vector<hipChannelFormatDesc> channelDesc(gpuNum);
dim3 copyBlk(64,16,1);
thrust::host_vector<dim3> copyGid(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
hipStreamCreate(&stream[i]);
subProj[i].resize(sSLN[i] * DNU * PN);
subCossin[i].resize(sSLN[i] * PN);
d_msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, d_msk[i].begin());
d_vol[i].resize(sSLN[i] * XN * YN);
h_vol[i].resize(sSLN[i] * XN * YN);
d_subProj[i].resize(sSLN[i] * DNU * PN);
d_subCossin[i].resize(sSLN[i] * PN);
d_prjSAT[i].resize(sSLN[i] * (DNU + 1) * PN);
}
// Split the projection
DD2::splitProjection(subProj, subCossin, hprj, h_cossin, SLN, DNU,
PN, sSLN, gpuNum);
h_cossin.clear();
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
d_subProj[i] = subProj[i];
subProj[i].clear();
d_subCossin[i] = subCossin[i];
subCossin[i].clear();
siz[i] = DNU * sSLN[i] * PN;
nsiz[i] = (DNU + 1) * sSLN[i] * PN;
copyGid[i].x = (sSLN[i] + copyBlk.x - 1) / copyBlk.x;
copyGid[i].y = (DNU + copyBlk.y - 1) / copyBlk.y;
copyGid[i].z = (PN + copyBlk.z - 1) / copyBlk.z;
hipLaunchKernelGGL(( DD2::addOneSidedZeroBoarder_multiSlice_Fan), dim3(copyGid[i]), dim3(copyBlk), 0, stream[i],
thrust::raw_pointer_cast(&d_subProj[i][0]),
thrust::raw_pointer_cast(&d_prjSAT[i][0]),
DNU, sSLN[i], PN);
copyGid[i].x = (sSLN[i] + copyBlk.x - 1) / copyBlk.x;
copyGid[i].y = (PN + copyBlk.y - 1) / copyBlk.y;
copyGid[i].z = 1;
hipLaunchKernelGGL(( DD2::heorizontalIntegral_multiSlice_Fan), dim3(copyGid[i]), dim3(copyBlk), 0, stream[i],
thrust::raw_pointer_cast(&d_prjSAT[i][0]), DNU + 1, sSLN[i], PN);
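// d_prjSAT[i] now holds, per view and slice, a zero-padded running sum of the projection
// along the detector channels; the backprojection kernel integrates over a detector
// interval by differencing two texture fetches (idxR and idxL) from this table.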
d_subProj[i].clear();
/////////////////////////////////////////////////////////////////
prjSize[i].width = sSLN[i];
prjSize[i].height= DNU + 1;
prjSize[i].depth = PN;
channelDesc[i] = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_prjArray[i], &channelDesc[i], prjSize[i]);
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr(
(void*) thrust::raw_pointer_cast(&d_prjSAT[i][0]),
prjSize[i].width * sizeof(float),
prjSize[i].width, prjSize[i].height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize[i];
copyParams.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(&copyParams);
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.addressMode[1] = hipAddressModeClamp;
texDesc.addressMode[2] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = false;
hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
d_prjSAT[i].clear();
}
#pragma omp barrier
subProj.clear();
d_prjSAT.clear();
subCossin.clear();
d_subProj.clear();
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
dim3 blk(BLKX,BLKY,BLKZ);
dim3 gid(
(sSLN[i] + blk.x - 1) / blk.x,
(XN + blk.y - 1) / blk.y,
(YN + blk.z - 1) / blk.z);
hipLaunchKernelGGL(( MultiSlices_DDBACK_ker), dim3(gid), dim3(blk), 0, stream[i], texObj[i],
thrust::raw_pointer_cast(&d_vol[i][0]),
thrust::raw_pointer_cast(&d_msk[i][0]),
thrust::raw_pointer_cast(&d_subCossin[i][0]),
sour, S2D, objCntIdx,
dx, dbeta, detCntIdx, make_int2(XN, YN), sSLN[i], PN);
h_vol[i] = d_vol[i];
d_vol[i].clear();
d_msk[i].clear();
d_subCossin[i].clear();
hipDestroyTextureObject(texObj[i]);
hipFreeArray(d_prjArray[i]);
hipStreamDestroy(stream[i]);
}
#pragma omp barrier
d_vol.clear();
d_msk.clear();
d_subCossin.clear();
d_prjArray.clear();
texObj.clear();
stream.clear();
siz.clear();
nsiz.clear();
prjSize.clear();
channelDesc.clear();
copyGid.clear();
thrust::host_vector<int> sSLNn = sSLN;
DD2::combineVolume(h_vol, hvol, SLN, XN, YN, sSLNn, gpuNum);
sSLNn.clear();
sSLN.clear();
}
__global__ void MultiSlices_PDPROJ_ker(
hipTextureObject_t texObj,
float* proj,
float2 s,
float* __restrict__ xds,
float* __restrict__ yds,
float2* __restrict__ cossin, // size should be SLN * PN
float2 objCntIdx,
float dx, int SLN, int DNU, int PN, int XN, int YN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int detIdx = threadIdx.y + blockIdx.y * blockDim.y;
int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && detIdx < DNU && angIdx < PN)
{
float2 cssv = cossin[angIdx * SLN + slnIdx];
float2 cursour = make_float2(
s.x * cssv.x - s.y * cssv.y,
s.x * cssv.y + s.y * cssv.x);
float summ = xds[detIdx];
float obj = yds[detIdx];
float idx = 0;
float2 curDet = make_float2(
summ * cssv.x - obj * cssv.y,
summ * cssv.y + obj * cssv.x);
float2 dir = normalize(curDet - cursour);
summ = 0;
obj = 0;
if(fabs(cssv.x) <= fabs(cssv.y))
//if(fabsf(dir.y) <= fabsf(dir.x))
{
summ = 0;
#pragma unroll
for(int ii = 0; ii < XN; ++ii)
{
obj = (ii - objCntIdx.x) * dx;
idx = (obj - curDet.x) / dir.x * dir.y + curDet.y;
idx = idx / dx + objCntIdx.y + 0.5f;
summ += tex3D<float>(texObj, slnIdx + 0.5f, ii + 0.5f, idx);
}
__syncthreads();
proj[(angIdx * DNU + detIdx) * SLN + slnIdx] = summ * dx / fabsf(dir.x);
}
else
{
summ = 0;
#pragma unroll
for(int jj = 0; jj < YN; ++jj)
{
obj = (jj - objCntIdx.y) * dx;
idx = (obj - curDet.y) / dir.y * dir.x + curDet.x;
idx = idx / dx + objCntIdx.x + 0.5f;
summ += tex3D<float>(texObj, slnIdx + 0.5f, idx, jj + 0.5f);
}
__syncthreads();
proj[(angIdx * DNU + detIdx) * SLN + slnIdx] = summ * dx / fabsf(dir.y);
}
}
}
void MultiSlices_PDPROJ(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN * PN
int PN, // # of view angles
byte* mask,
int* startIdx, // This means how many slices will be applied to one GPU
const int gpuNum)
{
thrust::host_vector<float> hangs(h_angs,h_angs + PN * SLN);
// Apply the mask to the image volume
for(int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for(int z = 0; z != SLN; ++z)
{
hvol[i * SLN + z] *= v;
}
}
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
//We do not need the overlapping for projection
std::vector<int> ObjIdx_Start(gpuNum, -1);
std::vector<int> ObjIdx_End(gpuNum, -1);
std::vector<std::vector<float> > subVol(gpuNum);
std::vector<int> sSLN(gpuNum,0);
for(int i = 1; i != gpuNum; ++i)
{
sSLN[i-1] = startIdx[i] - startIdx[i-1];
}
sSLN[gpuNum-1] = SLN - startIdx[gpuNum-1];
std::vector<hipStream_t> stream(gpuNum);
std::vector<hipExtent> volumeSize(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
std::vector<int> siz(gpuNum, 0);
for(int i = 0; i != gpuNum; ++i)
{
hipSetDevice(i);
hipStreamCreate(&stream[i]); // Generate multiple streams
siz[i] = XN * YN * sSLN[i];
subCossin[i].resize(sSLN[i] * PN);
}
// precalculate the cossin value
thrust::host_vector<float2> hcossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
hcossin.begin(),[=](float ag){return make_float2(cosf(ag),sinf(ag));});
// Split the volume
DD2::splitVolume(subVol, subCossin, hvol, hcossin, SLN, XN, YN, PN, sSLN, gpuNum);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
std::vector<hipArray*> d_volumeArray(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_angs(gpuNum);
thrust::host_vector<thrust::device_vector<float2> >d_cossin(gpuNum);
dim3 blk(64,16,1);
std::vector<dim3> gid(gpuNum);
std::vector<hipTextureObject_t> texObj(gpuNum);
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
d_vol[i].resize(siz[i]);
d_vol[i] = subVol[i];
subVol[i].clear();
volumeSize[i].width = sSLN[i];
volumeSize[i].height= XN;
volumeSize[i].depth = YN;
hipMalloc3DArray(&d_volumeArray[i], &channelDesc, volumeSize[i]);
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)
thrust::raw_pointer_cast(&d_vol[i][0]),
volumeSize[i].width * sizeof(float),
volumeSize[i].width, volumeSize[i].height);
copyParams.dstArray = d_volumeArray[i];
copyParams.extent = volumeSize[i];
copyParams.kind = hipMemcpyDeviceToDevice;
hipMemcpy3DAsync(&copyParams, stream[i]);
d_vol[i].clear();
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = d_volumeArray[i];
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.addressMode[2] = hipAddressModeBorder;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = false;
texObj[i] = 0;
hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
d_prj[i].resize(DNU * PN * sSLN[i]);
d_xds[i].resize(DNU);
d_yds[i].resize(DNU);
thrust::copy(xds, xds + DNU, d_xds[i].begin());
thrust::copy(yds, yds + DNU, d_yds[i].begin());
//d_angs[i].resize(PN * SLN);
//thrust::copy(hangs.begin(), hangs.end(), d_angs[i].begin());
d_cossin[i].resize(PN * sSLN[i]);
d_cossin[i] = subCossin[i];
//thrust::transform(d_angs[i].begin(), d_angs[i].end(),
//d_cossin[i].begin(), DD2::CosSinFunctor());
//d_angs[i].clear();
gid[i].x = (sSLN[i] + blk.x - 1) / blk.x;
gid[i].y = (DNU + blk.y - 1) / blk.y;
gid[i].z = (PN + blk.z - 1) / blk.z;
}
thrust::host_vector<thrust::host_vector<float> > h_prj(gpuNum);
// Projection process
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
hipLaunchKernelGGL(( MultiSlices_PDPROJ_ker), dim3(gid[i]),dim3(blk), 0, stream[i], texObj[i],
thrust::raw_pointer_cast(&d_prj[i][0]),
make_float2(x0, y0),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_cossin[i][0]),
make_float2(objCntIdxX,objCntIdxY), dx,
sSLN[i], DNU, PN, XN, YN);
h_prj[i].resize(sSLN[i] * DNU * PN);
h_prj[i] = d_prj[i];
}
#pragma omp barrier
DD2::combineProjection(h_prj, hprj, SLN, DNU, PN, sSLN, gpuNum);
// Clean the resources
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
subVol[i].clear();
hipStreamDestroy(stream[i]);
hipFreeArray(d_volumeArray[i]);
d_vol[i].clear();
d_prj[i].clear();
d_xds[i].clear();
d_yds[i].clear();
d_angs[i].clear();
d_cossin[i].clear();
h_prj[i].clear();
}
hangs.clear();
ObjIdx_Start.clear();
ObjIdx_End.clear();
sSLN.clear();
subVol.clear();
stream.clear();
volumeSize.clear();
d_volumeArray.clear();
d_vol.clear();
d_prj.clear();
d_xds.clear();
d_yds.clear();
d_angs.clear();
d_cossin.clear();
gid.clear();
texObj.clear();
siz.clear();
h_prj.clear();
}
__global__ void MultiSlices_PDBACK_ker(
hipTextureObject_t texObj, // projection texture
float* vol,
const byte* __restrict__ msk,
const float2* __restrict__ cossin, // size should be SLN * PN
float2 s, // source position
float S2D,
float2 objCntIdx,
float dx,
float dbeta, /// what is dbeta
float detCntIdx,
int SLN, int XN, int YN, int DNU, int PN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int xIdx = threadIdx.y + blockIdx.y * blockDim.y;
int yIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && xIdx < XN && yIdx < YN)
{
if(msk[yIdx * XN + xIdx] != 1)
return;
float2 curVox =
make_float2((xIdx - objCntIdx.x) * dx, (yIdx - objCntIdx.y) * dx);
float2 dir;
float2 cursour;
float invsid = rsqrtf(s.x * s.x + s.y * s.y);
float invl;
//float idxZ;
float idxXY;
float alpha;
float cosVal;
float2 cossinT;
float summ = 0;
float tempVal;
dbeta = 1.0 / dbeta;
for(int angIdx = 0; angIdx != PN; ++angIdx)
{
cossinT = cossin[angIdx * SLN + slnIdx];
cursour = make_float2(
s.x * cossinT.x - s.y * cossinT.y,
s.x * cossinT.y + s.y * cossinT.x);
dir = curVox - cursour;
tempVal = dir.x * dir.x + dir.y * dir.y;
invl = rsqrtf(tempVal);
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * invl * invsid);
if(fabsf(cursour.x) >= fabsf(cursour.y))
{
cosVal = fabsf(1.0f / dir.x);
}
else
{
cosVal = fabsf(1.0f / dir.y);
}
cosVal *= (dx * sqrtf(tempVal));
idxXY = alpha * dbeta + detCntIdx + 0.5;
summ += tex3D<float>(texObj, slnIdx + 0.5f,
idxXY, angIdx + 0.5f) * cosVal;
}
__syncthreads();
vol[(yIdx * XN + xIdx) * SLN + slnIdx] = summ;
}
}
void MultiSlices_PDBACK(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN*PN
int PN, // # of view angles
byte* mask,
int* startIdx,
const int gpuNum)
{
//Set the start and end slices for each GPU
thrust::host_vector<int> ObjZIdx_Start(startIdx, startIdx + gpuNum);
thrust::host_vector<int> ObjZIdx_End(ObjZIdx_Start.size());
std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
ObjZIdx_End[gpuNum - 1] = SLN;
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries(DNU + 1, xds, bxds);
DD3Boundaries(DNU + 1, yds, byds);
float2 dir = normalize(make_float2(-x0, -y0));
float2 dirL = normalize(make_float2(bxds[0] - x0, byds[0] - y0));
float2 dirR = normalize(make_float2(bxds[DNU] - x0, byds[DNU] - y0));
float dbeta = asin(dirL.x * dirR.y - dirL.y * dirR.x) / DNU;
float minBeta = asin(dir.x * dirL.y - dir.y * dirL.x);
float detCntIdx = -minBeta / dbeta - 0.5;
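// Geometry note (added comment): dirL x dirR is the sine of the full fan angle subtended
// by the detector, so dbeta approximates the angular pitch of one cell (assuming the fan
// angle is below 90 degrees); minBeta is the signed angle from the central ray to the left
// detector boundary, so detCntIdx is the fractional cell index struck by the central ray.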
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
delete[] bxds;
delete[] byds;
thrust::host_vector<int> sSLN = ObjZIdx_End - ObjZIdx_Start;
const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
thrust::host_vector<float2> sour(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > cossin(gpuNum);
thrust::host_vector<hipArray*> d_prjArray(gpuNum);
thrust::host_vector<hipTextureObject_t> texObj(gpuNum);
thrust::host_vector<hipStream_t> stream(gpuNum);
thrust::host_vector<thrust::host_vector<float> > host_vol(gpuNum);
dim3 blk(32,16,1);
thrust::host_vector<dim3> gid(gpuNum);
// precalculate the cossin value
thrust::host_vector<float2> hcossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
hcossin.begin(),[=](float ag){return make_float2(cosf(ag),sinf(ag));});
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
//Split the projection data
thrust::host_vector<thrust::host_vector<float> > sbprj(gpuNum);
for(int i = 0 ; i != gpuNum; ++i)
{
sbprj[i].resize(sSLN[i] * DNU * PN);
}
DD2::splitProjection(sbprj,subCossin, hprj, hcossin, SLN, DNU, PN, sSLN, gpuNum);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
hipStreamCreate(&stream[i]);
msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, msk[i].begin());
vol[i].resize(sSLN[i] * XN * YN);
prj[i].resize(sSLN[i] * DNU * PN);
prj[i] = sbprj[i];
hipExtent prjSize;
prjSize.width = sSLN[i];
prjSize.height = DNU;
prjSize.depth = PN;
hipMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(
(void*) thrust::raw_pointer_cast(&prj[i][0]),
prjSize.width * sizeof(float),
prjSize.width, prjSize.height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize;
copyParams.kind = hipMemcpyDeviceToDevice;
hipMemcpy3DAsync(&copyParams, stream[i]);
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.addressMode[2] = hipAddressModeBorder;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = false;
hipCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
prj[i].clear();
cossin[i].resize(PN * sSLN[i]);
cossin[i] = subCossin[i];
gid[i].x = (sSLN[i] + blk.x - 1) / blk.x;
gid[i].y = (XN + blk.y - 1) / blk.y; // the backprojection kernel iterates over image pixels, so the grid must cover XN x YN
gid[i].z = (YN + blk.z - 1) / blk.z;
}
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
hipLaunchKernelGGL(( MultiSlices_PDBACK_ker), dim3(gid[i]), dim3(blk), 0, stream[i],
texObj[i],
thrust::raw_pointer_cast(&vol[i][0]),
thrust::raw_pointer_cast(&msk[i][0]),
thrust::raw_pointer_cast(&cossin[i][0]),
make_float2(x0,y0), S2D, make_float2(objCntIdxX,objCntIdxY),
dx, dbeta, detCntIdx, sSLN[i], XN, YN, DNU, PN);
host_vol[i].resize(sSLN[i] * XN * YN);
host_vol[i] = vol[i];
}
#pragma omp barrier
//combine the volume
DD2::combineVolume(host_vol, hvol, SLN, XN, YN, sSLN, gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
hipSetDevice(i);
msk[i].clear();
vol[i].clear();
prj[i].clear();
cossin[i].clear();
hipDestroyTextureObject(texObj[i]);
hipFreeArray(d_prjArray[i]);
hipStreamDestroy(stream[i]);
host_vol[i].clear();
sbprj[i].clear();
}
ObjZIdx_Start.clear();
ObjZIdx_End.clear();
sSLN.clear();
sour.clear();
msk.clear();
vol.clear();
prj.clear();
cossin.clear();
d_prjArray.clear();
texObj.clear();
stream.clear();
host_vol.clear();
gid.clear();
hcossin.clear();
//hangs.clear();
sbprj.clear();
}
extern "C"
void DD2_multiGPU(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const int method, // Control to use forward projection or backprojection
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* hangs, // view angles
int PN, // # of view angles
byte* mask,
int* startIdx,
const int gpuNum)
{
switch(method)
{
case 0: // DD projection
MultiSlices_DDPROJ(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 1: // DD backprojection
MultiSlices_DDBACK(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 2: // PD projection
MultiSlices_PDPROJ(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 3: // PD backprojection
MultiSlices_PDBACK(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
default:
break;
}
}
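// Hedged convenience wrapper added for illustration only (the name and the even-split
// policy are assumptions, not part of the original interface): distributes SLN slices
// as evenly as possible across gpuNum devices and forwards to DD2_multiGPU.
void DD2_multiGPU_evenSplit(
float* hvol, float* hprj, const int method,
const float x0, const float y0,
float* xds, float* yds,
const int DNU, const int SLN,
const float imgXCenter, const float imgYCenter,
const int XN, const int YN, const float dx,
float* hangs, int PN, byte* mask, const int gpuNum)
{
std::vector<int> startIdx(gpuNum);
for (int g = 0; g != gpuNum; ++g)
{
startIdx[g] = static_cast<int>(static_cast<long long>(SLN) * g / gpuNum);
}
DD2_multiGPU(hvol, hprj, method, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter, XN, YN, dx, hangs, PN,
mask, startIdx.data(), gpuNum);
}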
| 2026cfeffaf05bc8d6870b5305151570384a9d0b.cu | #include "utilities.cuh"
/**
* The multi-GPUs based 2D multi slices projection and backprojection
* Author: Rui Liu
* Date: Sep. 18, 2016
*/
#include <cuda_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <vector_types.h>
#include "multiSlices_ker.cuh"
typedef unsigned char byte;
#define BLKX 32
#define BLKY 8
#define BLKZ 1
namespace DD2
{
struct CosSinFunctor
{
__host__ __device__ float2 operator()(float ang)
{
return make_float2(cos(ang),sin(ang));
}
};
// Split the projection data
void splitProjection(
thrust::host_vector<thrust::host_vector<float> >& subProj,
thrust::host_vector<thrust::host_vector<float2> >& subCossin,
float* proj, thrust::host_vector<float2>& cossin,
const int SLN, const int DNU, const int PN,
thrust::host_vector<int> sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
subProj[i].resize(sSLN[i] * DNU * PN);
subCossin[i].resize(sSLN[i] * PN);
int curPos = sSLN[i];
for(int p = 0; p != DNU * PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subProj[i][subPos] = proj[totPos];
}
}
for(int p = 0; p != PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subCossin[i][subPos] = cossin[totPos];
}
}
psum += sSLN[i];
}
}
void combineProjection(
thrust::host_vector<thrust::host_vector<float> >& subProj,
float* proj, const int SLN, const int DNU, const int PN,
std::vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
int curPos = sSLN[i];
for(int p = 0; p < DNU * PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
proj[totPos] = subProj[i][subPos];
}
}
psum += sSLN[i];
}
}
void combineVolume(
thrust::host_vector<thrust::host_vector<float> >& subVol,
float* vol, const int SLN, const int XN, const int YN,
thrust::host_vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
//omp_set_num_threads();
for(int i = 0; i < gpuNum; ++i)
{
int curPos = sSLN[i];
#pragma omp parallel for
for(int p = 0; p < XN * YN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
vol[totPos] = subVol[i][subPos];
}
}
psum += sSLN[i];
}
}
// Split the volume
void splitVolume(
std::vector<std::vector<float> >& subVol,
thrust::host_vector<thrust::host_vector<float2> >& subCossin,
float* vol,
thrust::host_vector<float2> cossin,
const int SLN, const int XN, const int YN, const int PN,
std::vector<int>& sSLN, const int gpuNum)
{
int psum = 0;
for(int i = 0; i != gpuNum; ++i)
{
subVol[i].resize(sSLN[i] * XN * YN);
int curPos = sSLN[i];
for(int p = 0; p != XN * YN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subVol[i][subPos] = vol[totPos];
}
}
for(int p = 0; p != PN; ++p)
{
for(int s = 0; s != sSLN[i]; ++s)
{
int subPos = p * sSLN[i] + s;
int totPos = p * SLN + (s + psum);
subCossin[i][subPos] = cossin[totPos];
}
}
psum += sSLN[i];
}
}
// Copy the volume from the original to
template<typename Ta, typename Tb>
__global__ void naive_copyToTwoVolumes(Ta* in_ZXY,
Tb* out_ZXY, Tb* out_ZYX,
int XN, int YN, int ZN)
{
int idz = threadIdx.x + blockIdx.x * blockDim.x;
int idx = threadIdx.y + blockIdx.y * blockDim.y;
int idy = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < XN && idy < YN && idz < ZN)
{
int i = (idy * XN + idx) * ZN + idz;
int ni = (idy * (XN + 1) + (idx + 1)) * ZN + idz;
int nj = (idx * (YN + 1) + (idy + 1)) * ZN + idz;
out_ZXY[ni] = in_ZXY[i];
out_ZYX[nj] = in_ZXY[i];
}
}
__global__ void horizontalIntegral(float* prj, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < DNV && pIdx < PN)
{
int headPrt = pIdx * DNU * DNV + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * DNV] = prj[headPrt + ii * DNV] + prj[headPrt + (ii - 1) * DNV];
}
}
}
__global__ void addOneSidedZeroBoarder(const float* prj_in, float* prj_out, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int idu = threadIdx.y + blockIdx.y * blockDim.y;
int pn = threadIdx.z + blockIdx.z * blockDim.z;
if (idu < DNU && idv < DNV && pn < PN)
{
int i = (pn * DNU + idu) * DNV + idv;
int ni = (pn * (DNU + 1) + (idu + 1)) * (DNV + 1) + idv + 1;
prj_out[ni] = prj_in[i];
}
}
__global__ void verticalIntegral2(float* prj, int ZN, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N)
{
int currentHead = idx * ZN;
for (int ii = 1; ii < ZN; ++ii)
{
prj[currentHead + ii] = prj[currentHead + ii] + prj[currentHead + ii - 1];
}
}
}
__global__ void heorizontalIntegral2(float* prj, int DNU, int DNV, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < DNV && pIdx < PN)
{
int headPrt = pIdx * DNU * DNV + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * DNV] = prj[headPrt + ii * DNV] + prj[headPrt + (ii - 1) * DNV];
}
}
}
__global__ void addOneSidedZeroBoarder_multiSlice_Fan(const float* prj_in, float* prj_out, int DNU, int SLN, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int idu = threadIdx.y + blockIdx.y * blockDim.y;
int pn = threadIdx.z + blockIdx.z * blockDim.z;
if (idu < DNU && idv < SLN && pn < PN)
{
int i = (pn * DNU + idu) * SLN + idv;
int ni = (pn * (DNU + 1) + (idu + 1)) * SLN + idv;
prj_out[ni] = prj_in[i];
}
}
__global__ void heorizontalIntegral_multiSlice_Fan(float* prj, int DNU, int SLN, int PN)
{
int idv = threadIdx.x + blockIdx.x * blockDim.x;
int pIdx = threadIdx.y + blockIdx.y * blockDim.y;
if (idv < SLN && pIdx < PN)
{
int headPrt = pIdx * DNU * SLN + idv;
for (int ii = 1; ii < DNU; ++ii)
{
prj[headPrt + ii * SLN] = prj[headPrt + ii * SLN] + prj[headPrt + (ii - 1) * SLN];
}
}
}
}
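// DD forward projection kernel: one thread per (slice, detector channel, view).
// The rays through the two boundaries of the detector cell are intersected with every
// image column (or row, depending on the dominant ray direction); the prefix-summed
// volume textures built by DD2::horizontalIntegral are sampled at the two boundary
// positions, and the difference divided by the footprint width gives the mean image
// value over the footprint, accumulated and scaled by the step length dx/|dir|.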
__global__ void MultiSlices_DDPROJ_ker(
cudaTextureObject_t volTex1,
cudaTextureObject_t volTex2,
float* proj,
float2 s, // source position
const float2* __restrict__ cossin,
const float* __restrict__ xds,
const float* __restrict__ yds,
const float* __restrict__ bxds,
const float* __restrict__ byds,
float2 objCntIdx,
float dx,
int XN, int YN, int SLN,
int DNU, int PN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int detIdU = threadIdx.y + blockIdx.y * blockDim.y;
int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && detIdU < DNU && angIdx < PN)
{
float2 dir = cossin[angIdx * SLN + slnIdx]; // cossin;
float2 cursour = make_float2(
s.x * dir.x - s.y * dir.y,
s.x * dir.y + s.y * dir.x); // current source position;
s = dir;
float2 curDet = make_float2(
xds[detIdU] * s.x - yds[detIdU] * s.y,
xds[detIdU] * s.y + yds[detIdU] * s.x);
float2 curDetL = make_float2(
bxds[detIdU] * s.x - byds[detIdU] * s.y,
bxds[detIdU] * s.y + byds[detIdU] * s.x);
float2 curDetR = make_float2(
bxds[detIdU+1] * s.x - byds[detIdU+1] * s.y,
bxds[detIdU+1] * s.y + byds[detIdU+1] * s.x);
dir = normalize(curDet - cursour);
float factL = 0;
float factR = 0;
float constVal = 0;
float obj = 0;
float realL = 0;
float realR = 0;
float intersectLength = 0;
float invdx = 1.0f / dx;
float summ;
if(fabsf(s.x) <= fabsf(s.y))
{
summ = 0;
factL = (curDetL.y - cursour.y) / (curDetL.x - cursour.x);
factR = (curDetR.y - cursour.y) / (curDetR.x - cursour.x);
constVal = dx / fabsf(dir.x);
#pragma unroll
for (int ii = 0; ii < XN; ++ii)
{
obj = (ii - objCntIdx.x) * dx;
realL = (obj - curDetL.x) * factL + curDetL.y;
realR = (obj - curDetR.x) * factR + curDetR.y;
intersectLength = realR - realL;
realL = realL * invdx + objCntIdx.y + 1;
realR = realR * invdx + objCntIdx.y + 1;
summ += (tex3D<float>(volTex2, slnIdx + 0.5f, realR, ii + 0.5) - tex3D<float>(volTex2, slnIdx + 0.5, realL, ii + 0.5)) / intersectLength;
}
__syncthreads();
proj[(angIdx * DNU + detIdU) * SLN + slnIdx] = summ * constVal;
}
else
{
summ = 0;
factL = (curDetL.x - cursour.x) / (curDetL.y - cursour.y);
factR = (curDetR.x - cursour.x) / (curDetR.y - cursour.y);
constVal = dx / fabsf(dir.y);
#pragma unroll
for (int ii = 0; ii < YN; ++ii)
{
obj = (ii - objCntIdx.y) * dx;
realL = (obj - curDetL.y) * factL + curDetL.x;
realR = (obj - curDetR.y) * factR + curDetR.x;
intersectLength = realR - realL;
realL = realL * invdx + objCntIdx.x + 1;
realR = realR * invdx + objCntIdx.x + 1;
summ += (tex3D<float>(volTex1, slnIdx + 0.5f, realR, ii + 0.5) - tex3D<float>(volTex1, slnIdx + 0.5, realL, ii + 0.5)) / intersectLength;
}
__syncthreads();
proj[(angIdx * DNU + detIdU) * SLN + slnIdx] = summ * constVal;
}
}
}
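// Host driver for the DD forward projector: slices are distributed over the GPUs
// according to startidx, each GPU builds zero-padded running-sum copies of its
// sub-volume (one summed along X, one along Y) bound to 3D textures, the kernel above
// is launched per GPU from an OpenMP thread, and the per-GPU projections are finally
// reassembled into the full (SLN, DNU, PN) array.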
void MultiSlices_DDPROJ(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN * PN
int PN, // # of view angles
byte* mask,
int* startidx,
const int gpuNum)
{
// Regular the projection
for(int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for(int z = 0; z != SLN; ++z)
{
hvol[i * SLN + z] *= v;
}
}
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries<float>(DNU + 1, xds, bxds);
DD3Boundaries<float>(DNU + 1, yds, byds);
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
std::vector<int> startIdx(startidx, startidx + gpuNum);
std::vector<int> endIdx(gpuNum);
std::copy(startIdx.begin() + 1,
startIdx.end(), endIdx.begin());
endIdx[gpuNum - 1] = SLN;
std::vector<int> sSLN(gpuNum);
//Split the volumes
std::vector<std::vector<float> > subVol(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
for(int i = 0; i != gpuNum; ++i)
{
//subVol[i].resize(sSLN[i] * XN * YN);
sSLN[i] = endIdx[i] - startIdx[i];
subCossin[i].resize(sSLN[i] * PN);
}
thrust::host_vector<float2> cossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
cossin.begin(),[=](float ang){
return make_float2(cosf(ang), sinf(ang));
});
DD2::splitVolume(subVol, subCossin, hvol, cossin,
SLN, XN, YN, PN, sSLN, gpuNum);
// Generate multiple streams
std::vector<cudaStream_t> stream(gpuNum);
std::vector<int> siz(gpuNum, 0);
std::vector<int> nsiz_ZXY(gpuNum, 0);
std::vector<int> nsiz_ZYX(gpuNum, 0);
thrust::host_vector<thrust::device_vector<float> > SATZXY(gpuNum);
thrust::host_vector<thrust::device_vector<float> > SATZYX(gpuNum);
thrust::host_vector<cudaExtent> volumeSize1(gpuNum);
thrust::host_vector<cudaExtent> volumeSize2(gpuNum);
thrust::host_vector<cudaChannelFormatDesc> channelDesc1(gpuNum);
thrust::host_vector<cudaChannelFormatDesc> channelDesc2(gpuNum);
thrust::host_vector<cudaArray*> d_volumeArray1(gpuNum);
thrust::host_vector<cudaArray*> d_volumeArray2(gpuNum);
thrust::host_vector<cudaMemcpy3DParms> copyParams1(gpuNum);
thrust::host_vector<cudaMemcpy3DParms> copyParams2(gpuNum);
thrust::host_vector<cudaResourceDesc> resDesc1(gpuNum);
thrust::host_vector<cudaResourceDesc> resDesc2(gpuNum);
thrust::host_vector<cudaTextureDesc> texDesc1(gpuNum);
thrust::host_vector<cudaTextureDesc> texDesc2(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj1(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj2(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_bxds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_byds(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > d_cossin(gpuNum);
thrust::host_vector<thrust::host_vector<float> > h_prj(gpuNum);
dim3 pblk(BLKX,BLKY,BLKZ);
thrust::host_vector<dim3> pgid(gpuNum);
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0 ; i < gpuNum; ++i)
{
		cudaSetDevice(i);
		cudaStreamCreate(&stream[i]); // per-GPU stream used by the launches below and released by cudaStreamDestroy in the cleanup loop
siz[i] = XN * YN * sSLN[i];
nsiz_ZXY[i] = sSLN[i] * (XN + 1) * YN;
nsiz_ZYX[i] = sSLN[i] * (YN + 1) * XN;
SATZXY[i].resize(nsiz_ZXY[i]);
SATZYX[i].resize(nsiz_ZYX[i]);
thrust::device_vector<float> vol = subVol[i];
dim3 blk(64,16,1);
dim3 gid(
(sSLN[i] + blk.x - 1) / blk.x,
(XN + blk.y - 1) / blk.y,
(YN + blk.z - 1) / blk.z);
DD2::naive_copyToTwoVolumes<<<gid,blk,0,stream[i]>>>(
(thrust::raw_pointer_cast(&vol[0])),
(thrust::raw_pointer_cast(&SATZXY[i][0])),
(thrust::raw_pointer_cast(&SATZYX[i][0])),
XN,YN,sSLN[i]);
vol.clear();
blk.x = 64;
blk.y = 16;
blk.z = 1;
gid.x = (sSLN[i] + blk.x - 1) / blk.x;
gid.y = (YN + blk.y - 1) / blk.y;
gid.z = 1;
DD2::horizontalIntegral<<<gid, blk, 0, stream[i]>>>(
thrust::raw_pointer_cast(&SATZXY[i][0]),
XN + 1, sSLN[i], YN);
blk.x = 64;
blk.y = 16;
blk.z = 1;
gid.x = (sSLN[i] + blk.x - 1) / blk.x;
gid.y = (XN + blk.y - 1) / blk.y;
gid.z = 1;
DD2::horizontalIntegral<<<gid, blk, 0, stream[i]>>>(
thrust::raw_pointer_cast(&SATZYX[i][0]),
YN + 1, sSLN[i], XN);
volumeSize1[i].width = sSLN[i];
volumeSize1[i].height = XN + 1;
volumeSize1[i].depth = YN;
volumeSize2[i].width = sSLN[i];
volumeSize2[i].height = YN + 1;
volumeSize2[i].depth = XN;
channelDesc1[i] = cudaCreateChannelDesc<float>();
channelDesc2[i] = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_volumeArray1[i], &channelDesc1[i], volumeSize1[i]);
cudaMalloc3DArray(&d_volumeArray2[i], &channelDesc2[i], volumeSize2[i]);
copyParams1[i].srcPtr = make_cudaPitchedPtr((void*)
thrust::raw_pointer_cast(&SATZXY[i][0]),
volumeSize1[i].width * sizeof(float),
volumeSize1[i].width, volumeSize1[i].height);
copyParams1[i].dstArray = d_volumeArray1[i];
copyParams1[i].extent = volumeSize1[i];
copyParams1[i].kind = cudaMemcpyDeviceToDevice;
copyParams2[i].srcPtr = make_cudaPitchedPtr((void*)
thrust::raw_pointer_cast(&SATZYX[i][0]),
volumeSize2[i].width * sizeof(float),
volumeSize2[i].width, volumeSize2[i].height);
copyParams2[i].dstArray = d_volumeArray2[i];
copyParams2[i].extent = volumeSize2[i];
copyParams2[i].kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(©Params1[i]);
cudaMemcpy3D(©Params2[i]);
SATZXY[i].clear();
SATZYX[i].clear();
memset(&resDesc1[i], 0, sizeof(resDesc1[i]));
memset(&resDesc2[i], 0, sizeof(resDesc2[i]));
resDesc1[i].resType = cudaResourceTypeArray;
resDesc2[i].resType = cudaResourceTypeArray;
resDesc1[i].res.array.array = d_volumeArray1[i];
resDesc2[i].res.array.array = d_volumeArray2[i];
memset(&texDesc1[i], 0, sizeof(texDesc1[i]));
memset(&texDesc2[i], 0, sizeof(texDesc2[i]));
texDesc1[i].addressMode[0] = cudaAddressModeClamp;
texDesc1[i].addressMode[1] = cudaAddressModeClamp;
texDesc1[i].addressMode[2] = cudaAddressModeClamp;
texDesc2[i].addressMode[0] = cudaAddressModeClamp;
texDesc2[i].addressMode[1] = cudaAddressModeClamp;
texDesc2[i].addressMode[2] = cudaAddressModeClamp;
texDesc1[i].filterMode = cudaFilterModeLinear;
texDesc2[i].filterMode = cudaFilterModeLinear;
texDesc1[i].readMode = cudaReadModeElementType;
texDesc2[i].readMode = cudaReadModeElementType;
texDesc1[i].normalizedCoords = false;
texDesc2[i].normalizedCoords = false;
cudaCreateTextureObject(&texObj1[i], &resDesc1[i], &texDesc1[i], nullptr);
cudaCreateTextureObject(&texObj2[i], &resDesc2[i], &texDesc2[i], nullptr);
d_prj[i].resize(sSLN[i] * DNU * PN);
h_prj[i].resize(sSLN[i] * DNU * PN);
d_xds[i].resize(DNU);
thrust::copy(xds,xds+DNU, d_xds[i].begin());
d_yds[i].resize(DNU);
thrust::copy(yds,yds+DNU, d_yds[i].begin());
d_bxds[i].resize(DNU + 1);
thrust::copy(bxds,bxds+DNU+1, d_bxds[i].begin());
d_byds[i].resize(DNU + 1);
thrust::copy(byds,byds+DNU+1, d_byds[i].begin());
d_cossin[i].resize(sSLN[i] * PN);
thrust::copy(subCossin[i].begin(), subCossin[i].end(), d_cossin[i].begin());
pgid[i].x = (sSLN[i] + pblk.x - 1) / pblk.x;
pgid[i].y = (DNU + pblk.y - 1) / pblk.y;
pgid[i].z = (PN + pblk.z - 1) / pblk.z;
}
#pragma omp barrier
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
MultiSlices_DDPROJ_ker<<<pgid[i],pblk, 0, stream[i]>>>
(texObj1[i],texObj2[i],
thrust::raw_pointer_cast(&d_prj[i][0]),
make_float2(x0,y0),
thrust::raw_pointer_cast(&d_cossin[i][0]),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_bxds[i][0]),
thrust::raw_pointer_cast(&d_byds[i][0]),
make_float2(objCntIdxX, objCntIdxY),
dx,XN,YN,sSLN[i],DNU,PN);
h_prj[i] = d_prj[i];
}
#pragma omp barrier
DD2::combineProjection(h_prj,hprj, SLN, DNU, PN, sSLN, gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
subVol[i].clear();
subCossin[i].clear();
cudaStreamDestroy(stream[i]);
cudaFreeArray(d_volumeArray1[i]);
cudaFreeArray(d_volumeArray2[i]);
cudaDestroyTextureObject(texObj1[i]);
cudaDestroyTextureObject(texObj2[i]);
d_prj[i].clear();
d_xds[i].clear();
d_yds[i].clear();
d_bxds[i].clear();
d_byds[i].clear();
d_cossin[i].clear();
h_prj[i].clear();
}
#pragma omp barrier
//Clear the data
delete[] bxds;
delete[] byds;
startIdx.clear();
endIdx.clear();
sSLN.clear();
subVol.clear();
subCossin.clear();
cossin.clear();
stream.clear();
siz.clear();
nsiz_ZXY.clear();
nsiz_ZYX.clear();
SATZXY.clear();
SATZYX.clear();
volumeSize1.clear();
volumeSize2.clear();
channelDesc1.clear();
channelDesc2.clear();
d_volumeArray1.clear();
d_volumeArray2.clear();
copyParams1.clear();
copyParams2.clear();
resDesc1.clear();
resDesc2.clear();
texDesc1.clear();
texDesc2.clear();
texObj1.clear();
texObj2.clear();
d_prj.clear();
d_xds.clear();
d_yds.clear();
d_bxds.clear();
d_byds.clear();
d_cossin.clear();
h_prj.clear();
pgid.clear();
}
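// DD backprojection kernel: one thread per (slice, x, y) voxel.  For every view the
// fan angle of the voxel and its half-pixel footprint are converted to a pair of
// fractional detector indices (idxL, idxR); the difference of the channel-wise
// prefix-summed projection texture between those indices, weighted by cosVal,
// accumulates the backprojected value.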
__global__ void MultiSlices_DDBACK_ker(
cudaTextureObject_t prjTexObj,
float* vol,
const byte* __restrict__ msk,
const float2* __restrict__ cossin,
float2 s,
float S2D,
float2 curvox, // imgCenter index
float dx, float dbeta, float detCntIdx,
int2 VN, int SLN, int PN)
{
int3 id;
id.z = threadIdx.x + __umul24(blockIdx.x, blockDim.x);
id.x = threadIdx.y + __umul24(blockIdx.y, blockDim.y);
id.y = threadIdx.z + __umul24(blockIdx.z, blockDim.z);
if(id.z < SLN && id.x < VN.x && id.y < VN.y)
{
if(msk[id.y * VN.x + id.x] != 1)
{
return;
}
curvox = make_float2((id.x - curvox.x) * dx, (id.y - curvox.y) * dx);
float2 cursour;
float idxL, idxR;
float cosVal;
float summ = 0;
float2 cossinT;
float inv_sid = 1.0f / sqrtf(s.x * s.x + s.y * s.y);
float2 dir;
float l_square;
float l;
float alpha;
float deltaAlpha;
//S2D /= ddv;
dbeta = 1.0 / dbeta;
float ddv;
for(int angIdx = 0; angIdx < PN; ++angIdx)
{
cossinT = cossin[angIdx * SLN + id.z];
cursour = make_float2(
s.x * cossinT.x - s.y * cossinT.y,
s.x * cossinT.y + s.y * cossinT.x);
dir = curvox - cursour;
l_square = dir.x * dir.x + dir.y * dir.y;
l = rsqrtf(l_square); // 1 / sqrt(l_square);
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * inv_sid * l);
if(fabsf(cursour.x) > fabsf(cursour.y))
{
ddv = dir.x;
}
else
{
ddv = dir.y;
}
deltaAlpha = ddv / l_square * dx * 0.5;
cosVal = dx / ddv * sqrtf(l_square);
idxL = (alpha - deltaAlpha) * dbeta + detCntIdx + 1.0;
idxR = (alpha + deltaAlpha) * dbeta + detCntIdx + 1.0;
summ += (tex3D<float>(prjTexObj,id.z + 0.5, idxR, angIdx + 0.5) -
tex3D<float>(prjTexObj,id.z + 0.5, idxL, angIdx + 0.5)) * cosVal;
}
__syncthreads();
vol[(id.y * VN.x + id.x) * SLN + id.z] = summ;
}
}
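// Host driver for the DD backprojector: the projection is split over the GPUs by
// slice, each per-GPU projection gets a leading zero channel and is prefix-summed
// along the channel direction (addOneSidedZeroBoarder_multiSlice_Fan +
// heorizontalIntegral_multiSlice_Fan) before being bound to a texture, then the
// kernel above reconstructs the per-GPU sub-volumes, which are combined at the end.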
void MultiSlices_DDBACK(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SIZE SHOULD BE (SLN x PN)
int PN, // # of view angles
byte* mask,
int* startidx,
const int gpuNum)
{
std::vector<int> startIdx(startidx, startidx + gpuNum);
std::vector<int> endIdx(startIdx.size());
std::copy(startIdx.begin() + 1, startIdx.end(), endIdx.begin());
endIdx[gpuNum - 1] = SLN;
std::vector<int> sSLN(startIdx.size());// = endIdx - startIdx;
for(int i = 0; i < gpuNum; ++i)
{
sSLN[i] = endIdx[i] - startIdx[i];
}
startIdx.clear();
endIdx.clear();
const float2 objCntIdx(
make_float2((XN - 1.0) * 0.5 - imgXCenter / dx,
(YN - 1.0) * 0.5 - imgYCenter / dx));
const float2 sour(make_float2(x0, y0));
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
const float dbeta = atanf(
(sqrt(powf(xds[1] - xds[0],2.0) + powf(yds[1] - yds[0],2.0)))
/ S2D * 0.5f) * 2.0f;
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries(DNU+1, xds, bxds);
DD3Boundaries(DNU+1, yds, byds);
//Calculate the most left angle
const float detCntIdx = fabsf(atanf(bxds[0] / (y0 - byds[0]))) / dbeta - 0.5f;
delete[] bxds;
delete[] byds;
/////////////////////////////////////////////////////////////////////////////
thrust::host_vector<float2> h_cossin(SLN * PN);
thrust::transform(h_angs, h_angs + PN * SLN,
h_cossin.begin(), [=](float ang)
{return make_float2(cosf(ang),sinf(ang));});
thrust::host_vector<thrust::host_vector<float> > subProj(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > d_msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::host_vector<float> > h_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_subProj(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > d_subCossin(gpuNum);
thrust::host_vector<cudaArray*> d_prjArray(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prjSAT(gpuNum);
thrust::host_vector<cudaStream_t> stream(gpuNum);
thrust::host_vector<int> siz(gpuNum);
thrust::host_vector<int> nsiz(gpuNum);
thrust::host_vector<cudaExtent> prjSize(gpuNum);
thrust::host_vector<cudaChannelFormatDesc> channelDesc(gpuNum);
dim3 copyBlk(64,16,1);
thrust::host_vector<dim3> copyGid(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
cudaStreamCreate(&stream[i]);
subProj[i].resize(sSLN[i] * DNU * PN);
subCossin[i].resize(sSLN[i] * PN);
d_msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, d_msk[i].begin());
d_vol[i].resize(sSLN[i] * XN * YN);
h_vol[i].resize(sSLN[i] * XN * YN);
d_subProj[i].resize(sSLN[i] * DNU * PN);
d_subCossin[i].resize(sSLN[i] * PN);
d_prjSAT[i].resize(sSLN[i] * (DNU + 1) * PN);
}
// Split the projection
DD2::splitProjection(subProj, subCossin, hprj, h_cossin, SLN, DNU,
PN, sSLN, gpuNum);
h_cossin.clear();
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
d_subProj[i] = subProj[i];
subProj[i].clear();
d_subCossin[i] = subCossin[i];
subCossin[i].clear();
siz[i] = DNU * sSLN[i] * PN;
nsiz[i] = (DNU + 1) * sSLN[i] * PN;
copyGid[i].x = (sSLN[i] + copyBlk.x - 1) / copyBlk.x;
copyGid[i].y = (DNU + copyBlk.y - 1) / copyBlk.y;
copyGid[i].z = (PN + copyBlk.z - 1) / copyBlk.z;
DD2::addOneSidedZeroBoarder_multiSlice_Fan << <copyGid[i], copyBlk, 0, stream[i] >> >(
thrust::raw_pointer_cast(&d_subProj[i][0]),
thrust::raw_pointer_cast(&d_prjSAT[i][0]),
DNU, sSLN[i], PN);
copyGid[i].x = (sSLN[i] + copyBlk.x - 1) / copyBlk.x;
copyGid[i].y = (PN + copyBlk.y - 1) / copyBlk.y;
copyGid[i].z = 1;
DD2::heorizontalIntegral_multiSlice_Fan << <copyGid[i], copyBlk, 0, stream[i] >> >(
thrust::raw_pointer_cast(&d_prjSAT[i][0]), DNU + 1, sSLN[i], PN);
d_subProj[i].clear();
/////////////////////////////////////////////////////////////////
prjSize[i].width = sSLN[i];
prjSize[i].height= DNU + 1;
prjSize[i].depth = PN;
channelDesc[i] = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_prjArray[i], &channelDesc[i], prjSize[i]);
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr(
(void*) thrust::raw_pointer_cast(&d_prjSAT[i][0]),
prjSize[i].width * sizeof(float),
prjSize[i].width, prjSize[i].height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize[i];
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(©Params);
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.addressMode[1] = cudaAddressModeClamp;
texDesc.addressMode[2] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
d_prjSAT[i].clear();
}
#pragma omp barrier
subProj.clear();
d_prjSAT.clear();
subCossin.clear();
d_subProj.clear();
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
dim3 blk(BLKX,BLKY,BLKZ);
dim3 gid(
(sSLN[i] + blk.x - 1) / blk.x,
(XN + blk.y - 1) / blk.y,
(YN + blk.z - 1) / blk.z);
MultiSlices_DDBACK_ker<< <gid, blk, 0, stream[i] >> >(texObj[i],
thrust::raw_pointer_cast(&d_vol[i][0]),
thrust::raw_pointer_cast(&d_msk[i][0]),
thrust::raw_pointer_cast(&d_subCossin[i][0]),
sour, S2D, objCntIdx,
dx, dbeta, detCntIdx, make_int2(XN, YN), sSLN[i], PN);
h_vol[i] = d_vol[i];
d_vol[i].clear();
d_msk[i].clear();
d_subCossin[i].clear();
cudaDestroyTextureObject(texObj[i]);
cudaFreeArray(d_prjArray[i]);
cudaStreamDestroy(stream[i]);
}
#pragma omp barrier
d_vol.clear();
d_msk.clear();
d_subCossin.clear();
d_prjArray.clear();
texObj.clear();
stream.clear();
siz.clear();
nsiz.clear();
prjSize.clear();
channelDesc.clear();
copyGid.clear();
thrust::host_vector<int> sSLNn = sSLN;
DD2::combineVolume(h_vol, hvol, SLN, XN, YN, sSLNn, gpuNum);
sSLNn.clear();
sSLN.clear();
}
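// PD forward projection kernel: one thread per (slice, detector channel, view).
// The ray from the rotated source to the detector-cell center is marched along the
// dominant axis, sampling the volume texture with hardware linear interpolation at
// its crossing point with every image column (or row); the sum is scaled by the step
// length dx/|dir| to approximate the line integral.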
__global__ void MultiSlices_PDPROJ_ker(
cudaTextureObject_t texObj,
float* proj,
float2 s,
float* __restrict__ xds,
float* __restrict__ yds,
float2* __restrict__ cossin, // size should be SLN * PN
float2 objCntIdx,
float dx, int SLN, int DNU, int PN, int XN, int YN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int detIdx = threadIdx.y + blockIdx.y * blockDim.y;
int angIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && detIdx < DNU && angIdx < PN)
{
float2 cssv = cossin[angIdx * SLN + slnIdx];
float2 cursour = make_float2(
s.x * cssv.x - s.y * cssv.y,
s.x * cssv.y + s.y * cssv.x);
float summ = xds[detIdx];
float obj = yds[detIdx];
float idx = 0;
float2 curDet = make_float2(
summ * cssv.x - obj * cssv.y,
summ * cssv.y + obj * cssv.x);
float2 dir = normalize(curDet - cursour);
summ = 0;
obj = 0;
if(fabs(cssv.x) <= fabs(cssv.y))
//if(fabsf(dir.y) <= fabsf(dir.x))
{
summ = 0;
#pragma unroll
for(int ii = 0; ii < XN; ++ii)
{
obj = (ii - objCntIdx.x) * dx;
idx = (obj - curDet.x) / dir.x * dir.y + curDet.y;
idx = idx / dx + objCntIdx.y + 0.5f;
summ += tex3D<float>(texObj, slnIdx + 0.5f, ii + 0.5f, idx);
}
__syncthreads();
proj[(angIdx * DNU + detIdx) * SLN + slnIdx] = summ * dx / fabsf(dir.x);
}
else
{
summ = 0;
#pragma unroll
for(int jj = 0; jj < YN; ++jj)
{
obj = (jj - objCntIdx.y) * dx;
idx = (obj - curDet.y) / dir.y * dir.x + curDet.x;
idx = idx / dx + objCntIdx.x + 0.5f;
summ += tex3D<float>(texObj, slnIdx + 0.5f, idx, jj + 0.5f);
}
__syncthreads();
proj[(angIdx * DNU + detIdx) * SLN + slnIdx] = summ * dx / fabsf(dir.y);
}
}
}
void MultiSlices_PDPROJ(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN * PN
int PN, // # of view angles
byte* mask,
	int* startIdx, // first slice index assigned to each GPU; per-GPU slice counts are taken from consecutive differences
const int gpuNum)
{
thrust::host_vector<float> hangs(h_angs,h_angs + PN * SLN);
// Regular the image volume
for(int i = 0; i != XN * YN; ++i)
{
byte v = mask[i];
for(int z = 0; z != SLN; ++z)
{
hvol[i * SLN + z] *= v;
}
}
const float objCntIdxX = (XN - 1.0) * 0.5 - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0) * 0.5 - imgYCenter / dx;
//We do not need the overlapping for projection
std::vector<int> ObjIdx_Start(gpuNum, -1);
std::vector<int> ObjIdx_End(gpuNum, -1);
std::vector<std::vector<float> > subVol(gpuNum);
std::vector<int> sSLN(gpuNum,0);
for(int i = 1; i != gpuNum; ++i)
{
sSLN[i-1] = startIdx[i] - startIdx[i-1];
}
sSLN[gpuNum-1] = SLN - startIdx[gpuNum-1];
std::vector<cudaStream_t> stream(gpuNum);
std::vector<cudaExtent> volumeSize(gpuNum);
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
std::vector<int> siz(gpuNum, 0);
for(int i = 0; i != gpuNum; ++i)
{
cudaSetDevice(i);
cudaStreamCreate(&stream[i]); // Generate multiple streams
siz[i] = XN * YN * sSLN[i];
subCossin[i].resize(sSLN[i] * PN);
}
	// precalculate the cossin value
thrust::host_vector<float2> hcossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
hcossin.begin(),[=](float ag){return make_float2(cosf(ag),sinf(ag));});
// Split the volume
DD2::splitVolume(subVol, subCossin, hvol, hcossin, SLN, XN, YN, PN, sSLN, gpuNum);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
std::vector<cudaArray*> d_volumeArray(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_prj(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_xds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_yds(gpuNum);
thrust::host_vector<thrust::device_vector<float> > d_angs(gpuNum);
thrust::host_vector<thrust::device_vector<float2> >d_cossin(gpuNum);
dim3 blk(64,16,1);
std::vector<dim3> gid(gpuNum);
std::vector<cudaTextureObject_t> texObj(gpuNum);
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
d_vol[i].resize(siz[i]);
d_vol[i] = subVol[i];
subVol[i].clear();
volumeSize[i].width = sSLN[i];
volumeSize[i].height= XN;
volumeSize[i].depth = YN;
cudaMalloc3DArray(&d_volumeArray[i], &channelDesc, volumeSize[i]);
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)
thrust::raw_pointer_cast(&d_vol[i][0]),
volumeSize[i].width * sizeof(float),
volumeSize[i].width, volumeSize[i].height);
copyParams.dstArray = d_volumeArray[i];
copyParams.extent = volumeSize[i];
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3DAsync(©Params, stream[i]);
d_vol[i].clear();
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_volumeArray[i];
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.addressMode[2] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
texObj[i] = 0;
cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
d_prj[i].resize(DNU * PN * sSLN[i]);
d_xds[i].resize(DNU);
d_yds[i].resize(DNU);
thrust::copy(xds, xds + DNU, d_xds[i].begin());
thrust::copy(yds, yds + DNU, d_yds[i].begin());
//d_angs[i].resize(PN * SLN);
//thrust::copy(hangs.begin(), hangs.end(), d_angs[i].begin());
d_cossin[i].resize(PN * sSLN[i]);
d_cossin[i] = subCossin[i];
//thrust::transform(d_angs[i].begin(), d_angs[i].end(),
//d_cossin[i].begin(), DD2::CosSinFunctor());
//d_angs[i].clear();
gid[i].x = (sSLN[i] + blk.x - 1) / blk.x;
gid[i].y = (DNU + blk.y - 1) / blk.y;
gid[i].z = (PN + blk.z - 1) / blk.z;
}
thrust::host_vector<thrust::host_vector<float> > h_prj(gpuNum);
// Projection process
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
MultiSlices_PDPROJ_ker<<<gid[i],blk, 0, stream[i]>>>(texObj[i],
thrust::raw_pointer_cast(&d_prj[i][0]),
make_float2(x0, y0),
thrust::raw_pointer_cast(&d_xds[i][0]),
thrust::raw_pointer_cast(&d_yds[i][0]),
thrust::raw_pointer_cast(&d_cossin[i][0]),
make_float2(objCntIdxX,objCntIdxY), dx,
sSLN[i], DNU, PN, XN, YN);
h_prj[i].resize(sSLN[i] * DNU * PN);
h_prj[i] = d_prj[i];
}
#pragma omp barrier
DD2::combineProjection(h_prj, hprj, SLN, DNU, PN, sSLN, gpuNum);
// Clean the resources
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
subVol[i].clear();
cudaStreamDestroy(stream[i]);
cudaFreeArray(d_volumeArray[i]);
d_vol[i].clear();
d_prj[i].clear();
d_xds[i].clear();
d_yds[i].clear();
d_angs[i].clear();
d_cossin[i].clear();
h_prj[i].clear();
}
hangs.clear();
ObjIdx_Start.clear();
ObjIdx_End.clear();
sSLN.clear();
subVol.clear();
stream.clear();
volumeSize.clear();
d_volumeArray.clear();
d_vol.clear();
d_prj.clear();
d_xds.clear();
d_yds.clear();
d_angs.clear();
d_cossin.clear();
gid.clear();
texObj.clear();
siz.clear();
h_prj.clear();
}
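// PD backprojection kernel: one thread per (slice, x, y) voxel.  For every view the
// voxel's fan angle is converted to a single fractional detector index and the
// projection texture is sampled there with hardware linear interpolation, weighted
// by the distance/obliquity factor cosVal.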
__global__ void MultiSlices_PDBACK_ker(
cudaTextureObject_t texObj, // projection texture
float* vol,
const byte* __restrict__ msk,
const float2* __restrict__ cossin, // size should be SLN * PN
float2 s, // source position
float S2D,
float2 objCntIdx,
float dx,
	float dbeta, // angular width of one detector cell (fan-angle increment)
float detCntIdx,
int SLN, int XN, int YN, int DNU, int PN)
{
int slnIdx = threadIdx.x + blockIdx.x * blockDim.x;
int xIdx = threadIdx.y + blockIdx.y * blockDim.y;
int yIdx = threadIdx.z + blockIdx.z * blockDim.z;
if(slnIdx < SLN && xIdx < XN && yIdx < YN)
{
if(msk[yIdx * XN + xIdx] != 1)
return;
float2 curVox =
make_float2((xIdx - objCntIdx.x) * dx, (yIdx - objCntIdx.y) * dx);
float2 dir;
float2 cursour;
float invsid = rsqrtf(s.x * s.x + s.y * s.y);
float invl;
//float idxZ;
float idxXY;
float alpha;
float cosVal;
float2 cossinT;
float summ = 0;
float tempVal;
dbeta = 1.0 / dbeta;
for(int angIdx = 0; angIdx != PN; ++angIdx)
{
cossinT = cossin[angIdx * SLN + slnIdx];
cursour = make_float2(
s.x * cossinT.x - s.y * cossinT.y,
s.x * cossinT.y + s.y * cossinT.x);
dir = curVox - cursour;
tempVal = dir.x * dir.x + dir.y * dir.y;
invl = rsqrtf(tempVal);
alpha = asinf((cursour.y * dir.x - cursour.x * dir.y) * invl * invsid);
if(fabsf(cursour.x) >= fabsf(cursour.y))
{
cosVal = fabsf(1.0f / dir.x);
}
else
{
cosVal = fabsf(1.0f / dir.y);
}
cosVal *= (dx * sqrtf(tempVal));
idxXY = alpha * dbeta + detCntIdx + 0.5;
summ += tex3D<float>(texObj, slnIdx + 0.5f,
idxXY, angIdx + 0.5f) * cosVal;
}
__syncthreads();
vol[(yIdx * XN + xIdx) * SLN + slnIdx] = summ;
}
}
void MultiSlices_PDBACK(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* h_angs, // view angles SHOULD BE WITH SIZE SLN*PN
int PN, // # of view angles
byte* mask,
int* startIdx,
const int gpuNum)
{
//Set the start and end slices for each GPU
thrust::host_vector<int> ObjZIdx_Start(startIdx, startIdx + gpuNum);
thrust::host_vector<int> ObjZIdx_End(ObjZIdx_Start.size());
std::copy(ObjZIdx_Start.begin() + 1, ObjZIdx_Start.end(), ObjZIdx_End.begin());
ObjZIdx_End[gpuNum - 1] = SLN;
float* bxds = new float[DNU + 1];
float* byds = new float[DNU + 1];
DD3Boundaries(DNU + 1, xds, bxds);
DD3Boundaries(DNU + 1, yds, byds);
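	// Fan-beam geometry derived from the detector boundary positions: dbeta is the
	// angular width of one detector cell (total fan angle from the cross product of
	// the unit vectors towards the first and last cell boundaries, divided by DNU),
	// and detCntIdx is the fractional detector index aligned with the central ray.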
float2 dir = normalize(make_float2(-x0, -y0));
float2 dirL = normalize(make_float2(bxds[0] - x0, byds[0] - y0));
float2 dirR = normalize(make_float2(bxds[DNU] - x0, byds[DNU] - y0));
float dbeta = asin(dirL.x * dirR.y - dirL.y * dirR.x) / DNU;
float minBeta = asin(dir.x * dirL.y - dir.y * dirL.x);
float detCntIdx = -minBeta / dbeta - 0.5;
const float S2D = hypotf(xds[0] - x0, yds[0] - y0);
delete[] bxds;
delete[] byds;
thrust::host_vector<int> sSLN = ObjZIdx_End - ObjZIdx_Start;
const float objCntIdxX = (XN - 1.0f) * 0.5f - imgXCenter / dx;
const float objCntIdxY = (YN - 1.0f) * 0.5f - imgYCenter / dx;
thrust::host_vector<float2> sour(gpuNum);
thrust::host_vector<thrust::device_vector<byte> > msk(gpuNum);
thrust::host_vector<thrust::device_vector<float> > vol(gpuNum);
thrust::host_vector<thrust::device_vector<float> > prj(gpuNum);
thrust::host_vector<thrust::device_vector<float2> > cossin(gpuNum);
thrust::host_vector<cudaArray*> d_prjArray(gpuNum);
thrust::host_vector<cudaTextureObject_t> texObj(gpuNum);
thrust::host_vector<cudaStream_t> stream(gpuNum);
thrust::host_vector<thrust::host_vector<float> > host_vol(gpuNum);
dim3 blk(32,16,1);
thrust::host_vector<dim3> gid(gpuNum);
// precalculate the cossin value
thrust::host_vector<float2> hcossin(PN * SLN);
thrust::transform(h_angs, h_angs + PN * SLN,
hcossin.begin(),[=](float ag){return make_float2(cosf(ag),sinf(ag));});
thrust::host_vector<thrust::host_vector<float2> > subCossin(gpuNum);
//Split the projection data
thrust::host_vector<thrust::host_vector<float> > sbprj(gpuNum);
for(int i = 0 ; i != gpuNum; ++i)
{
		sbprj[i].resize(sSLN[i] * DNU * PN);
}
DD2::splitProjection(sbprj,subCossin, hprj, hcossin, SLN, DNU, PN, sSLN, gpuNum);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
omp_set_num_threads(gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
cudaStreamCreate(&stream[i]);
msk[i].resize(XN * YN);
thrust::copy(mask, mask + XN * YN, msk[i].begin());
vol[i].resize(sSLN[i] * XN * YN);
prj[i].resize(sSLN[i] * DNU * PN);
prj[i] = sbprj[i];
cudaExtent prjSize;
prjSize.width = sSLN[i];
prjSize.height = DNU;
prjSize.depth = PN;
cudaMalloc3DArray(&d_prjArray[i], &channelDesc, prjSize);
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(
(void*) thrust::raw_pointer_cast(&prj[i][0]),
prjSize.width * sizeof(float),
prjSize.width, prjSize.height);
copyParams.dstArray = d_prjArray[i];
copyParams.extent = prjSize;
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3DAsync(©Params, stream[i]);
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = d_prjArray[i];
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.addressMode[2] = cudaAddressModeBorder;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = false;
cudaCreateTextureObject(&texObj[i], &resDesc, &texDesc, nullptr);
prj[i].clear();
cossin[i].resize(PN * sSLN[i]);
cossin[i] = subCossin[i];
		gid[i].x = (sSLN[i] + blk.x - 1) / blk.x;
		gid[i].y = (XN + blk.y - 1) / blk.y; // the kernel maps (x,y,z) threads to (slice, image x, image y), so the grid must cover XN x YN
		gid[i].z = (YN + blk.z - 1) / blk.z;
}
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
MultiSlices_PDBACK_ker<<<gid[i], blk, 0, stream[i]>>>(
texObj[i],
thrust::raw_pointer_cast(&vol[i][0]),
thrust::raw_pointer_cast(&msk[i][0]),
thrust::raw_pointer_cast(&cossin[i][0]),
make_float2(x0,y0), S2D, make_float2(objCntIdxX,objCntIdxY),
dx, dbeta, detCntIdx, sSLN[i], XN, YN, DNU, PN);
host_vol[i].resize(sSLN[i] * XN * YN);
host_vol[i] = vol[i];
}
#pragma omp barrier
//combine the volume
DD2::combineVolume(host_vol, hvol, SLN, XN, YN, sSLN, gpuNum);
#pragma omp parallel for
for(int i = 0; i < gpuNum; ++i)
{
cudaSetDevice(i);
msk[i].clear();
vol[i].clear();
prj[i].clear();
cossin[i].clear();
cudaDestroyTextureObject(texObj[i]);
cudaFreeArray(d_prjArray[i]);
cudaStreamDestroy(stream[i]);
host_vol[i].clear();
sbprj[i].clear();
}
ObjZIdx_Start.clear();
ObjZIdx_End.clear();
sSLN.clear();
sour.clear();
msk.clear();
vol.clear();
prj.clear();
cossin.clear();
d_prjArray.clear();
texObj.clear();
stream.clear();
host_vol.clear();
gid.clear();
hcossin.clear();
//hangs.clear();
sbprj.clear();
}
extern "C"
void DD2_multiGPU(
float* hvol, // the pointer to the image
float* hprj, // the pointer to the projection (SLN, DNU, PN) order
const int method, // Control to use forward projection or backprojection
const float x0, const float y0, //position of the initial source
float* xds, float* yds, // distribution of the detector cells
const int DNU, // Number of detector cells
const int SLN, // Number of slices to be projected or backprojected
const float imgXCenter, const float imgYCenter, //Center of the image
const int XN, const int YN, // pixel number of the image
const float dx, // size of the pixel
float* hangs, // view angles
int PN, // # of view angles
byte* mask,
int* startIdx,
const int gpuNum)
{
switch(method)
{
case 0: // DD projection
MultiSlices_DDPROJ(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 1: // DD backprojection
MultiSlices_DDBACK(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 2: // PD projection
MultiSlices_PDPROJ(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
case 3: // PD backprojection
MultiSlices_PDBACK(hvol, hprj, x0, y0, xds, yds, DNU, SLN,
imgXCenter, imgYCenter,XN, YN, dx, hangs, PN, mask,
startIdx,gpuNum);
break;
default:
break;
}
}
|
a8fe6b2382094675f19ac8a5cc846733280757a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
#include "../shared/stopwatch.h"
int main(int argc, char** argv)
{
Stopwatch copyTimer;
Stopwatch computeTimer;
Stopwatch generateTimer;
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
uint source_vertex = 0;
uint max_degree = 0;
for (int i=0; i<graph.num_nodes; i++){
if (graph.outDegree[i]>max_degree){
max_degree = graph.outDegree[i];
source_vertex = i;
}
}
cout<<"Source vertex: "<<source_vertex<<endl;
int *numWalker1 = new int[graph.num_nodes];
int *d_numWalker1, *d_numWalker2;
hipMalloc(&d_numWalker1, sizeof(int) * graph.num_nodes);
hipMalloc(&d_numWalker2, sizeof(int) * graph.num_nodes);
hiprandState_t *randStates;
hipMalloc(&randStates, sizeof(hiprandState_t) * 512);
hipLaunchKernelGGL(( init_rand), dim3(1), dim3(512), 0, 0, randStates, 512);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
numWalker1[i] = 0;
}
//numWalker1[arguments.sourceNode] = graph.num_nodes * 2;
numWalker1[source_vertex] = graph.num_nodes * 2;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(d_numWalker1, numWalker1, graph.num_nodes * sizeof(int), hipMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
generateTimer.start();
subgen.generate(graph, subgraph, d_numWalker1);
generateTimer.stop();
cout << "generate subgraph" << endl;
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
unsigned long totalActiveNodes = 0;
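	// One pass of this loop is a "super iteration": the active subgraph built by
	// subgen.generate from d_numWalker1 is partitioned so that each partition's edge
	// list fits on the device, the edge partitions are copied in and ppr_kernel is run
	// on each, then moveWalkers_ppr is run over all vertices before the subgraph is
	// regenerated for the next pass.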
while (subgraph.numActiveNodes>0)
{
partitioner.partition(subgraph, subgraph.numActiveNodes);
//cout << "num active nodes: " << subgraph.numActiveNodes << "\n";
totalActiveNodes += subgraph.numActiveNodes;
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
hipDeviceSynchronize();
copyTimer.start();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), hipMemcpyHostToDevice));
hipDeviceSynchronize();
copyTimer.stop();
computeTimer.start();
hipLaunchKernelGGL(( ppr_kernel), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, graph.num_nodes,
partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_numWalker1,
d_numWalker2,
randStates);
hipDeviceSynchronize();
computeTimer.stop();
gpuErrorcheck( hipPeekAtLastError() );
}
hipLaunchKernelGGL(( moveWalkers_ppr), dim3(graph.num_nodes/512 + 1), dim3(512), 0, 0, graph.num_nodes, d_numWalker1, d_numWalker2, graph.d_value, randStates);
generateTimer.start();
subgen.generate(graph, subgraph, d_numWalker1);
		generateTimer.stop();
		gItr++; // count completed super iterations for the summary printed below
	}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
cout << "Number of iterations = " << gItr << endl;
cout << "compute time: " << computeTimer.total() << " ns copy time: " << copyTimer.total() << " ns\n";
cout << "generate subgraph time: " << generateTimer.total() << " ns\n";
cout << "total active nodes: " << totalActiveNodes << "\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), hipMemcpyDeviceToHost));
unsigned long sum = 0;
for (unsigned i = 0; i < graph.num_nodes; i++) {
sum += graph.value[i];
}
cout << "sum: " << sum << endl;
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
| a8fe6b2382094675f19ac8a5cc846733280757a1.cu | #include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
#include "../shared/stopwatch.h"
int main(int argc, char** argv)
{
Stopwatch copyTimer;
Stopwatch computeTimer;
Stopwatch generateTimer;
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
uint source_vertex = 0;
uint max_degree = 0;
for (int i=0; i<graph.num_nodes; i++){
if (graph.outDegree[i]>max_degree){
max_degree = graph.outDegree[i];
source_vertex = i;
}
}
cout<<"Source vertex: "<<source_vertex<<endl;
int *numWalker1 = new int[graph.num_nodes];
int *d_numWalker1, *d_numWalker2;
cudaMalloc(&d_numWalker1, sizeof(int) * graph.num_nodes);
cudaMalloc(&d_numWalker2, sizeof(int) * graph.num_nodes);
curandState *randStates;
cudaMalloc(&randStates, sizeof(curandState) * 512);
init_rand<<<1, 512>>>(randStates, 512);
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.value[i] = 0;
numWalker1[i] = 0;
}
//numWalker1[arguments.sourceNode] = graph.num_nodes * 2;
numWalker1[source_vertex] = graph.num_nodes * 2;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(d_numWalker1, numWalker1, graph.num_nodes * sizeof(int), cudaMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
generateTimer.start();
subgen.generate(graph, subgraph, d_numWalker1);
generateTimer.stop();
cout << "generate subgraph" << endl;
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
unsigned long totalActiveNodes = 0;
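	// One pass of this loop is a "super iteration": the active subgraph built by
	// subgen.generate from d_numWalker1 is partitioned so that each partition's edge
	// list fits on the device, the edge partitions are copied in and ppr_kernel is run
	// on each, then moveWalkers_ppr is run over all vertices before the subgraph is
	// regenerated for the next pass.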
while (subgraph.numActiveNodes>0)
{
partitioner.partition(subgraph, subgraph.numActiveNodes);
//cout << "num active nodes: " << subgraph.numActiveNodes << "\n";
totalActiveNodes += subgraph.numActiveNodes;
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
cudaDeviceSynchronize();
copyTimer.start();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
copyTimer.stop();
computeTimer.start();
ppr_kernel<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(graph.num_nodes,
partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
d_numWalker1,
d_numWalker2,
randStates);
cudaDeviceSynchronize();
computeTimer.stop();
gpuErrorcheck( cudaPeekAtLastError() );
}
moveWalkers_ppr<<<graph.num_nodes/512 + 1, 512>>>(graph.num_nodes, d_numWalker1, d_numWalker2, graph.d_value, randStates);
generateTimer.start();
subgen.generate(graph, subgraph, d_numWalker1);
		generateTimer.stop();
		gItr++; // count completed super iterations for the summary printed below
	}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
cout << "Number of iterations = " << gItr << endl;
cout << "compute time: " << computeTimer.total() << " ns copy time: " << copyTimer.total() << " ns\n";
cout << "generate subgraph time: " << generateTimer.total() << " ns\n";
cout << "total active nodes: " << totalActiveNodes << "\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), cudaMemcpyDeviceToHost));
unsigned long sum = 0;
for (unsigned i = 0; i < graph.num_nodes; i++) {
sum += graph.value[i];
}
cout << "sum: " << sum << endl;
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
3862a1327e4aea323d06c92de60b09265ebf151a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-6-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 20;
const AN5D_TYPE __side3Len = 20;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
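        // The loop above issues full 6-step fused sweeps; the branches below finish
        // whatever time steps the main loop did not cover, using the smaller
        // kernel0_1/2/3 variants, chosen so that the total number of sweeps has the
        // parity tested here (dev_A holds two copies of the grid).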
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
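    /* Host fallback: a radius-1 7-point star stencil applied to the interior
       points only, double-buffered between the two time planes A[t%2] and
       A[(t+1)%2]. */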
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 3862a1327e4aea323d06c92de60b09265ebf151a.cu | #include <assert.h>
#include <stdio.h>
#include "star3d1r-32x32-6-128_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 13
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
float *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 20;
const AN5D_TYPE __side3Len = 20;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 22;
const AN5D_TYPE __side3Len = 22;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
0.2500f * A[t%2][i][j][k] +
0.1248f * A[t%2][i-1][j][k] + 0.1249f * A[t%2][i+1][j][k] +
0.1250f * A[t%2][i][j-1][k] + 0.1251f * A[t%2][i][j+1][k] +
0.1252f * A[t%2][i][j][k-1] + 0.1253f * A[t%2][i][j][k+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
349e488145b192919ef183fb9f586d89db7b379c.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <iostream>
#include <hip/hip_runtime.h>
#include "ThomasMatrix.hpp"
#include "utils.hpp"
#include "cuThomasBatch.h"
// CPU kernel
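// solve_seq runs the Thomas algorithm on each of the N tridiagonal systems
// stored back to back: a forward sweep normalizes the upper diagonal u and the
// right-hand side rhs against the current pivot, then a backward substitution
// writes the solution in place into rhs.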
void solve_seq(const double* l, const double* d,
double* u, double* rhs,
const int n, const int N)
{
int first,last;
for (int j = 0; j < N; ++j)
{
first = j*n;
last = first + n - 1;
u[first] /= d[first];
rhs[first] /= d[first];
for (int i = first+1; i < last; i++) {
u[i] /= d[i] - l[i]*u[i-1];
rhs[i] = (rhs[i] - l[i]*rhs[i-1]) / (d[i] - l[i]*u[i-1]);
}
rhs[last] = (rhs[last] - l[last]*rhs[last-1]) / (d[last] - l[last]*u[last-1]);
for (int i = last-1; i >= first; i--) {
rhs[i] -= u[i]*rhs[i+1];
}
}
}
int main(int argc, char const *argv[])
{
if(argc != 5) {
std::cout << "Usage: %s [system size] [#systems] [thread block size] [repeat]" << std::endl;
return -1;
}
const int M = std::stoi(argv[1]);
const int N = std::stoi(argv[2]);
const int BlockSize = std::stoi(argv[3]); // GPU thread block size
const int repeat = std::stoi(argv[4]);
const int matrix_byte_size = M * N * sizeof(double);
//Loading a synthetic tridiagonal matrix into our structure
ThomasMatrix params = loadThomasMatrixSyn(M);
double* u_seq = (double*) malloc(matrix_byte_size);
double* u_Thomas_host = (double*) malloc(matrix_byte_size);
double* u_input = (double*) malloc(matrix_byte_size);
double* d_seq = (double*) malloc(matrix_byte_size);
double* d_Thomas_host = (double*) malloc(matrix_byte_size);
double* d_input = (double*) malloc(matrix_byte_size);
double* l_seq = (double*) malloc(matrix_byte_size);
double* l_Thomas_host = (double*) malloc(matrix_byte_size);
double* l_input = (double*) malloc(matrix_byte_size);
double* rhs_seq = (double*) malloc(matrix_byte_size);
double* rhs_Thomas_host = (double*) malloc(matrix_byte_size);
double* rhs_input = (double*) malloc(matrix_byte_size);
double* rhs_seq_output = (double*) malloc(matrix_byte_size);
double* rhs_seq_interleave = (double*) malloc(matrix_byte_size);
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
u_seq[(i * M) + j] = params.a[j];
u_input[(i * M) + j] = params.a[j];
d_seq[(i * M) + j] = params.d[j];
d_input[(i * M) + j] = params.d[j];
l_seq[(i * M) + j] = params.b[j];
l_input[(i * M) + j] = params.b[j];
rhs_seq[(i * M) + j] = params.rhs[j];
rhs_input[(i * M) + j] = params.rhs[j];
}
}
auto start = std::chrono::steady_clock::now();
  // Sequential CPU execution for the correctness check
for (int n = 0; n < repeat; n++) {
solve_seq( l_seq, d_seq, u_seq, rhs_seq, M, N );
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average serial execution time: %f (ms)\n", (time * 1e-6f) / repeat);
for (int i = 0; i < M*N; ++i) {
rhs_seq_output[i] = rhs_seq[i];
}
// initialize again because u_seq and rhs_seq are modified by solve_seq
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
u_seq[(i * M) + j] = params.a[j];
u_input[(i * M) + j] = params.a[j];
d_seq[(i * M) + j] = params.d[j];
d_input[(i * M) + j] = params.d[j];
l_seq[(i * M) + j] = params.b[j];
l_input[(i * M) + j] = params.b[j];
rhs_seq[(i * M) + j] = params.rhs[j];
rhs_input[(i * M) + j] = params.rhs[j];
}
}
// transpose the inputs for sequential accesses on a GPU
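  // (element i of every system is stored next to element i of the neighbouring
  //  systems, index i*N + j, so that consecutive GPU threads -- each expected
  //  to solve one whole system -- issue coalesced loads)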
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
u_Thomas_host[i*N+j] = u_input[j*M+i];
l_Thomas_host[i*N+j] = l_input[j*M+i];
d_Thomas_host[i*N+j] = d_input[j*M+i];
rhs_Thomas_host[i*N+j] = rhs_input[j*M+i];
rhs_seq_interleave[i*N+j] = rhs_seq_output[j*M+i];
}
}
// Run GPU kernel
double *u_device;
double *d_device;
double *l_device;
double *rhs_device;
hipMalloc((void**)&u_device, matrix_byte_size);
hipMalloc((void**)&l_device, matrix_byte_size);
hipMalloc((void**)&d_device, matrix_byte_size);
hipMalloc((void**)&rhs_device, matrix_byte_size);
hipMemcpyAsync(u_device, u_Thomas_host, matrix_byte_size, hipMemcpyHostToDevice, 0);
hipMemcpyAsync(l_device, l_Thomas_host, matrix_byte_size, hipMemcpyHostToDevice, 0);
hipMemcpyAsync(d_device, d_Thomas_host, matrix_byte_size, hipMemcpyHostToDevice, 0);
hipMemcpyAsync(rhs_device, rhs_Thomas_host, matrix_byte_size, hipMemcpyHostToDevice, 0);
hipDeviceSynchronize();
start = std::chrono::steady_clock::now();
for (int n = 0; n < repeat; n++) {
hipLaunchKernelGGL(( cuThomasBatch), dim3((N/BlockSize)+1), dim3(BlockSize), 0, 0, l_device, d_device, u_device, rhs_device, M, N);
}
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
hipMemcpyAsync(rhs_Thomas_host, rhs_device, matrix_byte_size, hipMemcpyDeviceToHost, 0);
hipDeviceSynchronize();
// verify
calcError(rhs_seq_interleave, rhs_Thomas_host, N*M);
free(u_seq);
free(u_Thomas_host);
free(u_input);
free(d_seq);
free(d_Thomas_host);
free(d_input);
free(l_seq);
free(l_Thomas_host);
free(l_input);
free(rhs_seq);
free(rhs_Thomas_host);
free(rhs_input);
free(rhs_seq_output);
free(rhs_seq_interleave);
hipFree(l_device);
hipFree(d_device);
hipFree(u_device);
hipFree(rhs_device);
return 0;
}
| 349e488145b192919ef183fb9f586d89db7b379c.cu | #include <chrono>
#include <iostream>
#include <cuda.h>
#include "ThomasMatrix.hpp"
#include "utils.hpp"
#include "cuThomasBatch.h"
// CPU kernel
void solve_seq(const double* l, const double* d,
double* u, double* rhs,
const int n, const int N)
{
int first,last;
for (int j = 0; j < N; ++j)
{
first = j*n;
last = first + n - 1;
u[first] /= d[first];
rhs[first] /= d[first];
for (int i = first+1; i < last; i++) {
u[i] /= d[i] - l[i]*u[i-1];
rhs[i] = (rhs[i] - l[i]*rhs[i-1]) / (d[i] - l[i]*u[i-1]);
}
rhs[last] = (rhs[last] - l[last]*rhs[last-1]) / (d[last] - l[last]*u[last-1]);
for (int i = last-1; i >= first; i--) {
rhs[i] -= u[i]*rhs[i+1];
}
}
}
int main(int argc, char const *argv[])
{
if(argc != 5) {
std::cout << "Usage: %s [system size] [#systems] [thread block size] [repeat]" << std::endl;
return -1;
}
const int M = std::stoi(argv[1]);
const int N = std::stoi(argv[2]);
const int BlockSize = std::stoi(argv[3]); // GPU thread block size
const int repeat = std::stoi(argv[4]);
const int matrix_byte_size = M * N * sizeof(double);
//Loading a synthetic tridiagonal matrix into our structure
ThomasMatrix params = loadThomasMatrixSyn(M);
double* u_seq = (double*) malloc(matrix_byte_size);
double* u_Thomas_host = (double*) malloc(matrix_byte_size);
double* u_input = (double*) malloc(matrix_byte_size);
double* d_seq = (double*) malloc(matrix_byte_size);
double* d_Thomas_host = (double*) malloc(matrix_byte_size);
double* d_input = (double*) malloc(matrix_byte_size);
double* l_seq = (double*) malloc(matrix_byte_size);
double* l_Thomas_host = (double*) malloc(matrix_byte_size);
double* l_input = (double*) malloc(matrix_byte_size);
double* rhs_seq = (double*) malloc(matrix_byte_size);
double* rhs_Thomas_host = (double*) malloc(matrix_byte_size);
double* rhs_input = (double*) malloc(matrix_byte_size);
double* rhs_seq_output = (double*) malloc(matrix_byte_size);
double* rhs_seq_interleave = (double*) malloc(matrix_byte_size);
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
u_seq[(i * M) + j] = params.a[j];
u_input[(i * M) + j] = params.a[j];
d_seq[(i * M) + j] = params.d[j];
d_input[(i * M) + j] = params.d[j];
l_seq[(i * M) + j] = params.b[j];
l_input[(i * M) + j] = params.b[j];
rhs_seq[(i * M) + j] = params.rhs[j];
rhs_input[(i * M) + j] = params.rhs[j];
}
}
auto start = std::chrono::steady_clock::now();
  // Sequential CPU execution for the correctness check
for (int n = 0; n < repeat; n++) {
solve_seq( l_seq, d_seq, u_seq, rhs_seq, M, N );
}
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average serial execution time: %f (ms)\n", (time * 1e-6f) / repeat);
for (int i = 0; i < M*N; ++i) {
rhs_seq_output[i] = rhs_seq[i];
}
// initialize again because u_seq and rhs_seq are modified by solve_seq
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < M; ++j)
{
u_seq[(i * M) + j] = params.a[j];
u_input[(i * M) + j] = params.a[j];
d_seq[(i * M) + j] = params.d[j];
d_input[(i * M) + j] = params.d[j];
l_seq[(i * M) + j] = params.b[j];
l_input[(i * M) + j] = params.b[j];
rhs_seq[(i * M) + j] = params.rhs[j];
rhs_input[(i * M) + j] = params.rhs[j];
}
}
// transpose the inputs for sequential accesses on a GPU
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
u_Thomas_host[i*N+j] = u_input[j*M+i];
l_Thomas_host[i*N+j] = l_input[j*M+i];
d_Thomas_host[i*N+j] = d_input[j*M+i];
rhs_Thomas_host[i*N+j] = rhs_input[j*M+i];
rhs_seq_interleave[i*N+j] = rhs_seq_output[j*M+i];
}
}
// Run GPU kernel
double *u_device;
double *d_device;
double *l_device;
double *rhs_device;
cudaMalloc((void**)&u_device, matrix_byte_size);
cudaMalloc((void**)&l_device, matrix_byte_size);
cudaMalloc((void**)&d_device, matrix_byte_size);
cudaMalloc((void**)&rhs_device, matrix_byte_size);
cudaMemcpyAsync(u_device, u_Thomas_host, matrix_byte_size, cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(l_device, l_Thomas_host, matrix_byte_size, cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(d_device, d_Thomas_host, matrix_byte_size, cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(rhs_device, rhs_Thomas_host, matrix_byte_size, cudaMemcpyHostToDevice, 0);
cudaDeviceSynchronize();
start = std::chrono::steady_clock::now();
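  // One thread per tridiagonal system: (N/BlockSize)+1 blocks of BlockSize
  // threads cover the N systems (cuThomasBatch is assumed to mask off the
  // surplus threads in the last block).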
for (int n = 0; n < repeat; n++) {
cuThomasBatch<<<(N/BlockSize)+1, BlockSize>>> (l_device, d_device, u_device, rhs_device, M, N);
}
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time: %f (ms)\n", (time * 1e-6f) / repeat);
cudaMemcpyAsync(rhs_Thomas_host, rhs_device, matrix_byte_size, cudaMemcpyDeviceToHost, 0);
cudaDeviceSynchronize();
// verify
calcError(rhs_seq_interleave, rhs_Thomas_host, N*M);
free(u_seq);
free(u_Thomas_host);
free(u_input);
free(d_seq);
free(d_Thomas_host);
free(d_input);
free(l_seq);
free(l_Thomas_host);
free(l_input);
free(rhs_seq);
free(rhs_Thomas_host);
free(rhs_input);
free(rhs_seq_output);
free(rhs_seq_interleave);
cudaFree(l_device);
cudaFree(d_device);
cudaFree(u_device);
cudaFree(rhs_device);
return 0;
}
|
63e3a2d9fde197df54fe11f3f7adce040e60baa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "labeling.h"
const int threads = 256;
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1) / b + 1; }
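// The two kernels below label every non-space character with its 1-based
// position inside its word (spaces get 0): intra_block_scan performs a
// segmented Blelloch scan over the 2*threads characters handled by each block,
// restarting the count at every word boundary, and add_aux adds the per-block
// carry stored in d_aux to words that continue across a block boundary.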
__global__ void add_aux(int *pos, int text_size, int *d_aux) {
int gindex = blockIdx.x * blockDim.x + threadIdx.x;
int lindex = threadIdx.x;
if (blockIdx.x != 0 && gindex < text_size)
if (lindex == pos[gindex] - 1)
pos[gindex] += d_aux[blockIdx.x - 1];
}
__global__ void intra_block_scan(const char *text, int *pos, int text_size, int *d_aux) {
__shared__ int sdata[threads * 2];
__shared__ bool fdata[threads * 2];
__shared__ bool ofdata[threads * 2];
__shared__ bool add1[threads * 2];
int gindex = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
int lindex = threadIdx.x * 2;
int tid = threadIdx.x;
if (gindex < text_size)
sdata[lindex] = add1[lindex] = (text[gindex] == ' ' ? 0 : 1);
if (gindex + 1 < text_size)
sdata[lindex + 1] = add1[lindex + 1] = (text[gindex + 1] == ' ' ? 0 : 1);
__syncthreads();
if (lindex == 0)
fdata[lindex] = ofdata[lindex] = true;
else
fdata[lindex] = ofdata[lindex] = (sdata[lindex - 1] != sdata[lindex]);
fdata[lindex + 1] = ofdata[lindex + 1] = (sdata[lindex] != sdata[lindex + 1]);
int offset = 1;
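  // Up-sweep (reduce) phase of the segmented scan over this block.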
for (int d = threads; d > 0; d >>= 1) {
__syncthreads();
if (tid < d) {
int ai = offset * (lindex + 1) - 1;
int bi = offset * (lindex + 2) - 1;
if (!fdata[bi])
sdata[bi] += sdata[ai];
fdata[bi] = fdata[ai] || fdata[bi];
}
offset <<= 1;
}
if (tid == 0)
sdata[threads * 2 - 1] = 0;
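  // Down-sweep phase: turn the partial sums into an exclusive segmented scan.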
for (int d = 1; d <= threads; d <<= 1) {
__syncthreads();
offset >>= 1;
if (tid < d) {
int ai = offset * (lindex + 1) - 1;
int bi = offset * (lindex + 2) - 1;
int t = sdata[ai];
sdata[ai] = sdata[bi];
if (ofdata[ai + 1])
sdata[bi] = 0;
else if (fdata[ai])
sdata[bi] = t;
else
sdata[bi] += t;
fdata[ai] = false;
}
}
__syncthreads();
if (gindex < text_size)
pos[gindex] = sdata[lindex] + add1[lindex];
if (gindex + 1 < text_size)
pos[gindex + 1] = sdata[lindex + 1] + add1[lindex + 1];
if (tid == 0)
d_aux[blockIdx.x] = sdata[threads * 2 - 1] + add1[threads * 2 - 1];
}
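// Host entry point: text and pos are expected to already be device pointers
// sized to text_size characters / ints respectively.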
void labeling(const char *text, int *pos, int text_size) {
int *d_aux;
hipMalloc(&d_aux, CeilDiv(text_size, threads) * sizeof(int));
hipLaunchKernelGGL(( intra_block_scan), dim3(CeilDiv(text_size, threads * 2)), dim3(threads), 0, 0,
text, pos, text_size, d_aux);
hipLaunchKernelGGL(( add_aux), dim3(CeilDiv(text_size, threads * 2)), dim3(threads * 2), 0, 0, pos, text_size,
d_aux);
hipFree(d_aux);
}
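// A possible way to drive labeling() from the host (illustrative sketch only;
// h_text, h_pos and the buffer names are hypothetical, error checking omitted):
//   char *d_text;  int *d_pos;
//   hipMalloc(&d_text, text_size);
//   hipMalloc(&d_pos,  text_size * sizeof(int));
//   hipMemcpy(d_text, h_text, text_size, hipMemcpyHostToDevice);
//   labeling(d_text, d_pos, text_size);
//   hipMemcpy(h_pos, d_pos, text_size * sizeof(int), hipMemcpyDeviceToHost);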
| 63e3a2d9fde197df54fe11f3f7adce040e60baa0.cu | #include "labeling.h"
const int threads = 256;
__device__ __host__ int CeilDiv(int a, int b) { return (a - 1) / b + 1; }
__global__ void add_aux(int *pos, int text_size, int *d_aux) {
int gindex = blockIdx.x * blockDim.x + threadIdx.x;
int lindex = threadIdx.x;
if (blockIdx.x != 0 && gindex < text_size)
if (lindex == pos[gindex] - 1)
pos[gindex] += d_aux[blockIdx.x - 1];
}
__global__ void intra_block_scan(const char *text, int *pos, int text_size, int *d_aux) {
__shared__ int sdata[threads * 2];
__shared__ bool fdata[threads * 2];
__shared__ bool ofdata[threads * 2];
__shared__ bool add1[threads * 2];
int gindex = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
int lindex = threadIdx.x * 2;
int tid = threadIdx.x;
if (gindex < text_size)
sdata[lindex] = add1[lindex] = (text[gindex] == ' ' ? 0 : 1);
if (gindex + 1 < text_size)
sdata[lindex + 1] = add1[lindex + 1] = (text[gindex + 1] == ' ' ? 0 : 1);
__syncthreads();
if (lindex == 0)
fdata[lindex] = ofdata[lindex] = true;
else
fdata[lindex] = ofdata[lindex] = (sdata[lindex - 1] != sdata[lindex]);
fdata[lindex + 1] = ofdata[lindex + 1] = (sdata[lindex] != sdata[lindex + 1]);
int offset = 1;
for (int d = threads; d > 0; d >>= 1) {
__syncthreads();
if (tid < d) {
int ai = offset * (lindex + 1) - 1;
int bi = offset * (lindex + 2) - 1;
if (!fdata[bi])
sdata[bi] += sdata[ai];
fdata[bi] = fdata[ai] || fdata[bi];
}
offset <<= 1;
}
if (tid == 0)
sdata[threads * 2 - 1] = 0;
for (int d = 1; d <= threads; d <<= 1) {
__syncthreads();
offset >>= 1;
if (tid < d) {
int ai = offset * (lindex + 1) - 1;
int bi = offset * (lindex + 2) - 1;
int t = sdata[ai];
sdata[ai] = sdata[bi];
if (ofdata[ai + 1])
sdata[bi] = 0;
else if (fdata[ai])
sdata[bi] = t;
else
sdata[bi] += t;
fdata[ai] = false;
}
}
__syncthreads();
if (gindex < text_size)
pos[gindex] = sdata[lindex] + add1[lindex];
if (gindex + 1 < text_size)
pos[gindex + 1] = sdata[lindex + 1] + add1[lindex + 1];
if (tid == 0)
d_aux[blockIdx.x] = sdata[threads * 2 - 1] + add1[threads * 2 - 1];
}
void labeling(const char *text, int *pos, int text_size) {
int *d_aux;
cudaMalloc(&d_aux, CeilDiv(text_size, threads) * sizeof(int));
intra_block_scan<<<CeilDiv(text_size, threads * 2), threads>>>(
text, pos, text_size, d_aux);
add_aux<<<CeilDiv(text_size, threads * 2), threads * 2>>>(pos, text_size,
d_aux);
cudaFree(d_aux);
}
|
8232d4ff4cfa05703f148a094eed92be7bae5335.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _CLOCK_KERNEL_H_
#define _CLOCK_KERNEL_H_
// This kernel computes a standard parallel reduction and evaluates the
// time it takes to do that for each block. The timing results are stored
// in device memory.
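// The kernel relies on dynamically sized shared memory, so a launch must pass
// 2 * blockDim.x * sizeof(float) as the shared-memory argument, e.g.
// (illustrative only; numBlocks/numThreads/dInput/dOutput/dTimer are placeholders):
//   hipLaunchKernelGGL(timedReduction, dim3(numBlocks), dim3(numThreads),
//                      2 * numThreads * sizeof(float), 0,
//                      dInput, dOutput, dTimer);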
__global__ static void timedReduction(const float * input, float * output, clock_t * timer)
{
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for(int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0) {
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid+gridDim.x] = clock();
}
#endif // _CLOCK_KERNEL_H_
| 8232d4ff4cfa05703f148a094eed92be7bae5335.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#ifndef _CLOCK_KERNEL_H_
#define _CLOCK_KERNEL_H_
// This kernel computes a standard parallel reduction and evaluates the
// time it takes to do that for each block. The timing results are stored
// in device memory.
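// Note: the launch must set the dynamic shared-memory size to
// 2 * numThreads * sizeof(float), e.g. (placeholder names):
//   timedReduction<<<numBlocks, numThreads, 2 * numThreads * sizeof(float)>>>(dInput, dOutput, dTimer);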
__global__ static void timedReduction(const float * input, float * output, clock_t * timer)
{
// __shared__ float shared[2 * blockDim.x];
extern __shared__ float shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
if (tid == 0) timer[bid] = clock();
// Copy input.
shared[tid] = input[tid];
shared[tid + blockDim.x] = input[tid + blockDim.x];
// Perform reduction to find minimum.
for(int d = blockDim.x; d > 0; d /= 2)
{
__syncthreads();
if (tid < d)
{
float f0 = shared[tid];
float f1 = shared[tid + d];
if (f1 < f0) {
shared[tid] = f1;
}
}
}
// Write result.
if (tid == 0) output[bid] = shared[0];
__syncthreads();
if (tid == 0) timer[bid+gridDim.x] = clock();
}
#endif // _CLOCK_KERNEL_H_
|
fd5515a7300707b97ae161d3b1317b3704e85fa9.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<ushort3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif // OPENCV_TINY_GPU_MODULE
#endif /* CUDA_DISABLER */
| fd5515a7300707b97ae161d3b1317b3704e85fa9.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<ushort3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
|
b4c2de2d503e1ac2d569436510dee5e73439c962.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <ctime>
#include <sys/time.h>
#include "readWeights70.h"//to read the weights
#include "deviceFunctions70.h"//contains device functions like matmul,add
using namespace std;
inline int _ConvertSMVer2Cores(int major, int minor)
{
    // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct
{
        int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
    // If we don't find the values, fall back to the previous entry so the program can still run
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__global__ void predictKernel(double *X,double *W_i,double *W_f,double *W_c,double *W_o,double *U_i,double *U_f,double *U_c,double *U_o,double *b_i,double *b_f,double *b_c,double *b_o,double *w,double *b,double *result,double *loop_count)//cuda kernel
{
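    // Per-thread bidirectional LSTM classifier:
    //  - build a 70-step "upper" window ending at sample tid and a 70-step
    //    "lower" window starting at it (3 features per timestep),
    //  - run the LSTM cell forward and backward over each window, storing the
    //    70-wide hidden states side by side as a 140-wide representation,
    //  - mean-pool over time, apply the dense layer and a 12-way softmax,
    //  - write the dot product of the two probability densities to result[tid].
    // Note: loop_count is a single global counter incremented non-atomically by
    // every thread, so it is only a rough per-thread operation count that the
    // host later uses for its FLOP estimate.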
// Get our global thread ID
int tid = blockIdx.x*blockDim.x+threadIdx.x;
//if(tid==31908)
//printf("Done");
loop_count[0]=0;
    double x[70][3];//input to lstm: 70 timestamps, 3 features each
    double *c_t,*h_t,*i_t,*C_t,*f_t,*o_t;
    double H[70][140];//hidden state of each timestamp (70 timestamps; forward and backward halves give width 140)
    double input[140],output[12];//input & output of dense layer
    double pd1[12],pd2[12];//probability density for upper and lower window resp.
int i,j;
double sum,res;
if ((tid>69&&tid<1719551-70))
{
//create upper window
#pragma unroll
      for(i=69;i>=0;i--)//i :timestamp from 0-69
{
x[i][0]=*(X+(tid-(69-i))*3+0);
x[i][1]=*(X+(tid-(69-i))*3+1);
x[i][2]=*(X+(tid-(69-i))*3+2);
loop_count[0]++;
}
//prediction for upper window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//backward pass
#pragma unroll
      for(i=69;i>=0;i--)//i :timestamp from 0-69
{
x[69-i][0]=*(X+(tid-(69-i))*3+0);
x[69-i][1]=*(X+(tid-(69-i))*3+1);
x[69-i][2]=*(X+(tid-(69-i))*3+2);
loop_count[0]++;
}
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<140;j++)
{
sum=0;
#pragma unroll
for(i=0;i<70;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(70.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<140;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
for(i=0;i<12;i++)//prob density for upper window
{
pd1[i]=exp(output[i])/sum;
loop_count[0]++;
}
//create lower window
#pragma unroll
      for(i=0;i<70;i++)//i :timestamp from 0-69
{
x[i][0]=*(X+(tid+i)*3+0);
x[i][1]=*(X+(tid+i)*3+1);
x[i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Backward pass
#pragma unroll
      for(i=0;i<70;i++)//i :timestamp from 0-69
{
x[69-i][0]=*(X+(tid+i)*3+0);
x[69-i][1]=*(X+(tid+i)*3+1);
x[69-i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<140;j++)
{
sum=0;
#pragma unroll
for(i=0;i<70;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(70.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<140;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
      for(i=0;i<12;i++)//prob density for lower window
{
pd2[i]=exp(output[i])/sum;
loop_count[0]++;
}
res=0;
#pragma unroll
for(i=0;i<12;i++)
{
res+=(pd1[i]*pd2[i]);
loop_count[0]++;
}
*(result+tid)=res;
}//if tid
}// kernel loop
int main()
{
double *X=(double *)malloc(1719551 * 3 * sizeof(double));//dataset
double *W_i=(double *)malloc(70*3*sizeof(double));
double *W_f=(double *)malloc(70*3*sizeof(double));
double *W_c=(double *)malloc(70*3*sizeof(double));
double *W_o=(double *)malloc(70*3*sizeof(double));
double *U_i=(double *)malloc(70*70*sizeof(double));
double *U_f=(double *)malloc(70*70*sizeof(double));
double *U_c=(double *)malloc(70*70*sizeof(double));
double *U_o=(double *)malloc(70*70*sizeof(double));
double *b_i=(double *)malloc(70*sizeof(double));
double *b_f=(double *)malloc(70*sizeof(double));
double *b_c=(double *)malloc(70*sizeof(double));
double *b_o=(double *)malloc(70*sizeof(double));
double *w=(double *)malloc(140*12*sizeof(double));
double *b=(double *)malloc(12*sizeof(double));
double *result=(double *)malloc(1719551*sizeof(double));
double *loop_count=(double *)malloc(1*sizeof(double));
readWeights(X,W_i,W_f,W_c,W_o,U_i,U_f,U_c,U_o,b_i,b_f,b_c,b_o,w,b);//read the weights from file(readWeights.h)
//for(int p=0;p<50;p++)
//printf("%f ",*(b_i+p));
//printf("\n");
double *X_gpu,*W_i_gpu,*W_f_gpu,*W_c_gpu,*W_o_gpu,*U_i_gpu,*U_f_gpu,*U_c_gpu,*U_o_gpu,*b_i_gpu,*b_f_gpu,*b_c_gpu,*b_o_gpu,*w_gpu,*b_gpu,*result_gpu,*loop_count_gpu;//device vector
size_t bytes1=1719551*3*sizeof(double);//size in bytes of the vector to be sent to gpu
size_t bytes2=70*3*sizeof(double);
size_t bytes3=70*70*sizeof(double);
size_t bytes4=70*sizeof(double);
size_t bytes5=140*12*sizeof(double);
size_t bytes6=12*sizeof(double);
size_t bytes7=1719551*sizeof(double);
// Allocate memory for each vector on GPU
hipMalloc(&X_gpu, bytes1);
hipMalloc(&W_i_gpu,bytes2);
hipMalloc(&W_f_gpu,bytes2);
hipMalloc(&W_c_gpu,bytes2);
hipMalloc(&W_o_gpu,bytes2);
hipMalloc(&U_i_gpu,bytes3);
hipMalloc(&U_f_gpu,bytes3);
hipMalloc(&U_c_gpu,bytes3);
hipMalloc(&U_o_gpu,bytes3);
hipMalloc(&b_i_gpu,bytes4);
hipMalloc(&b_f_gpu,bytes4);
hipMalloc(&b_c_gpu,bytes4);
hipMalloc(&b_o_gpu,bytes4);
hipMalloc(&w_gpu,bytes5);
hipMalloc(&b_gpu,bytes6);
hipMalloc(&result_gpu,bytes7);
hipMalloc(&loop_count_gpu,1*sizeof(double));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Copy host vectors to device
hipMemcpy(X_gpu,X,bytes1,hipMemcpyHostToDevice);
hipMemcpy(W_i_gpu,W_i,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_f_gpu,W_f,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_c_gpu,W_c,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_o_gpu,W_o,bytes2,hipMemcpyHostToDevice);
hipMemcpy(U_i_gpu,U_i,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_f_gpu,U_f,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_c_gpu,U_c,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_o_gpu,U_o,bytes3,hipMemcpyHostToDevice);
hipMemcpy(b_i_gpu,b_i,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_f_gpu,b_f,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_c_gpu,b_c,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_o_gpu,b_o,bytes4,hipMemcpyHostToDevice);
hipMemcpy(w_gpu,w,bytes5,hipMemcpyHostToDevice);
hipMemcpy(b_gpu,b,bytes6,hipMemcpyHostToDevice);
hipMemcpy(loop_count_gpu,loop_count,1*sizeof(double),hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)1719551/blockSize);
// Execute the kernel
//Gflops
double fs_t, fe_t, ft_t;
struct timeval t;
int cudaCores, smCount, totalThreads;
    double f_avg = 0;
int i=0;
hipSetDevice(i);
// Get device properties
printf("\nCUDA Device #%d\n\n", (i+1));
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
smCount = devProp.multiProcessorCount;
cudaCores = _ConvertSMVer2Cores(devProp.major, devProp.minor);
totalThreads=1719551-140;
gettimeofday(&t, NULL);
fs_t = t.tv_sec+(t.tv_usec/1000000.0);
hipEventRecord(start);
hipLaunchKernelGGL(( predictKernel), dim3(gridSize), dim3(blockSize), 0, 0, X_gpu,W_i_gpu,W_f_gpu,W_c_gpu,W_o_gpu,U_i_gpu,U_f_gpu,U_c_gpu,U_o_gpu,b_i_gpu,b_f_gpu,b_c_gpu,b_o_gpu,w_gpu,b_gpu,result_gpu,loop_count_gpu);
hipEventRecord(stop);
    hipDeviceSynchronize();
gettimeofday(&t, NULL);
fe_t = t.tv_sec+(t.tv_usec/1000000.0);
ft_t = fe_t - fs_t;
hipMemcpy(loop_count,loop_count_gpu,sizeof(double),hipMemcpyDeviceToHost);
cout<<loop_count[0]<<' '<<smCount<<' '<<cudaCores<<' '<<totalThreads<<'\n';
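    // Rough throughput estimate: the (racy) per-thread loop counter is scaled by
    // the SM count, CUDA cores per SM and launched thread count, with the factor
    // of 10 apparently standing in for FLOPs per counted iteration; treat the
    // printed "G-FLOPs" as an approximation, not a measured value.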
f_avg += (loop_count[0]*smCount*cudaCores*totalThreads*10)/(ft_t*1000000000);
hipMemcpy(result,result_gpu,bytes7,hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout<<"Time:"<<'\n';
cout<<(float)(milliseconds/1000)<<'\n';
for(int z=31908;z<=31968;z++)
cout<<result[z]<<' ';
printf("Number of FLOPs: %lf G-FLOPs\n", (f_avg));
hipFree(X_gpu);
hipFree(W_i_gpu);
hipFree(W_f_gpu);
hipFree(W_c_gpu);
hipFree(W_o_gpu);
hipFree(U_i_gpu);
hipFree(U_f_gpu);
hipFree(U_c_gpu);
hipFree(U_o_gpu);
hipFree(b_i_gpu);
hipFree(b_f_gpu);
hipFree(b_c_gpu);
hipFree(b_o_gpu);
hipFree(w_gpu);
hipFree(b_gpu);
hipFree(result_gpu);
return 0;
}
| b4c2de2d503e1ac2d569436510dee5e73439c962.cu | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
#include <ctime>
#include <sys/time.h>
#include "readWeights70.h"//to read the weights
#include "deviceFunctions70.h"//contains device functions like matmul,add
using namespace std;
inline int _ConvertSMVer2Cores(int major, int minor)
{
    // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct
{
        int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
    // If we don't find the values, fall back to the previous entry so the program can still run
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__global__ void predictKernel(double *X,double *W_i,double *W_f,double *W_c,double *W_o,double *U_i,double *U_f,double *U_c,double *U_o,double *b_i,double *b_f,double *b_c,double *b_o,double *w,double *b,double *result,double *loop_count)//cuda kernel
{
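    // Per-thread bidirectional LSTM classifier:
    //  - build a 70-step "upper" window ending at sample tid and a 70-step
    //    "lower" window starting at it (3 features per timestep),
    //  - run the LSTM cell forward and backward over each window, storing the
    //    70-wide hidden states side by side as a 140-wide representation,
    //  - mean-pool over time, apply the dense layer and a 12-way softmax,
    //  - write the dot product of the two probability densities to result[tid].
    // Note: loop_count is a single global counter incremented non-atomically by
    // every thread, so it is only a rough per-thread operation count that the
    // host later uses for its FLOP estimate.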
// Get our global thread ID
int tid = blockIdx.x*blockDim.x+threadIdx.x;
//if(tid==31908)
//printf("Done");
loop_count[0]=0;
    double x[70][3];//input to lstm: 70 timestamps, 3 features each
    double *c_t,*h_t,*i_t,*C_t,*f_t,*o_t;
    double H[70][140];//hidden state of each timestamp (70 timestamps; forward and backward halves give width 140)
    double input[140],output[12];//input & output of dense layer
    double pd1[12],pd2[12];//probability density for upper and lower window resp.
int i,j;
double sum,res;
if ((tid>69&&tid<1719551-70))
{
//create upper window
#pragma unroll
      for(i=69;i>=0;i--)//i :timestamp from 0-69
{
x[i][0]=*(X+(tid-(69-i))*3+0);
x[i][1]=*(X+(tid-(69-i))*3+1);
x[i][2]=*(X+(tid-(69-i))*3+2);
loop_count[0]++;
}
//prediction for upper window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//backward pass
#pragma unroll
      for(i=69;i>=0;i--)//i :timestamp from 0-69
{
x[69-i][0]=*(X+(tid-(69-i))*3+0);
x[69-i][1]=*(X+(tid-(69-i))*3+1);
x[69-i][2]=*(X+(tid-(69-i))*3+2);
loop_count[0]++;
}
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<140;j++)
{
sum=0;
#pragma unroll
for(i=0;i<70;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(70.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<140;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
for(i=0;i<12;i++)//prob density for upper window
{
pd1[i]=exp(output[i])/sum;
loop_count[0]++;
}
//create lower window
#pragma unroll
      for(i=0;i<70;i++)//i :timestamp from 0-69
{
x[i][0]=*(X+(tid+i)*3+0);
x[i][1]=*(X+(tid+i)*3+1);
x[i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Backward pass
#pragma unroll
      for(i=0;i<70;i++)//i :timestamp from 0-69
{
x[69-i][0]=*(X+(tid+i)*3+0);
x[69-i][1]=*(X+(tid+i)*3+1);
x[69-i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<70;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<70;j++)
{
H[i][70+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<140;j++)
{
sum=0;
#pragma unroll
for(i=0;i<70;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(70.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<140;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
      for(i=0;i<12;i++)//prob density for lower window
{
pd2[i]=exp(output[i])/sum;
loop_count[0]++;
}
res=0;
#pragma unroll
for(i=0;i<12;i++)
{
res+=(pd1[i]*pd2[i]);
loop_count[0]++;
}
*(result+tid)=res;
}//if tid
}// kernel loop
int main()
{
double *X=(double *)malloc(1719551 * 3 * sizeof(double));//dataset
double *W_i=(double *)malloc(70*3*sizeof(double));
double *W_f=(double *)malloc(70*3*sizeof(double));
double *W_c=(double *)malloc(70*3*sizeof(double));
double *W_o=(double *)malloc(70*3*sizeof(double));
double *U_i=(double *)malloc(70*70*sizeof(double));
double *U_f=(double *)malloc(70*70*sizeof(double));
double *U_c=(double *)malloc(70*70*sizeof(double));
double *U_o=(double *)malloc(70*70*sizeof(double));
double *b_i=(double *)malloc(70*sizeof(double));
double *b_f=(double *)malloc(70*sizeof(double));
double *b_c=(double *)malloc(70*sizeof(double));
double *b_o=(double *)malloc(70*sizeof(double));
double *w=(double *)malloc(140*12*sizeof(double));
double *b=(double *)malloc(12*sizeof(double));
double *result=(double *)malloc(1719551*sizeof(double));
double *loop_count=(double *)malloc(1*sizeof(double));
readWeights(X,W_i,W_f,W_c,W_o,U_i,U_f,U_c,U_o,b_i,b_f,b_c,b_o,w,b);//read the weights from file(readWeights.h)
//for(int p=0;p<50;p++)
//printf("%f ",*(b_i+p));
//printf("\n");
double *X_gpu,*W_i_gpu,*W_f_gpu,*W_c_gpu,*W_o_gpu,*U_i_gpu,*U_f_gpu,*U_c_gpu,*U_o_gpu,*b_i_gpu,*b_f_gpu,*b_c_gpu,*b_o_gpu,*w_gpu,*b_gpu,*result_gpu,*loop_count_gpu;//device vector
size_t bytes1=1719551*3*sizeof(double);//size in bytes of the vector to be sent to gpu
size_t bytes2=70*3*sizeof(double);
size_t bytes3=70*70*sizeof(double);
size_t bytes4=70*sizeof(double);
size_t bytes5=140*12*sizeof(double);
size_t bytes6=12*sizeof(double);
size_t bytes7=1719551*sizeof(double);
// Allocate memory for each vector on GPU
cudaMalloc(&X_gpu, bytes1);
cudaMalloc(&W_i_gpu,bytes2);
cudaMalloc(&W_f_gpu,bytes2);
cudaMalloc(&W_c_gpu,bytes2);
cudaMalloc(&W_o_gpu,bytes2);
cudaMalloc(&U_i_gpu,bytes3);
cudaMalloc(&U_f_gpu,bytes3);
cudaMalloc(&U_c_gpu,bytes3);
cudaMalloc(&U_o_gpu,bytes3);
cudaMalloc(&b_i_gpu,bytes4);
cudaMalloc(&b_f_gpu,bytes4);
cudaMalloc(&b_c_gpu,bytes4);
cudaMalloc(&b_o_gpu,bytes4);
cudaMalloc(&w_gpu,bytes5);
cudaMalloc(&b_gpu,bytes6);
cudaMalloc(&result_gpu,bytes7);
cudaMalloc(&loop_count_gpu,1*sizeof(double));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Copy host vectors to device
cudaMemcpy(X_gpu,X,bytes1,cudaMemcpyHostToDevice);
cudaMemcpy(W_i_gpu,W_i,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_f_gpu,W_f,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_c_gpu,W_c,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_o_gpu,W_o,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(U_i_gpu,U_i,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_f_gpu,U_f,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_c_gpu,U_c,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_o_gpu,U_o,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(b_i_gpu,b_i,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_f_gpu,b_f,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_c_gpu,b_c,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_o_gpu,b_o,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(w_gpu,w,bytes5,cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu,b,bytes6,cudaMemcpyHostToDevice);
cudaMemcpy(loop_count_gpu,loop_count,1*sizeof(double),cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)1719551/blockSize);
// Execute the kernel
//Gflops
double fs_t, fe_t, ft_t;
struct timeval t;
int cudaCores, smCount, totalThreads;
    double f_avg = 0;
int i=0;
cudaSetDevice(i);
// Get device properties
printf("\nCUDA Device #%d\n\n", (i+1));
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
smCount = devProp.multiProcessorCount;
cudaCores = _ConvertSMVer2Cores(devProp.major, devProp.minor);
totalThreads=1719551-140;
gettimeofday(&t, NULL);
fs_t = t.tv_sec+(t.tv_usec/1000000.0);
cudaEventRecord(start);
predictKernel<<<gridSize, blockSize>>>(X_gpu,W_i_gpu,W_f_gpu,W_c_gpu,W_o_gpu,U_i_gpu,U_f_gpu,U_c_gpu,U_o_gpu,b_i_gpu,b_f_gpu,b_c_gpu,b_o_gpu,w_gpu,b_gpu,result_gpu,loop_count_gpu);
cudaEventRecord(stop);
    cudaThreadSynchronize();
gettimeofday(&t, NULL);
fe_t = t.tv_sec+(t.tv_usec/1000000.0);
ft_t = fe_t - fs_t;
cudaMemcpy(loop_count,loop_count_gpu,sizeof(double),cudaMemcpyDeviceToHost);
cout<<loop_count[0]<<' '<<smCount<<' '<<cudaCores<<' '<<totalThreads<<'\n';
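    // Rough throughput estimate: the (racy) per-thread loop counter is scaled by
    // the SM count, CUDA cores per SM and launched thread count, with the factor
    // of 10 apparently standing in for FLOPs per counted iteration; treat the
    // printed "G-FLOPs" as an approximation, not a measured value.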
f_avg += (loop_count[0]*smCount*cudaCores*totalThreads*10)/(ft_t*1000000000);
cudaMemcpy(result,result_gpu,bytes7,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout<<"Time:"<<'\n';
cout<<(float)(milliseconds/1000)<<'\n';
for(int z=31908;z<=31968;z++)
cout<<result[z]<<' ';
printf("Number of FLOPs: %lf G-FLOPs\n", (f_avg));
cudaFree(X_gpu);
cudaFree(W_i_gpu);
cudaFree(W_f_gpu);
cudaFree(W_c_gpu);
cudaFree(W_o_gpu);
cudaFree(U_i_gpu);
cudaFree(U_f_gpu);
cudaFree(U_c_gpu);
cudaFree(U_o_gpu);
cudaFree(b_i_gpu);
cudaFree(b_f_gpu);
cudaFree(b_c_gpu);
cudaFree(b_o_gpu);
cudaFree(w_gpu);
cudaFree(b_gpu);
cudaFree(result_gpu);
return 0;
}
|
f5676998f8d2b9f9c845e082eed37ae2af89a53d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/contiguous_split.hpp>
#include <thrust/iterator/counting_iterator.h>
void contiguous_split(cudf::table_view const& src_table, std::vector<cudf::size_type> const& splits)
{
auto result = cudf::contiguous_split(src_table, splits);
}
void chunked_pack(cudf::table_view const& src_table, std::vector<cudf::size_type> const&)
{
auto const mr = rmm::mr::get_current_device_resource();
auto const stream = cudf::get_default_stream();
auto user_buffer = rmm::device_uvector<std::uint8_t>(100L * 1024 * 1024, stream, mr);
auto chunked_pack = cudf::chunked_pack::create(src_table, user_buffer.size(), mr);
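  // Drain the chunked packer: each call to next() writes at most
  // user_buffer.size() bytes into the bounce buffer, so the fixed 100 MiB
  // buffer is reused until the whole table has been serialized.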
while (chunked_pack->has_next()) {
auto iter_size = chunked_pack->next(user_buffer);
}
stream.synchronize();
}
template <typename T, typename ContigSplitImpl>
void BM_contiguous_split_common(benchmark::State& state,
std::vector<T>& src_cols,
int64_t num_rows,
int64_t num_splits,
int64_t bytes_total,
ContigSplitImpl& impl)
{
// generate splits
std::vector<cudf::size_type> splits;
if (num_splits > 0) {
cudf::size_type const split_stride = num_rows / num_splits;
// start after the first element.
auto iter = thrust::make_counting_iterator(1);
splits.reserve(num_splits);
std::transform(iter,
iter + num_splits,
std::back_inserter(splits),
[split_stride, num_rows](cudf::size_type i) {
return ::min(i * split_stride, static_cast<cudf::size_type>(num_rows));
});
}
for (auto const& col : src_cols)
// computing the null count is not a part of the benchmark's target code path, and we want the
// property to be pre-computed so that we measure the performance of only the intended code path
[[maybe_unused]]
auto const nulls = col->null_count();
auto const src_table = cudf::table(std::move(src_cols));
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
impl(src_table, splits);
}
// it's 2x bytes_total because we're both reading and writing.
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * bytes_total * 2);
}
class ContiguousSplit : public cudf::benchmark {};
class ChunkedPack : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
cudf::size_type el_size = 4; // ints and floats
int64_t const num_rows = total_desired_bytes / (num_cols * el_size);
// generate input table
auto builder = data_profile_builder().cardinality(0).distribution<int>(cudf::type_id::INT32,
distribution_id::UNIFORM);
if (not include_validity) builder.no_validity();
auto src_cols = create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
row_count{static_cast<cudf::size_type>(num_rows)},
data_profile{builder})
->release();
int64_t const total_bytes =
total_desired_bytes +
(include_validity ? (max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
class ContiguousSplitStrings : public cudf::benchmark {};
class ChunkedPackStrings : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split_strings(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
constexpr int64_t string_len = 8;
std::vector<const char*> h_strings{
"aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd", "eeeeeeee", "ffffffff", "gggggggg", "hhhhhhhh"};
int64_t const col_len_bytes = total_desired_bytes / num_cols;
int64_t const num_rows = col_len_bytes / string_len;
// generate input table
data_profile profile = data_profile_builder().no_validity().cardinality(0).distribution(
cudf::type_id::INT32,
distribution_id::UNIFORM,
0ul,
include_validity ? h_strings.size() * 2 : h_strings.size() - 1); // out of bounds nullified
cudf::test::strings_column_wrapper one_col(h_strings.begin(), h_strings.end());
std::vector<std::unique_ptr<cudf::column>> src_cols(num_cols);
for (int64_t idx = 0; idx < num_cols; idx++) {
auto random_indices = create_random_column(
cudf::type_id::INT32, row_count{static_cast<cudf::size_type>(num_rows)}, profile);
auto str_table = cudf::gather(cudf::table_view{{one_col}},
*random_indices,
(include_validity ? cudf::out_of_bounds_policy::NULLIFY
: cudf::out_of_bounds_policy::DONT_CHECK));
src_cols[idx] = std::move(str_table->release()[0]);
}
int64_t const total_bytes =
total_desired_bytes + ((num_rows + 1) * sizeof(cudf::offset_type)) +
(include_validity ? (max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
#define CSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplit, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplit, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplitStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplitStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPack, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPack, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPackStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPackStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
| f5676998f8d2b9f9c845e082eed37ae2af89a53d.cu | /*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <benchmarks/common/generate_input.hpp>
#include <benchmarks/fixture/benchmark_fixture.hpp>
#include <benchmarks/synchronization/synchronization.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <cudf/column/column.hpp>
#include <cudf/contiguous_split.hpp>
#include <thrust/iterator/counting_iterator.h>
void contiguous_split(cudf::table_view const& src_table, std::vector<cudf::size_type> const& splits)
{
auto result = cudf::contiguous_split(src_table, splits);
}
void chunked_pack(cudf::table_view const& src_table, std::vector<cudf::size_type> const&)
{
auto const mr = rmm::mr::get_current_device_resource();
auto const stream = cudf::get_default_stream();
auto user_buffer = rmm::device_uvector<std::uint8_t>(100L * 1024 * 1024, stream, mr);
auto chunked_pack = cudf::chunked_pack::create(src_table, user_buffer.size(), mr);
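  // Drain the chunked packer: each call to next() writes at most
  // user_buffer.size() bytes into the bounce buffer, so the fixed 100 MiB
  // buffer is reused until the whole table has been serialized.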
while (chunked_pack->has_next()) {
auto iter_size = chunked_pack->next(user_buffer);
}
stream.synchronize();
}
template <typename T, typename ContigSplitImpl>
void BM_contiguous_split_common(benchmark::State& state,
std::vector<T>& src_cols,
int64_t num_rows,
int64_t num_splits,
int64_t bytes_total,
ContigSplitImpl& impl)
{
// generate splits
std::vector<cudf::size_type> splits;
if (num_splits > 0) {
cudf::size_type const split_stride = num_rows / num_splits;
// start after the first element.
auto iter = thrust::make_counting_iterator(1);
splits.reserve(num_splits);
std::transform(iter,
iter + num_splits,
std::back_inserter(splits),
[split_stride, num_rows](cudf::size_type i) {
return std::min(i * split_stride, static_cast<cudf::size_type>(num_rows));
});
}
for (auto const& col : src_cols)
// computing the null count is not a part of the benchmark's target code path, and we want the
// property to be pre-computed so that we measure the performance of only the intended code path
[[maybe_unused]]
auto const nulls = col->null_count();
auto const src_table = cudf::table(std::move(src_cols));
for (auto _ : state) {
cuda_event_timer raii(state, true); // flush_l2_cache = true, stream = 0
impl(src_table, splits);
}
// it's 2x bytes_total because we're both reading and writing.
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * bytes_total * 2);
}
class ContiguousSplit : public cudf::benchmark {};
class ChunkedPack : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
cudf::size_type el_size = 4; // ints and floats
int64_t const num_rows = total_desired_bytes / (num_cols * el_size);
// generate input table
auto builder = data_profile_builder().cardinality(0).distribution<int>(cudf::type_id::INT32,
distribution_id::UNIFORM);
if (not include_validity) builder.no_validity();
auto src_cols = create_random_table(cycle_dtypes({cudf::type_id::INT32}, num_cols),
row_count{static_cast<cudf::size_type>(num_rows)},
data_profile{builder})
->release();
int64_t const total_bytes =
total_desired_bytes +
(include_validity ? (max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
class ContiguousSplitStrings : public cudf::benchmark {};
class ChunkedPackStrings : public cudf::benchmark {};
template <typename ContiguousSplitImpl>
void BM_contiguous_split_strings(benchmark::State& state, ContiguousSplitImpl& impl)
{
int64_t const total_desired_bytes = state.range(0);
cudf::size_type const num_cols = state.range(1);
cudf::size_type const num_splits = state.range(2);
bool const include_validity = state.range(3) != 0;
constexpr int64_t string_len = 8;
std::vector<const char*> h_strings{
"aaaaaaaa", "bbbbbbbb", "cccccccc", "dddddddd", "eeeeeeee", "ffffffff", "gggggggg", "hhhhhhhh"};
int64_t const col_len_bytes = total_desired_bytes / num_cols;
int64_t const num_rows = col_len_bytes / string_len;
// generate input table
data_profile profile = data_profile_builder().no_validity().cardinality(0).distribution(
cudf::type_id::INT32,
distribution_id::UNIFORM,
0ul,
include_validity ? h_strings.size() * 2 : h_strings.size() - 1); // out of bounds nullified
cudf::test::strings_column_wrapper one_col(h_strings.begin(), h_strings.end());
std::vector<std::unique_ptr<cudf::column>> src_cols(num_cols);
for (int64_t idx = 0; idx < num_cols; idx++) {
auto random_indices = create_random_column(
cudf::type_id::INT32, row_count{static_cast<cudf::size_type>(num_rows)}, profile);
auto str_table = cudf::gather(cudf::table_view{{one_col}},
*random_indices,
(include_validity ? cudf::out_of_bounds_policy::NULLIFY
: cudf::out_of_bounds_policy::DONT_CHECK));
src_cols[idx] = std::move(str_table->release()[0]);
}
int64_t const total_bytes =
total_desired_bytes + ((num_rows + 1) * sizeof(cudf::offset_type)) +
(include_validity ? (max(int64_t{1}, (num_rows / 32)) * sizeof(cudf::bitmask_type) * num_cols)
: 0);
BM_contiguous_split_common(state, src_cols, num_rows, num_splits, total_bytes, impl);
}
#define CSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplit, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplit, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
CSBM_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ContiguousSplitStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, contiguous_split); \
} \
BENCHMARK_REGISTER_F(ContiguousSplitStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 256, 1);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 0);
CSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidityNoSplits, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPack, name)(::benchmark::State & state) \
{ \
BM_contiguous_split(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPack, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_BENCHMARK_DEFINE(6Gb512ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb512ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsNoValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(6Gb10ColsValidity, (int64_t)6 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
#define CCSBM_STRINGS_BENCHMARK_DEFINE(name, size, num_columns, num_splits, validity) \
BENCHMARK_DEFINE_F(ChunkedPackStrings, name)(::benchmark::State & state) \
{ \
BM_contiguous_split_strings(state, chunked_pack); \
} \
BENCHMARK_REGISTER_F(ChunkedPackStrings, name) \
->Args({size, num_columns, num_splits, validity}) \
->Unit(benchmark::kMillisecond) \
->UseManualTime() \
->Iterations(8)
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb512ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsNoValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb10ColsValidity, (int64_t)4 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(4Gb4ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 4, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb512ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 512, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsNoValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 0);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb10ColsValidity, (int64_t)1 * 1024 * 1024 * 1024, 10, 0, 1);
CCSBM_STRINGS_BENCHMARK_DEFINE(1Gb1ColValidity, (int64_t)1 * 1024 * 1024 * 1024, 1, 0, 1);
|
22983c74580a451e6f9027445f2f809fe4914c18.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <hip/hip_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
#define tile_width 8
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "lena_bw.pgm";
texture<float, 2, hipReadModeElementType> tex;
texture<float,2,hipReadModeElementType> tex_edge;
__global__ void conv_tex(float *out,int width,int height,int m_width,int m_height){
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = m_width/2;
float value = 0;
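    // Plain 2D convolution through the texture units: accumulate the
    // m_width x m_height neighbourhood around (x,y), weighting each tap by the
    // mask bound to tex_edge, then threshold |value| > 0.4 into a binary edge
    // map. The u/v values computed below are never used -- leftover from a
    // normalized-coordinate addressing variant.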
for (int j = 0; j < m_height; ++j) {
for (int i = 0; i < m_width; ++i) {
int mapi = i-offset;
int mapj = j-offset;
float u = ((float)x + mapi - (float)width/2)/(float)(width);
float v = ((float)y + mapj - (float)width/2)/(float)(width);
value += tex2D(tex,y-(int)(m_width/2)+i , x-(int)(m_height/2)+j)*tex2D(tex_edge,i,j);
}
}
if(value>0.4 || value<-0.4){
out[x*width+y] = 1;
}
else{
out[x*width+y] = 0;
}
}
void runTest(int argc, char **argv);
int main(int argc, char **argv)
{
runTest(argc, argv);
hipDeviceReset();
return 0;
}
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
    //convolution mask (3x3 Sobel-style edge-detection kernel)
    //point straight at the stack array; the previous heap allocation here leaked
    //because its pointer was immediately overwritten
    float edge[9] = {-1,0,1,-2,0,2,-1,0,1};
    float *edgeDectection = &edge[0];
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc edge_cd = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *cuArray;
hipArray *edge_cu;
checkCudaErrors(hipMallocArray(&cuArray,&channelDesc,width,height));
checkCudaErrors(hipMallocArray(&edge_cu,&edge_cd,3,3));
checkCudaErrors(hipMemcpyToArray(cuArray,0,0,hData,size,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpyToArray(edge_cu,0,0,edgeDectection,3*3*sizeof(float),hipMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = hipAddressModeWrap;
tex.addressMode[1] = hipAddressModeWrap;
tex_edge.addressMode[0] = hipAddressModeWrap;
tex_edge.addressMode[1] = hipAddressModeWrap;
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(tex, cuArray, channelDesc));
checkCudaErrors(hipBindTextureToArray(tex_edge, edge_cu, edge_cd));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// tex_time
float *txData = NULL;
checkCudaErrors(hipMalloc((void **) &txData, size));
StopWatchInterface *t_timer = NULL;
sdkCreateTimer(&t_timer);
sdkStartTimer(&t_timer);
hipLaunchKernelGGL(( conv_tex), dim3(dimGrid),dim3(dimBlock),0, 0, txData,width,height,3,3);
getLastCudaError("Kernel execution failed");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&t_timer);
printf("Processing time for texture: %f (ms)\n", sdkGetTimerValue(&t_timer));
printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&t_timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&t_timer);
// Allocate mem for the result on host side
float *tex_out = (float *) malloc(size);
checkCudaErrors(hipMemcpy(tex_out,txData,size,hipMemcpyDeviceToHost));
char tex_outputfile[1024];
strcpy(tex_outputfile, imagePath);
strcpy(tex_outputfile + strlen(imagePath) - 4, "_texture_out.pgm");
sdkSavePGM(tex_outputfile, tex_out, width, height);
printf("Wrote '%s'\n", tex_outputfile);
free(imagePath);
}
| 22983c74580a451e6f9027445f2f809fe4914c18.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include <math.h>
#ifdef _WIN32
# define WINDOWS_LEAN_AND_MEAN
# define NOMINMAX
# include <windows.h>
#endif
// Includes CUDA
#include <cuda_runtime.h>
// Utilities and timing functions
#include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h
// CUDA helper functions
#include <helper_cuda.h> // helper functions for CUDA error check
#define MAX_EPSILON_ERROR 5e-3f
#define tile_width 8
// Define the files that are to be save and the reference images for validation
const char *imageFilename = "lena_bw.pgm";
texture<float, 2, cudaReadModeElementType> tex;
texture<float,2,cudaReadModeElementType> tex_edge;
__global__ void conv_tex(float *out,int width,int height,int m_width,int m_height){
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = m_width/2;
float value = 0;
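    // Plain 2D convolution through the texture units: accumulate the
    // m_width x m_height neighbourhood around (x,y), weighting each tap by the
    // mask bound to tex_edge, then threshold |value| > 0.4 into a binary edge
    // map. The u/v values computed below are never used -- leftover from a
    // normalized-coordinate addressing variant.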
for (int j = 0; j < m_height; ++j) {
for (int i = 0; i < m_width; ++i) {
int mapi = i-offset;
int mapj = j-offset;
float u = ((float)x + mapi - (float)width/2)/(float)(width);
float v = ((float)y + mapj - (float)width/2)/(float)(width);
value += tex2D(tex,y-(int)(m_width/2)+i , x-(int)(m_height/2)+j)*tex2D(tex_edge,i,j);
}
}
if(value>0.4 || value<-0.4){
out[x*width+y] = 1;
}
else{
out[x*width+y] = 0;
}
}
void runTest(int argc, char **argv);
int main(int argc, char **argv)
{
runTest(argc, argv);
cudaDeviceReset();
return 0;
}
void runTest(int argc, char **argv)
{
int devID = findCudaDevice(argc, (const char **) argv);
//convolution mask
float *edgeDectection = (float*)malloc(sizeof(float)*3*3);
float edge[9] = {-1,0,1,-2,0,2,-1,0,1};
edgeDectection=&edge[0];
// load image from disk
float *hData = NULL;
unsigned int width, height;
char *imagePath = sdkFindFilePath(imageFilename, argv[0]);
if (imagePath == NULL)
{
printf("Unable to source image file: %s\n", imageFilename);
exit(EXIT_FAILURE);
}
sdkLoadPGM(imagePath, &hData, &width, &height);
unsigned int size = width * height * sizeof(float);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc edge_cd = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *cuArray;
cudaArray *edge_cu;
checkCudaErrors(cudaMallocArray(&cuArray,&channelDesc,width,height));
checkCudaErrors(cudaMallocArray(&edge_cu,&edge_cd,3,3));
checkCudaErrors(cudaMemcpyToArray(cuArray,0,0,hData,size,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpyToArray(edge_cu,0,0,edgeDectection,3*3*sizeof(float),cudaMemcpyHostToDevice));
// Set texture parameters
tex.addressMode[0] = cudaAddressModeWrap;
tex.addressMode[1] = cudaAddressModeWrap;
tex_edge.addressMode[0] = cudaAddressModeWrap;
tex_edge.addressMode[1] = cudaAddressModeWrap;
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(tex, cuArray, channelDesc));
checkCudaErrors(cudaBindTextureToArray(tex_edge, edge_cu, edge_cd));
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
// tex_time
float *txData = NULL;
checkCudaErrors(cudaMalloc((void **) &txData, size));
StopWatchInterface *t_timer = NULL;
sdkCreateTimer(&t_timer);
sdkStartTimer(&t_timer);
conv_tex<<<dimGrid,dimBlock,0>>>(txData,width,height,3,3);
getLastCudaError("Kernel execution failed");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&t_timer);
printf("Processing time for texture: %f (ms)\n", sdkGetTimerValue(&t_timer));
printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&t_timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&t_timer);
// Allocate mem for the result on host side
float *tex_out = (float *) malloc(size);
checkCudaErrors(cudaMemcpy(tex_out,txData,size,cudaMemcpyDeviceToHost));
char tex_outputfile[1024];
strcpy(tex_outputfile, imagePath);
strcpy(tex_outputfile + strlen(imagePath) - 4, "_texture_out.pgm");
sdkSavePGM(tex_outputfile, tex_out, width, height);
printf("Wrote '%s'\n", tex_outputfile);
free(imagePath);
}
|
11ab1f34e45028b835b7e0353eb2712518bf35ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "helper_cuda.h"
#include "Mandelbrot_kernel.h"
#include "Mandelbrot_kernel.cuh"
#ifdef PROFILE
#include <include/CuprRuntime.h>
#endif
// The Mandelbrot CUDA GPU thread function
template<class T>
__global__ void Mandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
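    // (the launch uses a fixed pool of worker blocks -- one per SM, set up in
    //  RunMandelbrot0/1 -- so each block strides over many image tiles)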
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ);
// int m = blockIdx.x; // uncomment to see scheduling order
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0)
{
color.w = 0;
dst[pixel] = color;
}
else
{
int frame1 = frame + 1;
int frame2 = frame1 / 2;
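                // running average over frames; adding frame2 (= frame1/2) makes the
                // integer division round to nearest instead of truncating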
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot0
// The Mandelbrot CUDA GPU thread function (double single version)
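// "double single": the x/y offsets arrive as (hi, lo) float pairs produced by dsdeq
// on the host, emulating extra precision without using native doubles in the kernel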
__global__ void MandelbrotDS0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1,
const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale,
const uchar4 colors, const int frame, const int animationFrame, const int gridWidth,
const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0)
{
color.w = 0;
dst[pixel] = color;
}
else
{
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS0
// The Mandelbrot secondary AA pass CUDA GPU thread function
template<class T>
__global__ void Mandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
{
count += CheckColors(pixelColor, dst[pixel - 1]);
}
if (ix + 1 < imageW)
{
count += CheckColors(pixelColor, dst[pixel + 1]);
}
if (iy > 0)
{
count += CheckColors(pixelColor, dst[pixel - imageW]);
}
if (iy + 1 < imageH)
{
count += CheckColors(pixelColor, dst[pixel + imageW]);
}
if (count)
{
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot1
// The Mandelbrot secondary AA pass CUDA GPU thread function (double single version)
__global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch,
const float xOff0, const float xOff1, const float yOff0, const float yOff1,
const float xJP, const float yJP, const float scale, const uchar4 colors,
const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
{
count += CheckColors(pixelColor, dst[pixel - 1]);
}
if (ix + 1 < imageW)
{
count += CheckColors(pixelColor, dst[pixel + 1]);
}
if (iy > 0)
{
count += CheckColors(pixelColor, dst[pixel - imageW]);
}
if (iy + 1 < imageH)
{
count += CheckColors(pixelColor, dst[pixel + imageW]);
}
if (count)
{
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS1
// The host CPU Mandelbrot thread spawner
void RunMandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ, int version)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
int numWorkerBlocks = numSMs;
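    // mode selects the arithmetic: 0 = float, 1 = double-single (float pair), 2 = double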
switch (mode)
{
default:
case 0:
hipLaunchKernelGGL(( Mandelbrot0<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
hipLaunchKernelGGL(( MandelbrotDS0), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 2:
hipLaunchKernelGGL(( Mandelbrot0<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
}
getLastCudaError("Mandelbrot0 kernel execution failed.\n");
} // RunMandelbrot0
// The host CPU Mandelbrot thread spawner
void RunMandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ, int version)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
int numWorkerBlocks = numSMs;
switch (mode)
{
default:
case 0:
hipLaunchKernelGGL(( Mandelbrot1<float>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
hipLaunchKernelGGL(( MandelbrotDS1), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 2:
hipLaunchKernelGGL(( Mandelbrot1<double>), dim3(numWorkerBlocks), dim3(threads), 0, 0, dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
}
getLastCudaError("Mandelbrot1 kernel execution failed.\n");
} // RunMandelbrot1
| 11ab1f34e45028b835b7e0353eb2712518bf35ad.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include "helper_cuda.h"
#include "Mandelbrot_kernel.h"
#include "Mandelbrot_kernel.cuh"
#ifdef PROFILE
#include <include/CuprRuntime.h>
#endif
// The Mandelbrot CUDA GPU thread function
template<class T>
__global__ void Mandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
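    // (the launch uses a fixed pool of worker blocks -- one per SM, set up in
    //  RunMandelbrot0/1 -- so each block strides over many image tiles)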
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot<T>(xPos, yPos, xJP, yJP, crunch, isJ);
// int m = blockIdx.x; // uncomment to see scheduling order
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0)
{
color.w = 0;
dst[pixel] = color;
}
else
{
int frame1 = frame + 1;
int frame2 = frame1 / 2;
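                // running average over frames; adding frame2 (= frame1/2) makes the
                // integer division round to nearest instead of truncating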
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot0
// The Mandelbrot CUDA GPU thread function (double single version)
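// "double single": the x/y offsets arrive as (hi, lo) float pairs produced by dsdeq
// on the host, emulating extra precision without using native doubles in the kernel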
__global__ void MandelbrotDS0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const float xOff0, const float xOff1,
const float yOff0, const float yOff1, const float xJP, const float yJP, const float scale,
const uchar4 colors, const int frame, const int animationFrame, const int gridWidth,
const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int pixel = imageW * iy + ix;
if (frame == 0)
{
color.w = 0;
dst[pixel] = color;
}
else
{
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (dst[pixel].x * frame + color.x + frame2) / frame1;
dst[pixel].y = (dst[pixel].y * frame + color.y + frame2) / frame1;
dst[pixel].z = (dst[pixel].z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS0
// The Mandelbrot secondary AA pass CUDA GPU thread function
template<class T>
__global__ void Mandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const T xOff, const T yOff,
const T xJP, const T yJP, const T scale, const uchar4 colors, const int frame,
const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
{
count += CheckColors(pixelColor, dst[pixel - 1]);
}
if (ix + 1 < imageW)
{
count += CheckColors(pixelColor, dst[pixel + 1]);
}
if (iy > 0)
{
count += CheckColors(pixelColor, dst[pixel - imageW]);
}
if (iy + 1 < imageH)
{
count += CheckColors(pixelColor, dst[pixel + imageW]);
}
if (count)
{
// Calculate the location
const T xPos = (T)ix * scale + xOff;
const T yPos = (T)iy * scale + yOff;
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrot(xPos, yPos, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // Mandelbrot1
// The Mandelbrot secondary AA pass CUDA GPU thread function (double single version)
__global__ void MandelbrotDS1(uchar4 *dst, const int imageW, const int imageH, const int crunch,
const float xOff0, const float xOff1, const float yOff0, const float yOff1,
const float xJP, const float yJP, const float scale, const uchar4 colors,
const int frame, const int animationFrame, const int gridWidth, const int numBlocks, const bool isJ)
{
// loop until all blocks completed
for (unsigned int blockIndex=blockIdx.x; blockIndex < numBlocks; blockIndex += gridDim.x)
{
unsigned int blockX = blockIndex % gridWidth;
unsigned int blockY = blockIndex / gridWidth;
// process this block
const int ix = blockDim.x * blockX + threadIdx.x;
const int iy = blockDim.y * blockY + threadIdx.y;
if ((ix < imageW) && (iy < imageH))
{
// Get the current pixel color
int pixel = imageW * iy + ix;
uchar4 pixelColor = dst[pixel];
int count = 0;
// Search for pixels out of tolerance surrounding the current pixel
if (ix > 0)
{
count += CheckColors(pixelColor, dst[pixel - 1]);
}
if (ix + 1 < imageW)
{
count += CheckColors(pixelColor, dst[pixel + 1]);
}
if (iy > 0)
{
count += CheckColors(pixelColor, dst[pixel - imageW]);
}
if (iy + 1 < imageH)
{
count += CheckColors(pixelColor, dst[pixel + imageW]);
}
if (count)
{
// Calculate the location
float xPos0 = (float)ix * scale;
float xPos1 = 0.0f;
float yPos0 = (float)iy * scale;
float yPos1 = 0.0f;
dsadd(xPos0, xPos1, xPos0, xPos1, xOff0, xOff1);
dsadd(yPos0, yPos1, yPos0, yPos1, yOff0, yOff1);
// Calculate the Mandelbrot index for the current location
int m = CalcMandelbrotDS(xPos0, xPos1, yPos0, yPos1, xJP, yJP, crunch, isJ);
m = m > 0 ? crunch - m : 0;
// Convert the Mandelbrot index into a color
uchar4 color;
if (m)
{
m += animationFrame;
color.x = m * colors.x;
color.y = m * colors.y;
color.z = m * colors.z;
}
else
{
color.x = 0;
color.y = 0;
color.z = 0;
}
// Output the pixel
int frame1 = frame + 1;
int frame2 = frame1 / 2;
dst[pixel].x = (pixelColor.x * frame + color.x + frame2) / frame1;
dst[pixel].y = (pixelColor.y * frame + color.y + frame2) / frame1;
dst[pixel].z = (pixelColor.z * frame + color.z + frame2) / frame1;
}
}
}
} // MandelbrotDS1
// The host CPU Mandelbrot thread spawner
void RunMandelbrot0(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ, int version)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
int numWorkerBlocks = numSMs;
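    // mode selects the arithmetic: 0 = float, 1 = double-single (float pair), 2 = double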
switch (mode)
{
default:
case 0:
Mandelbrot0<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS0<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 2:
Mandelbrot0<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
}
getLastCudaError("Mandelbrot0 kernel execution failed.\n");
} // RunMandelbrot0
// The host CPU Mandelbrot thread spawner
void RunMandelbrot1(uchar4 *dst, const int imageW, const int imageH, const int crunch, const double xOff, const double yOff,
const double xjp, const double yjp, const double scale, const uchar4 colors, const int frame,
const int animationFrame, const int mode, const int numSMs, const bool isJ, int version)
{
dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
dim3 grid(iDivUp(imageW, BLOCKDIM_X), iDivUp(imageH, BLOCKDIM_Y));
int numWorkerBlocks = numSMs;
switch (mode)
{
default:
case 0:
Mandelbrot1<float><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, (float)xOff, (float)yOff,
(float)xjp, (float)yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 1:
float x0, x1, y0, y1;
dsdeq(x0, x1, xOff);
dsdeq(y0, y1, yOff);
MandelbrotDS1<<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, x0, x1, y0, y1,
xjp, yjp, (float)scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
case 2:
Mandelbrot1<double><<<numWorkerBlocks, threads>>>(dst, imageW, imageH, crunch, xOff, yOff,
xjp, yjp, scale, colors, frame, animationFrame, grid.x, grid.x *grid.y, isJ);
break;
}
getLastCudaError("Mandelbrot1 kernel execution failed.\n");
} // RunMandelbrot1
|
6aad313b1834a22a16eaa557ade8f94cad008094.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <omp.h>
#include "FractaleMOO.h"
#include "Device.h"
#include "Mandelbrot.h"
#include "Julia.h"
__global__ void processMandelbrot(uchar4* ptrDevPixels, int w, int h, int n, const DomaineMath& domaineMath);
__global__ void processJulia(uchar4* ptrDevPixels, int w, int h, int n, float c1, float c2, const DomaineMath& domaineMath);
FractaleMOO::FractaleMOO(int w, int h, DomaineMath* domain, Fractale* algo, int nmin, int nmax) {
this->algo = algo;
this->nmin = nmin;
this->nmax = nmax;
this->w = w;
this->h = h;
this->n = this->nmin;
this->step = 1;
this->ptrDomain = domain;
this->dg = dim3(16, 16, 1);
this->db = dim3(32, 32, 1);
Device::assertDim(dg, db);
HANDLE_ERROR(hipMalloc(&this->ptrDevDomain, sizeof(DomaineMath)));
}
FractaleMOO::~FractaleMOO() {
HANDLE_ERROR(hipFree(this->ptrDevDomain));
delete this->algo;
delete this->ptrDomain;
}
/**
* Override
*/
void FractaleMOO::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath) {
HANDLE_ERROR(hipMemcpy(this->ptrDevDomain, &domaineMath, sizeof(DomaineMath), hipMemcpyHostToDevice));
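    // dispatch on the concrete fractal type; dynamic_cast picks the matching kernel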
if (Mandelbrot* mandelbrot = dynamic_cast<Mandelbrot*>(this->algo)) {
hipLaunchKernelGGL(( processMandelbrot), dim3(dg),dim3(db), 0, 0, ptrDevPixels, w, h, this->n, *this->ptrDevDomain );
} else if (Julia* julia = dynamic_cast<Julia*>(this->algo)) {
hipLaunchKernelGGL(( processJulia), dim3(dg),dim3(db), 0, 0, ptrDevPixels, w, h, this->n, julia->c1, julia->c2, *this->ptrDevDomain );
} else {
        throw "Unsupported algorithm";
}
}
/**
* Override
*/
DomaineMath* FractaleMOO::getDomaineMathInit() {
return this->ptrDomain;
}
/**
* Override
*/
void FractaleMOO::animationStep() {
if( this->n == this->nmax ) {
this->step = -1;
} else if(this->n == this->nmin ) {
this->step = 1;
}
this->n += this->step;
}
/**
* Override
*/
float FractaleMOO::getAnimationPara() {
return (float) this->n;
}
/**
* Override
*/
int FractaleMOO::getW() {
return this->w;
}
/**
* Override
*/
int FractaleMOO::getH() {
return this->h;
}
/**
* Override
*/
string FractaleMOO::getTitle() {
if (Mandelbrot* mandelbrot = dynamic_cast<Mandelbrot*>(this->algo)) {
return "Fractale Mandelbrot";
} else if (Julia* julia = dynamic_cast<Julia*>(this->algo)) {
return "Fractale Julia";
} else {
        return "Unsupported algorithm";
}
}
| 6aad313b1834a22a16eaa557ade8f94cad008094.cu | #include <iostream>
#include <omp.h>
#include "FractaleMOO.h"
#include "Device.h"
#include "Mandelbrot.h"
#include "Julia.h"
__global__ void processMandelbrot(uchar4* ptrDevPixels, int w, int h, int n, const DomaineMath& domaineMath);
__global__ void processJulia(uchar4* ptrDevPixels, int w, int h, int n, float c1, float c2, const DomaineMath& domaineMath);
FractaleMOO::FractaleMOO(int w, int h, DomaineMath* domain, Fractale* algo, int nmin, int nmax) {
this->algo = algo;
this->nmin = nmin;
this->nmax = nmax;
this->w = w;
this->h = h;
this->n = this->nmin;
this->step = 1;
this->ptrDomain = domain;
this->dg = dim3(16, 16, 1);
this->db = dim3(32, 32, 1);
Device::assertDim(dg, db);
HANDLE_ERROR(cudaMalloc(&this->ptrDevDomain, sizeof(DomaineMath)));
}
FractaleMOO::~FractaleMOO() {
HANDLE_ERROR(cudaFree(this->ptrDevDomain));
delete this->algo;
delete this->ptrDomain;
}
/**
* Override
*/
void FractaleMOO::process(uchar4* ptrDevPixels, int w, int h, const DomaineMath& domaineMath) {
HANDLE_ERROR(cudaMemcpy(this->ptrDevDomain, &domaineMath, sizeof(DomaineMath), cudaMemcpyHostToDevice));
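    // dispatch on the concrete fractal type; dynamic_cast picks the matching kernel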
if (Mandelbrot* mandelbrot = dynamic_cast<Mandelbrot*>(this->algo)) {
processMandelbrot<<<dg,db>>>(ptrDevPixels, w, h, this->n, *this->ptrDevDomain );
} else if (Julia* julia = dynamic_cast<Julia*>(this->algo)) {
processJulia<<<dg,db>>>(ptrDevPixels, w, h, this->n, julia->c1, julia->c2, *this->ptrDevDomain );
} else {
        throw "Unsupported algorithm";
}
}
/**
* Override
*/
DomaineMath* FractaleMOO::getDomaineMathInit() {
return this->ptrDomain;
}
/**
* Override
*/
void FractaleMOO::animationStep() {
if( this->n == this->nmax ) {
this->step = -1;
} else if(this->n == this->nmin ) {
this->step = 1;
}
this->n += this->step;
}
/**
* Override
*/
float FractaleMOO::getAnimationPara() {
return (float) this->n;
}
/**
* Override
*/
int FractaleMOO::getW() {
return this->w;
}
/**
* Override
*/
int FractaleMOO::getH() {
return this->h;
}
/**
* Override
*/
string FractaleMOO::getTitle() {
if (Mandelbrot* mandelbrot = dynamic_cast<Mandelbrot*>(this->algo)) {
return "Fractale Mandelbrot";
} else if (Julia* julia = dynamic_cast<Julia*>(this->algo)) {
return "Fractale Julia";
} else {
        return "Unsupported algorithm";
}
}
|
492d5b3791bc39497fabcafa4c0a773c36b5830e.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuImage.h"
cuImage::cuImage()
:width(0), height(0), d_data(NULL), h_data(NULL),t_data(NULL)
{
}
void cuImage::Allocate(
int w, int h, int p, float *devMem, float *hostMem)
{
width = w;
height = h;
pitch = p;
d_data = devMem;
h_data = hostMem;
// safeCall(hipMallocPitch((void **)&d_data, &pitch, (sizeof(float)*width), (sizeof(float)height));
// pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
}
void cuImage::Allocate1D(
int w, int h, float *hostMem)
{
width = w;
height = h;
h_data = hostMem;
safeCall(hipMalloc(&d_data,width*height*sizeof(float)));
if (d_data==NULL)
printf("Failed to allocate device data\n");
}
void cuImage::AllocateMat1D(cv::Mat &src,bool withHost ){
width = src.cols;
height = src.rows;
Mat gray, gray_fpt;
if( src.channels() == 3 || src.channels() == 4 )
{
cvtColor(src, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt,DataType<float>::type, 1, 0);
}
else
src.convertTo(gray_fpt,DataType<float>::type, 1, 0);
h_data = (float*)gray_fpt.data;
safeCall(hipMalloc(&d_data,width*height*sizeof(float)));
if (d_data==NULL)
printf("Failed to allocate device data\n");
safeCall(hipMemcpy(d_data,h_data,width*height*sizeof(float),hipMemcpyHostToDevice));
if(withHost){
hostIner = true;
h_data = new float[height*width];
}
}
cuImage::~cuImage()
{
if (d_data!=NULL)
safeCall(hipFree(d_data));
if (h_data!=NULL&&hostIner==true)
free(h_data);
}
namespace cusift {
void CudaImage::Allocate(int w, int h, int p, bool host, float *devmem, float *hostmem)
{
width = w;
height = h;
pitch = p;
d_data = devmem;
h_data = hostmem;
t_data = NULL;
if (devmem==NULL) {
safeCall(hipMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
d_internalAlloc = true;
}
if (host && hostmem==NULL) {
h_data = (float *)malloc(sizeof(float)*pitch*height);
h_internalAlloc = true;
}
}
CudaImage::CudaImage() :
width(0), height(0), d_data(NULL), h_data(NULL), t_data(NULL), d_internalAlloc(false), h_internalAlloc(false)
{
}
CudaImage::~CudaImage()
{
if (d_internalAlloc && d_data!=NULL)
safeCall(hipFree(d_data));
d_data = NULL;
if (h_internalAlloc && h_data!=NULL)
free(h_data);
h_data = NULL;
if (t_data!=NULL)
safeCall(hipFreeArray((hipArray *)t_data));
t_data = NULL;
}
double CudaImage::Download()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
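  // pitch is kept in units of floats (Allocate divides it by sizeof(float)),
  // so rescale it to bytes for the 2D copy below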
if (d_data!=NULL && h_data!=NULL)
safeCall(hipMemcpy2D(d_data, p, h_data, sizeof(float)*width, sizeof(float)*width, height, hipMemcpyHostToDevice));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Download time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::Readback()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
safeCall(hipMemcpy2D(h_data, sizeof(float)*width, d_data, p, sizeof(float)*width, height, hipMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Readback time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::InitTexture()
{
TimerGPU timer(0);
hipChannelFormatDesc t_desc = hipCreateChannelDesc<float>();
safeCall(hipMallocArray((hipArray **)&t_data, &t_desc, pitch, height));
if (t_data==NULL)
printf("Failed to allocated texture data\n");
double gpuTime = timer.read();
#ifdef VERBOSE
printf("InitTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::CopyToTexture(CudaImage &dst, bool host)
{
if (dst.t_data==NULL) {
printf("Error CopyToTexture: No texture data\n");
return 0.0;
}
if ((!host || h_data==NULL) && (host || d_data==NULL)) {
printf("Error CopyToTexture: No source data\n");
return 0.0;
}
TimerGPU timer(0);
if (host)
safeCall(hipMemcpyToArray((hipArray *)dst.t_data, 0, 0, h_data, sizeof(float)*pitch*dst.height, hipMemcpyHostToDevice));
else
safeCall(hipMemcpyToArray((hipArray *)dst.t_data, 0, 0, d_data, sizeof(float)*pitch*dst.height, hipMemcpyDeviceToDevice));
safeCall(hipDeviceSynchronize());
double gpuTime = timer.read();
#ifdef VERBOSE
printf("CopyToTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
//new operator
void CudaImage::copyDevice(CudaImage &src){
width = src.width;
height = src.height;
pitch = src.pitch;
safeCall(hipMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
pitch /= sizeof(float);
safeCall(hipMemcpy2D(d_data, sizeof(float)*pitch, src.d_data, sizeof(float)*src.pitch, sizeof(float)*width, height, hipMemcpyDeviceToDevice));
d_internalAlloc = true;
}
void CudaImage::copyDevice(CudaImage &src,bool haveDevice){
width = src.width;
height = src.height;
pitch = src.pitch;
if(!haveDevice){
safeCall(hipMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
d_internalAlloc = true;
}
pitch /= sizeof(float);
safeCall(hipMemcpy2D(d_data, sizeof(float)*pitch, src.d_data, sizeof(float)*src.pitch, sizeof(float)*width, height, hipMemcpyDeviceToDevice));
}
}
| 492d5b3791bc39497fabcafa4c0a773c36b5830e.cu | #include "cuImage.h"
cuImage::cuImage()
:width(0), height(0), d_data(NULL), h_data(NULL),t_data(NULL)
{
}
void cuImage::Allocate(
int w, int h, int p, float *devMem, float *hostMem)
{
width = w;
height = h;
pitch = p;
d_data = devMem;
h_data = hostMem;
// safeCall(cudaMallocPitch((void **)&d_data, &pitch, (sizeof(float)*width), (sizeof(float)height));
// pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
}
void cuImage::Allocate1D(
int w, int h, float *hostMem)
{
width = w;
height = h;
h_data = hostMem;
safeCall(cudaMalloc(&d_data,width*height*sizeof(float)));
if (d_data==NULL)
printf("Failed to allocate device data\n");
}
void cuImage::AllocateMat1D(cv::Mat &src,bool withHost ){
width = src.cols;
height = src.rows;
Mat gray, gray_fpt;
if( src.channels() == 3 || src.channels() == 4 )
{
cvtColor(src, gray, COLOR_BGR2GRAY);
gray.convertTo(gray_fpt,DataType<float>::type, 1, 0);
}
else
src.convertTo(gray_fpt,DataType<float>::type, 1, 0);
h_data = (float*)gray_fpt.data;
safeCall(cudaMalloc(&d_data,width*height*sizeof(float)));
if (d_data==NULL)
printf("Failed to allocate device data\n");
safeCall(cudaMemcpy(d_data,h_data,width*height*sizeof(float),cudaMemcpyHostToDevice));
if(withHost){
hostIner = true;
h_data = new float[height*width];
}
}
cuImage::~cuImage()
{
if (d_data!=NULL)
safeCall(cudaFree(d_data));
if (h_data!=NULL&&hostIner==true)
free(h_data);
}
namespace cusift {
void CudaImage::Allocate(int w, int h, int p, bool host, float *devmem, float *hostmem)
{
width = w;
height = h;
pitch = p;
d_data = devmem;
h_data = hostmem;
t_data = NULL;
if (devmem==NULL) {
safeCall(cudaMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
pitch /= sizeof(float);
if (d_data==NULL)
printf("Failed to allocate device data\n");
d_internalAlloc = true;
}
if (host && hostmem==NULL) {
h_data = (float *)malloc(sizeof(float)*pitch*height);
h_internalAlloc = true;
}
}
CudaImage::CudaImage() :
width(0), height(0), d_data(NULL), h_data(NULL), t_data(NULL), d_internalAlloc(false), h_internalAlloc(false)
{
}
CudaImage::~CudaImage()
{
if (d_internalAlloc && d_data!=NULL)
safeCall(cudaFree(d_data));
d_data = NULL;
if (h_internalAlloc && h_data!=NULL)
free(h_data);
h_data = NULL;
if (t_data!=NULL)
safeCall(cudaFreeArray((cudaArray *)t_data));
t_data = NULL;
}
double CudaImage::Download()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
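  // pitch is kept in units of floats (Allocate divides it by sizeof(float)),
  // so rescale it to bytes for the 2D copy below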
if (d_data!=NULL && h_data!=NULL)
safeCall(cudaMemcpy2D(d_data, p, h_data, sizeof(float)*width, sizeof(float)*width, height, cudaMemcpyHostToDevice));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Download time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::Readback()
{
TimerGPU timer(0);
int p = sizeof(float)*pitch;
safeCall(cudaMemcpy2D(h_data, sizeof(float)*width, d_data, p, sizeof(float)*width, height, cudaMemcpyDeviceToHost));
double gpuTime = timer.read();
#ifdef VERBOSE
printf("Readback time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::InitTexture()
{
TimerGPU timer(0);
cudaChannelFormatDesc t_desc = cudaCreateChannelDesc<float>();
safeCall(cudaMallocArray((cudaArray **)&t_data, &t_desc, pitch, height));
if (t_data==NULL)
printf("Failed to allocated texture data\n");
double gpuTime = timer.read();
#ifdef VERBOSE
printf("InitTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double CudaImage::CopyToTexture(CudaImage &dst, bool host)
{
if (dst.t_data==NULL) {
printf("Error CopyToTexture: No texture data\n");
return 0.0;
}
if ((!host || h_data==NULL) && (host || d_data==NULL)) {
printf("Error CopyToTexture: No source data\n");
return 0.0;
}
TimerGPU timer(0);
if (host)
safeCall(cudaMemcpyToArray((cudaArray *)dst.t_data, 0, 0, h_data, sizeof(float)*pitch*dst.height, cudaMemcpyHostToDevice));
else
safeCall(cudaMemcpyToArray((cudaArray *)dst.t_data, 0, 0, d_data, sizeof(float)*pitch*dst.height, cudaMemcpyDeviceToDevice));
safeCall(cudaThreadSynchronize());
double gpuTime = timer.read();
#ifdef VERBOSE
printf("CopyToTexture time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
//new operator
void CudaImage::copyDevice(CudaImage &src){
width = src.width;
height = src.height;
pitch = src.pitch;
safeCall(cudaMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
pitch /= sizeof(float);
safeCall(cudaMemcpy2D(d_data, sizeof(float)*pitch, src.d_data, sizeof(float)*src.pitch, sizeof(float)*width, height, cudaMemcpyDeviceToDevice));
d_internalAlloc = true;
}
void CudaImage::copyDevice(CudaImage &src,bool haveDevice){
width = src.width;
height = src.height;
pitch = src.pitch;
if(!haveDevice){
safeCall(cudaMallocPitch((void **)&d_data, (size_t*)&pitch, (size_t)(sizeof(float)*width), (size_t)height));
d_internalAlloc = true;
}
pitch /= sizeof(float);
safeCall(cudaMemcpy2D(d_data, sizeof(float)*pitch, src.d_data, sizeof(float)*src.pitch, sizeof(float)*width, height, cudaMemcpyDeviceToDevice));
}
}
|
2b565e60c7eda91d68dba3786dbb5c649eb35bb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/s_t_d.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void spaceToDepthKernel(
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const int block_size,
const bool isNHWC) {
auto input_ptr = reinterpret_cast<const T *>(vx);
auto output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_depth_by_output_height = input_depth * output_height;
const int output_area = output_width * output_height;
const int output_depth_by_output_area = output_depth * output_area;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * input_height * input_width * input_depth;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x){
// inp_idx = d + input_depth * (w + input_width * (h + input_height * b))
const int d = inp_idx % input_depth;
const int inp_idx2 = inp_idx / input_depth;
const int w = inp_idx2 % input_width;
const int inp_idx3 = inp_idx2 / input_width;
const int h = inp_idx3 % input_height;
const int b = inp_idx3 / input_height;
const int out_h = h / block_size;
const int offset_h = h % block_size;
const int out_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * input_depth;
const int out_d = d + offset_d;
const int out_idx = out_d + output_depth * (out_w + output_width * (out_h + output_height * b));
*(output_ptr + out_idx) = *(input_ptr + inp_idx);
}
} else {
const int total_count = batch_size * output_depth_by_output_area;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x) {
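            // NCHW path: peel the flat input index into (n, iC, oY, bY, oX, bX);
            // the input spatial coords are h = oY*block_size + bY and w = oX*block_size + bX,
            // and (bY, bX, iC) together form the output channel index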
const int n_iC_oY_bY_oX = inp_idx / block_size;
const int bX = inp_idx - n_iC_oY_bY_oX * block_size;
const int n_iC_oY_bY = n_iC_oY_bY_oX / output_width;
const int oX = n_iC_oY_bY_oX - n_iC_oY_bY * output_width;
const int n_iC_oY = n_iC_oY_bY / block_size;
const int bY = n_iC_oY_bY - n_iC_oY * block_size;
const int n = n_iC_oY / input_depth_by_output_height;
const int iC_oY = n_iC_oY - n * input_depth_by_output_height;
const int output_idx = oX + (((n * block_size + bY) * block_size + bX) * input_depth_by_output_height + iC_oY) * output_width;
*(output_ptr + output_idx) = *(input_ptr + inp_idx);
}
}
}
template <typename T>
static void _spaceTodepth_(sd::LaunchContext * context, const NDArray &input, NDArray *output, int block_size, bool isNHWC) {
hipLaunchKernelGGL(( spaceToDepthKernel<T>), dim3(512), dim3(512), 1024, *context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _spaceTodepth(sd::LaunchContext * context, const NDArray &input, NDArray *output, int block_size, bool isNHWC) {
NDArray::prepareSpecialUse({output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _spaceTodepth_, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {&input});
}
}
}
} | 2b565e60c7eda91d68dba3786dbb5c649eb35bb4.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
//
//
#include <ops/declarable/helpers/s_t_d.h>
namespace sd {
namespace ops {
namespace helpers {
template <typename T>
static _CUDA_G void spaceToDepthKernel(
const void *vx, const Nd4jLong *xShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
const int block_size,
const bool isNHWC) {
auto input_ptr = reinterpret_cast<const T *>(vx);
auto output_ptr = reinterpret_cast<T *>(vz);
const int batch_size = shape::sizeAt(xShapeInfo, 0);
const int input_depth = isNHWC ? shape::sizeAt(xShapeInfo, 3) : shape::sizeAt(xShapeInfo, 1);
const int input_height = isNHWC ? shape::sizeAt(xShapeInfo, 1) : shape::sizeAt(xShapeInfo, 2);
const int input_width = isNHWC ? shape::sizeAt(xShapeInfo, 2) : shape::sizeAt(xShapeInfo, 3);
const int output_depth = isNHWC ? shape::sizeAt(zShapeInfo, 3) : shape::sizeAt(zShapeInfo, 1);
const int output_height = isNHWC ? shape::sizeAt(zShapeInfo, 1) : shape::sizeAt(zShapeInfo, 2);
const int output_width = isNHWC ? shape::sizeAt(zShapeInfo, 2) : shape::sizeAt(zShapeInfo, 3);
const int input_depth_by_output_height = input_depth * output_height;
const int output_area = output_width * output_height;
const int output_depth_by_output_area = output_depth * output_area;
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
if (isNHWC) {
const int total_count = batch_size * input_height * input_width * input_depth;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x){
// inp_idx = d + input_depth * (w + input_width * (h + input_height * b))
const int d = inp_idx % input_depth;
const int inp_idx2 = inp_idx / input_depth;
const int w = inp_idx2 % input_width;
const int inp_idx3 = inp_idx2 / input_width;
const int h = inp_idx3 % input_height;
const int b = inp_idx3 / input_height;
const int out_h = h / block_size;
const int offset_h = h % block_size;
const int out_w = w / block_size;
const int offset_w = w % block_size;
const int offset_d = (offset_h * block_size + offset_w) * input_depth;
const int out_d = d + offset_d;
const int out_idx = out_d + output_depth * (out_w + output_width * (out_h + output_height * b));
*(output_ptr + out_idx) = *(input_ptr + inp_idx);
}
} else {
const int total_count = batch_size * output_depth_by_output_area;
for (int inp_idx = tid; inp_idx < total_count; inp_idx += blockDim.x * gridDim.x) {
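            // NCHW path: peel the flat input index into (n, iC, oY, bY, oX, bX);
            // the input spatial coords are h = oY*block_size + bY and w = oX*block_size + bX,
            // and (bY, bX, iC) together form the output channel index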
const int n_iC_oY_bY_oX = inp_idx / block_size;
const int bX = inp_idx - n_iC_oY_bY_oX * block_size;
const int n_iC_oY_bY = n_iC_oY_bY_oX / output_width;
const int oX = n_iC_oY_bY_oX - n_iC_oY_bY * output_width;
const int n_iC_oY = n_iC_oY_bY / block_size;
const int bY = n_iC_oY_bY - n_iC_oY * block_size;
const int n = n_iC_oY / input_depth_by_output_height;
const int iC_oY = n_iC_oY - n * input_depth_by_output_height;
const int output_idx = oX + (((n * block_size + bY) * block_size + bX) * input_depth_by_output_height + iC_oY) * output_width;
*(output_ptr + output_idx) = *(input_ptr + inp_idx);
}
}
}
template <typename T>
static void _spaceTodepth_(sd::LaunchContext * context, const NDArray &input, NDArray *output, int block_size, bool isNHWC) {
spaceToDepthKernel<T><<<512, 512, 1024, *context->getCudaStream()>>>(input.specialBuffer(), input.specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), block_size, isNHWC);
}
void _spaceTodepth(sd::LaunchContext * context, const NDArray &input, NDArray *output, int block_size, bool isNHWC) {
NDArray::prepareSpecialUse({output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), _spaceTodepth_, (context, input, output, block_size, isNHWC), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {&input});
}
}
}
} |
c09b0199a33f2d4fc14dd392722684103846030c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_left*(y)+xdim0_update_halo_kernel5_plus_4_left*ydim0_update_halo_kernel5_plus_4_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_left*(y)+xdim1_update_halo_kernel5_plus_4_left*ydim1_update_halo_kernel5_plus_4_left*(z))
//user function
__device__
inline void update_halo_kernel5_plus_4_left_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
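  // left halo fill: copy the flux values from 4 cells further along x (inside the
  // domain) into this halo cell, for whichever fields are flagged active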
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_left + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_left * ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_left + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_left * ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,88)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(88,"update_halo_kernel5_plus_4_left");
OPS_kernels[88].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h || ydim0 != ydim0_update_halo_kernel5_plus_4_left_h || xdim1 != xdim1_update_halo_kernel5_plus_4_left_h || ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
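  // stage the NUM_FIELDS flag array into OPS' consts buffers (OPS_consts_h/OPS_consts_d)
  // so the kernel can read it through arg2.data_d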
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[88].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[88].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[88].mpi_time += t2-t1;
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 88;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 88;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(88,"update_halo_kernel5_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
| c09b0199a33f2d4fc14dd392722684103846030c.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_left;
int xdim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_left;
int ydim0_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_left;
int xdim1_update_halo_kernel5_plus_4_left_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_left;
int ydim1_update_halo_kernel5_plus_4_left_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_left*(y)+xdim0_update_halo_kernel5_plus_4_left*ydim0_update_halo_kernel5_plus_4_left*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_left*(y)+xdim1_update_halo_kernel5_plus_4_left*ydim1_update_halo_kernel5_plus_4_left*(z))
//user function
__device__
inline void update_halo_kernel5_plus_4_left_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
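  // left halo fill: copy the flux values from 4 cells further along x (inside the
  // domain) into this halo cell, for whichever fields are flagged active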
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_left + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_left * ydim0_update_halo_kernel5_plus_4_left;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_left + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_left * ydim1_update_halo_kernel5_plus_4_left;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_left_gpu(arg0, arg1, arg2);
}
}
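// The wrapper kernel above advances arg0/arg1 by each thread's (idx_x, idx_y, idx_z)
// position before calling the user function, so the user function always indexes from a
// zero-based stencil origin; threads outside (size0, size1, size2) do nothing.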
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,88)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(88,"update_halo_kernel5_plus_4_left");
OPS_kernels[88].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h || ydim0 != ydim0_update_halo_kernel5_plus_4_left_h || xdim1 != xdim1_update_halo_kernel5_plus_4_left_h || ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_left, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_left_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_left, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_left_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_left, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_left_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_left, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_left_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[88].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[88].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[88].mpi_time += t2-t1;
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[88].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 88;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 88;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(88,"update_halo_kernel5_plus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
6a5043e2054ee87de7eb80e170ecf492dd1ec625.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" // ensure function name will be left alone rather than mangled like a C++ function
{
// Compute the standard normal density at an array of n points (x) and stores output in y.
__global__ void std_normal_pdf_double(const double *x, double *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
if(i<n) y[i] = exp(-0.5*x[i]*x[i])*ONE_OVER_ROOT_TWOPI;
}
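    // Illustrative launch sketch (d_x, d_y and the block size are assumptions, not part of
    // this file): any 2-D grid of 1-D blocks whose total thread count covers n works with
    // the flattened index used above, e.g.
    //   unsigned int threads = 256;
    //   unsigned int blocks  = (n + threads - 1) / threads;
    //   hipLaunchKernelGGL(std_normal_pdf_double, dim3(blocks), dim3(threads), 0, 0, d_x, d_y, n);
    // splitting blocks across gridDim.x and gridDim.y only when blocks exceeds the
    // per-dimension grid limit.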
// Compute the standard normal density at an array of n points (x) and stores output in y.
__global__ void std_normal_pdf_float(const float *x, float *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const float ONE_OVER_ROOT_TWOPI_F = rsqrt(2.0f*3.14159265358979f);
if(i<n) y[i] = exp(-0.5f*x[i]*x[i])*ONE_OVER_ROOT_TWOPI_F;
}
    // Compute the normal density with mean mu and standard deviation sig at an array of n points (x) and store the output in y.
__global__ void normal_pdf_double(const double *x, const double *mu, const double *sig, double *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
        const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
if(i<n)
{
double dx = x[i] - mu[i];
            y[i] = exp(-0.5*dx*dx/(sig[i]*sig[i]))*ONE_OVER_ROOT_TWOPI/sig[i];
}
}
    // Compute the normal density with mean mu and standard deviation sig at an array of n points (x) and store the output in y.
__global__ void normal_pdf_float(const float *x, const float *mu, const float *sig, float *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const float ONE_OVER_ROOT_TWOPI_F = rsqrt(2.0f*3.14159265358979f);
if(i<n)
{
float dx = x[i] - mu[i];
            y[i] = exp(-0.5f*dx*dx/(sig[i]*sig[i]))*ONE_OVER_ROOT_TWOPI_F/sig[i];
}
}
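    // The sum_simplistic_* kernels below are naive single-thread reductions: only the
    // thread with global index 0 accumulates the n inputs, so they serve as correctness
    // references rather than performant reductions.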
__global__ void sum_simplistic_double(const double *input, double *output, unsigned int n)
{
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
double sum = 0.0;
if (i==0)
{
for(int j=0;j<n;++j)
sum += input[j];
            output[0] = sum;
        }
}
__global__ void sum_simplistic_float(const float *input, float *output, unsigned int n)
{
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
float sum = 0.0;
if (i==0)
{
for(int j=0;j<n;++j)
sum += input[j];
            output[0] = sum;
        }
}
// Adopted from https://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
__global__ void block_sum_double(const double *input,
double *per_block_results,
unsigned int n)
{
extern __shared__ double sdata[];
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
// load input into __shared__ memory
double x = 0.0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(unsigned int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
unsigned int block_id_1d = (blockIdx.y * gridDim.x + blockIdx.x);
per_block_results[block_id_1d] = sdata[0];
}
}
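    // block_sum_double expects blockDim.x to be a power of two (the halving loop skips
    // elements otherwise), requires blockDim.x * sizeof(double) bytes of dynamic shared
    // memory at launch, and leaves one partial sum per block in per_block_results,
    // which still needs a second reduction pass (or a host-side sum).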
}
| 6a5043e2054ee87de7eb80e170ecf492dd1ec625.cu | extern "C" // ensure function name will be left alone rather than mangled like a C++ function
{
// Compute the standard normal density at an array of n points (x) and stores output in y.
__global__ void std_normal_pdf_double(const double *x, double *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
if(i<n) y[i] = exp(-0.5*x[i]*x[i])*ONE_OVER_ROOT_TWOPI;
}
// Compute the standard normal density at an array of n points (x) and stores output in y.
__global__ void std_normal_pdf_float(const float *x, float *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const float ONE_OVER_ROOT_TWOPI_F = rsqrt(2.0f*3.14159265358979f);
if(i<n) y[i] = exp(-0.5f*x[i]*x[i])*ONE_OVER_ROOT_TWOPI_F;
}
// Compute the normal density with mean mu and standard deviation sig at an array of n points (x) and store the output in y.
__global__ void normal_pdf_double(const double *x, const double *mu, const double *sig, double *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    const double ONE_OVER_ROOT_TWOPI = 1.0/sqrt(2.0*M_PI);
if(i<n)
{
double dx = x[i] - mu[i];
        y[i] = exp(-0.5*dx*dx/(sig[i]*sig[i]))*ONE_OVER_ROOT_TWOPI/sig[i];
}
}
// Compute the normal density with mean mu and standard deviation sig at an array of n points (x) and store the output in y.
__global__ void normal_pdf_float(const float *x, const float *mu, const float *sig, float *y, unsigned int n)
{
// assumes a 2-d grid of 1-d blocks
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
const float ONE_OVER_ROOT_TWOPI_F = rsqrt(2.0f*3.14159265358979f);
if(i<n)
{
float dx = x[i] - mu[i];
        y[i] = exp(-0.5f*dx*dx/(sig[i]*sig[i]))*ONE_OVER_ROOT_TWOPI_F/sig[i];
}
}
__global__ void sum_simplistic_double(const double *input, double *output, unsigned int n)
{
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
double sum = 0.0;
if (i==0)
{
for(int j=0;j<n;++j)
sum += input[j];
        output[0] = sum;
    }
}
__global__ void sum_simplistic_float(const float *input, float *output, unsigned int n)
{
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
float sum = 0.0;
if (i==0)
{
for(int j=0;j<n;++j)
sum += input[j];
        output[0] = sum;
    }
}
// Adopted from https://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
__global__ void block_sum_double(const double *input,
double *per_block_results,
unsigned int n)
{
extern __shared__ double sdata[];
unsigned int i = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
// load input into __shared__ memory
double x = 0.0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(unsigned int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
unsigned int block_id_1d = (blockIdx.y * gridDim.x + blockIdx.x);
per_block_results[block_id_1d] = sdata[0];
}
}
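// Illustrative two-pass usage (d_in, d_partial, d_out and the block size are assumptions,
// not part of this file):
//   unsigned int threads = 256;                          // must be a power of two
//   unsigned int blocks  = (n + threads - 1) / threads;
//   block_sum_double<<<blocks, threads, threads * sizeof(double)>>>(d_in, d_partial, n);
//   block_sum_double<<<1, 1024, 1024 * sizeof(double)>>>(d_partial, d_out, blocks);
// The second launch collapses the per-block partial sums into d_out[0]; it covers all
// partials in one pass only when blocks <= 1024, otherwise repeat or finish on the host.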
}
|
a4adc3f5839d585deae0277d42c7f3b7052fb154.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
float h_A[]= {
0.646300533086186, 0.6891034119322159, 0.5468255896007155, 0.6042228186164886, 0.8659380581803113, 0.6300291449865434, 0.6636944471272259, 0.9882951548595007, 0.6352107108241554, 0.5790636985735749, 0.8804145795069749, 0.9456035439132031, 0.6321246094793169, 0.5520083637849034, 0.8193643662644936, 0.948699220113753, 0.6755087191072062, 0.8452024670159349, 0.5158472479991425, 0.7454278577521886, 0.8203518918008311, 0.8306414037192553, 0.9102755274193095, 0.8049150489951427, 0.6634987536615461, 0.5516742816892066, 0.6842642708230713, 0.7483998039947184, 0.8051003412268876, 0.5649583199862422, 0.8121027556323586, 0.5450967323115479, 0.6219450160218438, 0.5105097521704045, 0.9137357556898562, 0.5150533504856335, 0.9355026026464295, 0.710832721093494, 0.9629822013245587, 0.5863652172884737, 0.9265505203829214, 0.5420760069497614, 0.6783567622586935, 0.8976679836225981, 0.5509090210473756, 0.6604391659811224, 0.999366552142813, 0.9348916843328499, 0.713477120025813, 0.7305105281555632, 0.5508255633550583, 0.5329064212395214, 0.6742118985756717, 0.689140376023022, 0.7270457963615451, 0.7209463549118231, 0.7283645311972975, 0.7472317688709345, 0.8427026709428014, 0.8917006197702075, 0.7860696907282438, 0.8998261806382524, 0.774140322305406, 0.7407395850512472, 0.7016144680644383, 0.9513347541186932, 0.9539583490820657, 0.8055369082163983, 0.66731931499848, 0.9269077839786752, 0.8036882303399886, 0.8353559289667416, 0.7487273959642642, 0.95231413311441, 0.52936400623473, 0.6622843724305907, 0.7865911951337959, 0.8490486059918574, 0.5654667231844523, 0.902222672460675, 0.7377938242893363, 0.845162901466018, 0.6178930176516815, 0.8820423918233746, 0.819658695927562, 0.7297992824653494, 0.8608408011644345, 0.7814267405834245, 0.5451303358395813, 0.8364497176294705, 0.9476071729161337, 0.8683920694866987, 0.6120014563881961, 0.7324781077435785, 0.952295205463342, 0.6058991359641921, 0.8291900507261571, 0.5226152595564822, 0.5947825439255605, 0.8782021043314617, 0.7569168372950734, 0.5797010072157455, 0.6594462100662504, 0.8337467492618065, 0.9914618549442806, 0.9321282144535272, 0.7246478245290418, 0.7161212795026455, 0.6016851675753103, 0.5125244716188995, 0.9299996842565255, 0.9903492897808992, 0.6237940900552181, 0.8205895991703788, 0.9884414871779998, 0.5866948961638769, 0.998033951487114, 0.5295990006307705, 0.5611045875923815, 0.961245424281093, 0.7226827601352674, 0.9640509189881881, 0.6519866100403702, 0.7892687497473407, 0.8167014390840873, 0.6765396258366096, 0.8449615379127254, 0.596268393959178, 0.7939249923913629, 0.5168816989873475, 0.9345299250253124, 0.6309463095185301, 0.8986805331645678, 0.7523313838450412, 0.82531344079664, 0.7627569522371702, 0.9031553492515547, 0.9494476034448109, 0.6288373163385834, 0.7924580168091493, 0.721495346147775, 0.834300321380002, 0.6469884013241809, 0.6472322377172521, 0.7984745212053365, 0.962294073457627, 0.7886130868301862, 0.8776482969543955, 0.6478837103419697, 0.9882165119301165, 0.8740486150400817, 0.5056143331065995, 0.6618047410295208, 0.7610666592760644, 0.5702625328895041, 0.9369221577593562, 0.9494164874124904, 0.59238444587363, 0.8780965038804809, 0.5218639139258541, 0.6812401728541819, 0.9328253167831007, 0.5161381775199221, 0.9100393851884749, 0.9728293591126462, 0.811344502001907, 0.782823841572214, 0.8658958032470887, 0.663719411218762, 0.9692889930153497, 0.6467599014694876, 0.589304535120637, 0.5094633344034718, 0.7025147226816439, 0.6598503212467648, 0.7020746471976945, 0.8830918473238974, 0.7866485844831004, 0.6345775079170256, 
0.5615367280865449, 0.8802227833065868, 0.8582600706563485, 0.9155482170781064, 0.9530293740421751, 0.8182483372937428, 0.8524389803132264, 0.7241233536334677, 0.5151834741836199, 0.7385812199918054, 0.7943893265125952, 0.9051641860383268, 0.619534572253894, 0.8242822542479566, 0.6413536058059588, 0.9807819497947537, 0.9898101031902062, 0.8415733555438634, 0.9867989644513635, 0.9373926397421499, 0.8237322958318012, 0.9271544178576562, 0.8356995743720048, 0.5658178563673646, 0.9705983473416786, 0.6736511025432669, 0.7830998587352098, 0.7045935293009279, 0.6834898270240016, 0.6499489142941706, 0.8808467511064504, 0.6642293957183821, 0.8653745173498084, 0.6267646816753698, 0.8074151052755552, 0.6799619044150402, 0.9942692009440288, 0.8625681533776105, 0.9257538399244084, 0.9914011126522407, 0.7917287747201345, 0.6046048508747064, 0.532450046417468, 0.6437265828207415, 0.8897046260829842, 0.9224802213438084, 0.6057855632559244, 0.9499914588921554, 0.611727504863876, 0.7587968793908222, 0.67209262512403, 0.9950600556677005, 0.7501592342573983, 0.8822302791460712, 0.562604449598759, 0.9938232800091651, 0.9950277163985779, 0.7850563426271466, 0.6291752867355491, 0.5837153379176236, 0.7493907741017607, 0.6658782563135046, 0.6476146082689006, 0.5974297752374016, 0.7034458626620241, 0.5161588039335538, 0.7186483385553928, 0.9061181053411442, 0.6296220803731616, 0.8344587501610189, 0.7658368512919322, 0.5193911487477161, 0.5254419830916216, 0.7504808479462405, 0.8552544477499019, 0.8289137948682707, 0.5426242175782335, 0.858845508474556, 0.8252356216134121, 0.8866364015504669, 0.5073032774858128, 0.74148567685559, 0.5809190407335006, 0.8382147766638192, 0.5021179405425454, 0.5612965191774761, 0.521704780892861, 0.9620418862791433, 0.6967427399939414, 0.6533446492141379, 0.7147919014055153, 0.7887651897338765, 0.8217711569279046, 0.5366901108437196, 0.9734989556630906, 0.5485794158722644, 0.5458920483132449, 0.9415910985632716, 0.6905162757784671, 0.8177732442367671, 0.5193192818261485, 0.904439960839182, 0.577484626417917, 0.5023125567163751, 0.5351238363408092, 0.5506641464567381, 0.9097008547341774, 0.5028782459886247, 0.9775999371965542, 0.8480896041264325, 0.9524433141692397, 0.8790167956693373, 0.6918264294189349, 0.6610097567204785, 0.5590353481221483, 0.9055975628804205, 0.6238987671821737, 0.9890972864990741, 0.6749305158850749, 0.6388066974704508, 0.9249096968119721, 0.9237097208162639, 0.9956186647783947, 0.7502845085261427, 0.9157536785718855, 0.6367148161459021, 0.8914383120371315, 0.7754815852778648, 0.7442070581925427, 0.7168533964646541, 0.8035208845828656, 0.9058793058946397, 0.5506057302703941, 0.5610586777236432, 0.6198991192228714, 0.6759727566157296, 0.6521536736152977, 0.8911054170392861, 0.8730066061369885, 0.9052165427830005, 0.6290577933163359, 0.6266432294048905, 0.5833044339268814, 0.542572680556954, 0.709871771808865, 0.9961238310508744, 0.5220897050603603, 0.8772828170016069, 0.8770640256265352, 0.6734715416008624, 0.66448493340621, 0.711886014564672, 0.8948545491540754, 0.742454808358824, 0.5031309948396989, 0.998105761408189, 0.7416349611897435, 0.9833498748501672, 0.9434160912644086, 0.9287361899928851, 0.5668358498590604, 0.5516216715871469, 0.9180811238230364, 0.5003209498989232, 0.9919408995756567, 0.6098195086655246, 0.8529329865006654, 0.5483923087170157, 0.5108091169433435, 0.9310974593281147, 0.5131543331047703, 0.5522897530875988, 0.6135861087993936, 0.942225130594302, 0.7883109048664032, 0.980358430652991, 0.7427552974292404, 0.9008592468210845, 
0.8330182916915136, 0.5116266438107839, 0.7155765952485353, 0.5586222664249273, 0.8094091386607725, 0.8137341760476213, 0.596062013591621, 0.6233668771354632, 0.6220904258017181, 0.7729922623950242, 0.9242654659438391, 0.9918232862707279, 0.7038756613345727, 0.6955031237560236, 0.7330651825711396, 0.9814429451532296, 0.6637713448493832, 0.6273600806376864, 0.7920615560597659, 0.7560570649825917, 0.8667792441759616, 0.9311245995511708, 0.7390681612865757, 0.8622775259815559, 0.6482748715498996, 0.599471939542426, 0.5779101969326355, 0.801001618896622, 0.7871239215733595, 0.8926484924268543, 0.7436028538658448, 0.5052795513219919, 0.963326129083858, 0.6198395865526083, 0.7441529681401635, 0.9601662315681015, 0.7548356711809994, 0.9954837830129526, 0.6759079213898986, 0.9621800756631611, 0.89322570586708, 0.7735070667526001, 0.6915282225910707, 0.9341423848626801, 0.6211207388152615, 0.6819132804430946, 0.9004959920486495, 0.6869188767615144, 0.8235250113200907, 0.9166578872951766, 0.9013150686850961, 0.8380839858962025, 0.9073157003461221, 0.9543454184467036, 0.9055858124509073, 0.8918637925715706, 0.9903134675953051, 0.5127986114584449, 0.696692956130758, 0.5370523981116533, 0.7001724629171293, 0.9916611642074329, 0.9857366515184061, 0.924747395057823, 0.9445137331888381, 0.7766195394457958, 0.6195252022646915, 0.9546686663614874, 0.9844751038378801, 0.844027852820263, 0.7447830762001548, 0.6791783356204665, 0.5337497689443171, 0.7495252125656233, 0.7131514817215079, 0.877101450045404, 0.945751398349572, 0.8683084143497066, 0.8372134741459116, 0.6646904302471112, 0.8462850394417225, 0.5136995543425608, 0.7093735655361126, 0.8695010981991498, 0.7935643355642961, 0.5975979145080428, 0.6512819838748201, 0.5073914397414905, 0.7782127409874147, 0.6518832024877972, 0.9932076929834982, 0.5595762061403449, 0.9922246720679966, 0.5687109423220303, 0.838105358258217, 0.969625314094954, 0.7190620443864618, 0.8664081309733891, 0.667132130714411, 0.6353479152159843, 0.9881038444464245, 0.9749456914050801, 0.6056862445310439, 0.7651350765547513, 0.7111211950747746, 0.7268386451680536, 0.6345617335482233, 0.520889051962435, 0.9564077666058417, 0.5944431116457711, 0.7802461524486353, 0.9072284896017788, 0.7054058059340611, 0.5258608417539039, 0.9747301898084519, 0.8116918485805862, 0.5947262934805981, 0.835470561834067, 0.8289930620314478, 0.5932868967061289, 0.712644147073302, 0.6305110950712968, 0.8225485490400115, 0.8741217768921016, 0.6296711447821639, 0.706668544980442, 0.9063993574246532, 0.6289844799422348, 0.9689829115352273, 0.8146775882388788, 0.9554286901233602, 0.5417569350671105, 0.805744277346577, 0.6248071416806387, 0.6357804381341573, 0.5849076492790561, 0.9432718005552067, 0.8528011364197232, 0.9393180145037578, 0.9055210552178747, 0.6218196701805875, 0.5630041801771573, 0.9057041149454138, 0.9699522473736628, 0.6410947775990148, 0.9467625040225263, 0.8152812695819185, 0.7962021187861228, 0.943125023708686, 0.593552831309881, 0.8989205954589661, 0.5432046020734231, 0.8479840409511219, 0.8508701651831969, 0.5299197849521209, 0.5325591497756108, 0.8793828608768641, 0.9369757353802319, 0.5594073029268327, 0.5286711607741894, 0.8548877866552773, 0.5686180685915301, 0.7391274856651364, 0.7926810868411738, 0.8954541553960164, 0.5655129506186884, 0.9109372852850768, 0.8388292670887891, 0.6047967180900435, 0.6235155630673881, 0.8682310872676411, 0.9662344277289328, 0.6665840484803136, 0.9948095299771151, 0.7204924530281454, 0.6999509968934565, 0.5766348487013278, 0.7367778800867257, 
0.6837518469566681, 0.7770362604491128, 0.7587593463886848, 0.6310152500067889, 0.9919350377155323, 0.5287101907299846, 0.6341323580528309, 0.6416557527648712, 0.5883926591073251, 0.631425571085183, 0.8420540781161465, 0.6143637234888837, 0.8104077645102102, 0.9959475858013923, 0.9714705072445354, 0.6585921059526243, 0.8276218042252581, 0.5236214010776717, 0.8033802033078954, 0.7285054761100016, 0.5029381516906044, 0.9234000025643223, 0.6620917674867863, 0.561503064154029, 0.5771265064791333, 0.8742754298885033, 0.7971833382563311, 0.7199981465522782, 0.7252244800011279, 0.5969156324580289, 0.7381505609258481, 0.5680191990851209, 0.5729645938848937, 0.7039117180288094, 0.9133157526199824, 0.5354540604279023, 0.5974301685518403, 0.5038891564217153, 0.8157942322558649, 0.7859410990553226, 0.8056504129361373, 0.6700358006832727, 0.6973785780151314, 0.5778551791276771, 0.5271360770943279, 0.8813298017380735, 0.5704358736540642, 0.7085428617195324, 0.9275680222175162, 0.5098610056870094, 0.9834861477586689, 0.5938433356175594, 0.6385932490945254, 0.6407794033101999, 0.9894985463088162, 0.6947984656850965, 0.6908835745820855, 0.9434479881043976, 0.6400819937464182, 0.6227332389056729, 0.9317305409527508, 0.7490244259085336, 0.7080759286228255, 0.7261418110356387, 0.9849611732531696, 0.8275907799859956, 0.7877709069472671, 0.6635787742348993, 0.9566498365513382, 0.9748628490405655, 0.6920318310713454, 0.9891349039024118, 0.6207854977713125, 0.5846708458760457, 0.638580615990205, 0.6107995937311764, 0.6273644935634082, 0.8791650679890575, 0.548205927966527, 0.9068404986513314, 0.7206877308174136, 0.8159804032398001, 0.7058470702694106, 0.7782382475467775, 0.554249281387349, 0.9329381142030011, 0.5511034131330732, 0.7350473494414951, 0.9682932885624354, 0.7124734961916888, 0.9885473291650108, 0.6051389970719803, 0.6532977034077603, 0.7299236953606625, 0.7255637558553876, 0.7654754536758199, 0.5954706519063397, 0.5918197380827519, 0.9673961499233998, 0.8059289887491525, 0.9788048055672876, 0.6030938291157355, 0.7844111512535903, 0.5347588790075453, 0.7080473758010537, 0.6515005741162019, 0.7964049788091725, 0.8410201353420025, 0.6714313442320763, 0.5910490044351887, 0.8995951419154249, 0.5184526686036783, 0.7925424835796833, 0.7679126300955836, 0.9590203996334217, 0.9903091706139668, 0.7216147584909365, 0.9976929437673969, 0.9704877980644011, 0.5470410188986154, 0.5186496911750648, 0.7275990372036649, 0.6748393260356575, 0.7191963173930802, 0.7788636130564965, 0.7043338505652308, 0.9406509282040869, 0.52560660969902, 0.9616897082844407, 0.5269839631434159, 0.6345471970871202, 0.7327621134132786, 0.5424223826722891, 0.6168325444359117, 0.6391903370210928, 0.8675913802283597, 0.7232739802939148, 0.6507550219605913, 0.9393934233789198, 0.8263999233020167, 0.7485394947870259, 0.5132102800989462, 0.625481521007652, 0.7739774047696706, 0.8857528228584339, 0.9238937325350356, 0.6833594764663675, 0.5085098423318805, 0.60230946300701, 0.9636284306289673, 0.7561520464326359, 0.5270883050805939, 0.7334739362727096, 0.812623690762135, 0.7789196515887583, 0.664852634521558, 0.879214888978636, 0.7926990751198626, 0.5970731570931582, 0.5355408806670174, 0.8625834422501488, 0.8591529471731963, 0.8343916360589914, 0.5093196955880548, 0.9616363595445148, 0.5602164363946163, 0.7172587103636402, 0.8135567537750678, 0.5115644623041465, 0.6116482269828576, 0.6993011884296083, 0.6353127926219422, 0.6707370605525017, 0.6775098986182565, 0.5641301865518293, 0.9806327958605061, 0.6565364439300072, 0.9204489360294551, 
0.8897248477441302, 0.5042206987979128, 0.6867821143085405, 0.8326952648842246, 0.8133394645759977, 0.631755831093546, 0.7168534543337715, 0.553815544401435, 0.9210250470711574, 0.8693782939314261, 0.707530539955956, 0.8280387791335548, 0.9898684171960364, 0.9106199707727254, 0.8917367641107529, 0.9175156167204768, 0.6570701428088337, 0.8850069006100854, 0.9016916422038186, 0.6217220978517903, 0.732174065703552, 0.9311739233989099, 0.7568504517733975, 0.8992529268458326, 0.8349936529456221, 0.7594875346301262, 0.9825630141201046, 0.8352988135145627, 0.9576918526608564, 0.7281027500440791, 0.7017470713526053, 0.8891857370680291, 0.5002872327542913, 0.94839759897853, 0.5867946200920484, 0.7109245157620054, 0.7900136686828181, 0.7454451494369139, 0.5634953251365167, 0.780902394602301, 0.7360328369367453, 0.9592633173685323, 0.6611483647614069, 0.6183278808848897, 0.5651687423987624, 0.9233209866077092, 0.8569320658136195, 0.5587178571907216, 0.9146668880118916, 0.5304406517948714, 0.7397743026504903, 0.5984947276197737, 0.7800889432751461, 0.9562199258305095, 0.6432837513122842, 0.5550505863181256, 0.9235852230924513, 0.733640926191228, 0.776085602341642, 0.9524542079848878, 0.847250075104929, 0.7383027441400019, 0.7390817430532659, 0.5934418555980321, 0.939762183617721, 0.7243231328197473, 0.8502483578899043, 0.5133099121559599, 0.7053783530245934, 0.8376415716976677, 0.6373816462943671, 0.8866852726164371, 0.6866767594367265, 0.9713340705964297, 0.8351969079006495, 0.5776190440980556, 0.5764313107857315, 0.5384416761182539, 0.53363574776288, 0.5034229345108567, 0.5980528030498259, 0.5110302205892395, 0.8948816982126281, 0.8736692914770265, 0.6978317046133657, 0.99010958406235, 0.7502160632818542, 0.6153720826043493, 0.5256339570888131, 0.9980145093725308, 0.7755385200777405, 0.569001590154208, 0.9176724785790076, 0.8181647947535928, 0.7374480098332676, 0.6909665020438541, 0.8142226548304321, 0.9742959309490044, 0.7369373381019042, 0.5827434624790624, 0.9271939313775199, 0.9317717788450606, 0.7216238185386424, 0.6985416016974761, 0.5807711852783854, 0.5361875519031234, 0.5535722388142401, 0.9228446765858227, 0.9872259771598613, 0.7961943683173853, 0.7509096841824182, 0.8920629777940434, 0.7196990780883037, 0.9465682735955805, 0.7023041340593035, 0.5003835914835418, 0.9551501012151069, 0.567378116214371, 0.6789007117283913, 0.8131317297967569, 0.5157030155040458, 0.8903269497329267, 0.9698394707342216, 0.880610216618767, 0.5638125233271751, 0.9439176534406095, 0.6617599887583265, 0.5971275743875617, 0.8433006006231972, 0.6361631341613987, 0.9314640172014954, 0.6003004748354532, 0.9335735031331582, 0.6512360228208864, 0.5086987276822852, 0.9289508271411779, 0.7764175949496209, 0.7217065396368492, 0.524910731412012, 0.5528080380926884, 0.6064559602195418, 0.5868470659969289, 0.6635625851282483, 0.9960085972417181, 0.9708803572636373, 0.7949211055726071, 0.9311976496496084, 0.5058496039334064, 0.9849933979285113, 0.754603430565185, 0.6020953284255771, 0.8881080637470343, 0.5444215224718545, 0.9790234088872531, 0.9507974984957426, 0.6060991653926988, 0.5812583351180527, 0.9352654402216442, 0.6986846296293825, 0.9168548851724783, 0.5943415536972141, 0.7972851114931117, 0.9238458252041004, 0.7785439940736236, 0.8027079420177673, 0.625666199588418, 0.9878265887939884, 0.8684623032413582, 0.9657038529429582, 0.6748846472509984, 0.6763728035482574, 0.863897236201751, 0.8878137025210504, 0.882000334699977, 0.9874521190492971, 0.9968497286219995, 0.7085143622349079, 0.7502563166466953, 
0.5100596894405709, 0.7221069810109304, 0.5119915802691737, 0.6772169647506838, 0.7944568379199017, 0.8985029224042136, 0.6440184616163815, 0.637449236824535, 0.8830754347167523, 0.7009911817687886, 0.7345796154807748, 0.8674924373519134, 0.5915263587943979, 0.6006873299642819, 0.935101364309366, 0.5916840133813797, 0.7025417008931176, 0.8738420055138969, 0.8515053284533634, 0.532849192641057, 0.8372844013505963, 0.6928445088878702, 0.5000481438995283, 0.5884335221398116, 0.5995670630808899, 0.9888530084238121, 0.5438025285085197, 0.5095021646983758, 0.8894768148169612, 0.6466027312844618, 0.5842592246182025, 0.6407079406106068, 0.9032205059340827, 0.8851738069269643, 0.7745756049713829, 0.6454741220170227, 0.6795858748123376, 0.6269660238146295, 0.5474855981954658, 0.5414390310120987, 0.9741726869608864, 0.5810304537700886, 0.7099185038655249, 0.5934671282518265, 0.6398698522874915, 0.5002762153443079, 0.5859937767770008, 0.6670655649237398, 0.9185268232951198, 0.9105261178683872, 0.572102526223022, 0.6327333283895704, 0.9320586638263583, 0.9026164478514013, 0.7180313118623922, 0.660405559974598, 0.5149313797841719, 0.7054232144392834, 0.5740223050858865, 0.6275890571803284, 0.9267748250226557, 0.9800441654951677, 0.507829382203053, 0.6419830505612427, 0.7088084545395703, 0.5335691036102399, 0.9141766177125922, 0.6341179915055504, 0.5884302188764479, 0.9210203431912694, 0.7434708823921008, 0.5987877133257116, 0.6492463885149824, 0.9479386335243074, 0.5840048168805851, 0.6965573314801549, 0.8494532049049655, 0.6078019087261085, 0.7384128836762065, 0.5950362018635402, 0.9937877541408366, 0.780514396725051, 0.7698684040692368, 0.976694951920666, 0.7459299494984142, 0.8168788335134516, 0.6234539464714894, 0.6198843520593409, 0.9697927309373819, 0.8316396562914714, 0.7592195918822549, 0.5077289855037546, 0.5098327402585117, 0.8774091203916243, 0.9091281027415556, 0.614181245033647, 0.5810847215641792, 0.7892407118312466, 0.7772499067663932, 0.984650564958578, 0.9091021548873972, 0.6740446906572493, 0.8293863877721068, 0.6226249360311578, 0.6547289407526626, 0.6042360601159997, 0.65963962983253, 0.8538765541782942, 0.6417756251984567, 0.7496272766906809, 0.6157990105244564, 0.6963683703890142, 0.8878627934136596, 0.9434391660816033, 0.9286000386024276, 0.6547148468210243, 0.9386945753296556, 0.7776746037920286, 0.5410738256834172, 0.5541537887220906, 0.566096810007787, 0.7358519830139285, 0.7612404901406558, 0.6523714579294894, 0.5605225243157361, 0.6525721726253293, 0.6322496631504118, 0.8822743815096532, 0.6608110422999461, 0.5329385729201039, 0.580515496665571, 0.5779912079140113, 0.87997203223194, 0.9898542697442663, 0.8837662614684308, 0.8544232781478822, 0.5022842180548204, 0.5773064792910672, 0.7000346056691509, 0.718178137054416, 0.5619155200597288, 0.7235558961907185, 0.9406133465773018, 0.6031276692009929, 0.7910496614542846, 0.5308397615651719, 0.7846357213502575, 0.732955045034702, 0.660689603613501, 0.624289347522082, 0.8362875255492506, 0.7137812227450171, 0.5647727715887381, 0.9218689726515423, 0.9018539978672373, 0.6124883956060718, 0.5444309741330681, 0.5961502031060293, 0.6645214096905985, 0.9141330615507679, 0.7775357416815067, 0.5995049883443608, 0.8604483111182484, 0.5541032546241027, 0.8089363999340876, 0.9964760805106907, 0.506888214034129, 0.6832218426476613, 0.8301052263479682, 0.5965132647446971, 0.7986420428667425, 0.8468177817819622, 0.8562725128157398, 0.9310770114279951, 0.6744064769329418, 0.6243212131028824, 0.6708758667597288, 0.7834429795058873, 
0.9432635554966639, 0.5957322054153149, 0.852522052426096, 0.9840865714593958, 0.6284795713964796, 0.8577619768166257, 0.7834187556268557, 0.7598640599146151, 0.9503367574361032, 0.8626399322323969, 0.64924784869908, 0.8446978433049003, 0.8411085966844416, 0.8925151032605001, 0.7091735431702804, 0.5597521801285236, 0.5502308175904501, 0.5254614794048132, 0.9605476270430058, 0.951637702110574, 0.8604801880713773, 0.990710364114952, 0.6655172596096852, 0.8501810642029749, 0.6001422674077659, 0.7603824968044223, 0.6406260944029176, 0.5169377858960995, 0.8967587379119223, 0.7394718814613863, 0.6484652171756973, 0.5438339034064437, 0.9935483396714682, 0.5849071752608563, 0.627438105749589, 0.9748620047026393, 0.9257332841564356, 0.8878997598941276, 0.5256609082863422, 0.9064511396772408, 0.9587194048007492, 0.5081764783610119, 0.8249905737305162, 0.6496397513801804, 0.7195955682880676, 0.7359959541681667, 0.9703071744057263, 0.8722490060693451, 0.9769402537669138, 0.5094019439460136, 0.6126200264330806, 0.8684780898967995, 0.5956058159270011, 0.634553677925165, 0.8848127447141709, 0.5484706883921411, 0.8679089029485725, 0.9546972653518806, 0.8794620109801896, 0.5279504419038505, 0.7126180146048886, 0.9102055908771131, 0.8644315606962762, 0.8247123860600846, 0.5921112363602581, 0.6083673824929476, 0.5141519057771338, 0.9760424747773796, 0.7388198003770832, 0.9030473608551833, 0.904359911597262, 0.8638327120164218, 0.7382776034258496, 0.7094500429092381, 0.8287307558957766, 0.8447362311752091, 0.9785323302753333, 0.736732262464013, 0.7646723240894204, 0.7237446545726518, 0.5312982298304042, 0.7374050603540891, 0.8661083699118752, 0.9404874446075214, 0.6414711394173191, 0.6645194727047316, 0.7493180113974431, 0.7034505260472272, 0.6518367147635957, 0.6108364199269274, 0.7823246082941627, 0.7544465339385489, 0.5395698291431562, 0.6508190074543254, 0.5253117886085376, 0.8826920181853335, 0.9128581699322444, 0.8775904303667588, 0.9530524152776587, 0.757540024053555, 0.7970528714870762, 0.8405165121084591, 0.7283701977135559, 0.8671076989061002, 0.7822913892834322, 0.9427750325895521, 0.9600203869932834, 0.5396420948196136, 0.7307336987601604, 0.5959344008466603, 0.5776775177983159, 0.819142990425868, 0.6944146798811895, 0.7832859900074962, 0.6801847757931612, 0.9562701633899062, 0.7073889663331205, 0.5371517686907452, 0.5848038209178932, 0.6938954583876427, 0.9675153814133772, 0.9106809336078343, 0.7797237871405178, 0.9076233577730968, 0.6225333434628848, 0.9360833778123654, 0.755369345876056, 0.8287246287544336, 0.7832527602513946, 0.6830294948575468, 0.8217939616674648, 0.9727466352953125, 0.7098410595636506, 0.5030784525369147, 0.5099524752704464, 0.699598836281111, 0.555056886140102, 0.7944902259103886, 0.5355412850764948, 0.8045953576527118, 0.6490594403989591, 0.5023053789482935, 0.5481470344054418, 0.6399705982155802, 0.7844154284709528, 0.99014092490572, 0.9394735331043571, 0.6345423902977484, 0.6345885277603347, 0.6059020120817644, 0.6162847990429718, 0.9938693460348769, 0.9328400015594591, 0.6165395760986283, 0.9594682926040896, 0.6622919370118638, 0.9726975942649864, 0.5879807594666935, 0.5088247547089808, 0.6965997150463837, 0.9811608422948641, 0.8840675507065754, 0.9461812951681865, 0.8351248119731944, 0.5258843901761987, 0.6435590303497869, 0.8202875258008769, 0.7188447487617302, 0.7537645472558716, 0.8602731300035864, 0.8213784766089811, 0.6233752684188447, 0.7483878865072116, 0.50831758545153, 0.6575499844823798, 0.5380055033765019, 0.973742952448867, 0.8789710918614744, 
0.7859915308789611, 0.7795509374148415, 0.5649060990073578, 0.7342098766891465, 0.564522965051357, 0.8676055501459201, 0.8472994826441227, 0.541154935013825, 0.8442759512833237, 0.5504082923093758, 0.6013676037224776, 0.9132968095667775, 0.9090459752866911, 0.7523346988693529, 0.5744768965300744, 0.9071978714393114, 0.995205231231019, 0.7015666490876219, 0.6860956478368885, 0.9301163684069222, 0.5976847519892772, 0.8298652469795309, 0.7348524460887784, 0.742158085690625, 0.6770698920585714, 0.860925612589416, 0.8873602465364756, 0.8437063814421497, 0.6209881732403102, 0.829286545633026, 0.5194834129005317, 0.9906188454842377, 0.5093668333896422, 0.5288771726621817, 0.6753817717410937, 0.5058557013786278, 0.8566538681674771, 0.8723433921961546, 0.986852463115012, 0.7027213269100716, 0.8460417142725801, 0.8369293454511714, 0.8996028741129123, 0.8070300480397428, 0.6395918389393982, 0.6247988069825273, 0.5217237552768497, 0.9342462247028214, 0.5253919575192875, 0.9815169780012409, 0.7333905685896838, 0.9754139904718684, 0.7543786396478807, 0.5553422065483762, 0.8605854785466969, 0.7134281059264829, 0.8283217475427667, 0.7679318837676344, 0.9064655279993168, 0.5340062039068005, 0.5967458090600182, 0.7449451895754696, 0.7009630626585777, 0.8229852180533714, 0.7710466568238832, 0.7893926797078262, 0.817593298383644, 0.6617725579564899, 0.8374289808524042, 0.8111016365128783, 0.533743690882045, 0.5921943670802837, 0.8660611424836043, 0.6305624362282244, 0.9549253702967202, 0.6785916275513784, 0.756979539974934, 0.8996012053945419, 0.8146714034375477, 0.6805102217565409, 0.9352072622350791, 0.6090949891172568, 0.5912686591571317, 0.6971505625008763, 0.5071837255359728, 0.7374163333106882, 0.8523875481946477, 0.56776876698454, 0.6248777695410921, 0.8450663188772065, 0.6095041964288364, 0.8442595393557512, 0.9643969221663399, 0.8917021871612274, 0.7489457698695341, 0.5814112596780918, 0.7915501323598708, 0.8864148194083357, 0.6043682732411422, 0.7757927970030094, 0.8414617032413634, 0.5127137823903312, 0.8567724441958259, 0.6096130293164774, 0.6764684849289472, 0.8460181956771147, 0.8696529308729957, 0.7251476480008732, 0.5954690025886057, 0.6568670955123213, 0.6944252959426949, 0.5745244305618826, 0.8282796889683786, 0.5631043299831184, 0.7496262715990387, 0.9874906496118887, 0.8966278353205451, 0.5386878772027497, 0.7192172797212988, 0.9524368515562635, 0.948137475377858, 0.5836522477410689, 0.6172856380623571, 0.5136821094715727, 0.5776015301502713, 0.9325720853696757, 0.9358075816466761, 0.6922749689595328, 0.7852498892451859, 0.912267458881405, 0.6742255335694698, 0.7825304761528185, 0.8650554793338581, 0.616915032070197, 0.7810817168499768, 0.9473839014056948, 0.833115484908957, 0.8620544746165284, 0.5446811432723602, 0.6800025829987993, 0.5371002649252101, 0.8073457868335473, 0.5203839902214875, 0.8879935715586273, 0.9050492337238809, 0.6244224538063633, 0.8954581886045854, 0.6749155672495473, 0.5327180582267492, 0.6068500446531124, 0.6181917388588856, 0.928827225520807, 0.8706357918505327, 0.7569962458361807, 0.632191131482521, 0.8804432451958644, 0.9068103574280871, 0.7156312683798343, 0.551462738681055, 0.7527167245345066, 0.6769902031144203, 0.8491450711886029, 0.8869128730515499, 0.6857741949438243, 0.5183713894200075, 0.6597834749918241, 0.649392365841013, 0.6327994903390479, 0.5499856606413038, 0.8876766433343859, 0.7804487525561608, 0.7796656778818778, 0.6484136414898243, 0.8160956501029327, 0.6423040468014047, 0.5671073335602459, 0.5526960361178777, 0.5553425527673415, 
0.9755499600629666, 0.9928124220227508, 0.6058651779313444, 0.6747882667387863, 0.8467797893684955, 0.6685222062966949, 0.5294215942221532, 0.8551626982140346, 0.6960009500385091, 0.7945150136104191, 0.8325705370376602, 0.5777289361329725, 0.5814658204500128, 0.8327616354819651, 0.7958688395679077, 0.7701728647558552, 0.5924808022506777, 0.5462275854987788, 0.522210665552151, 0.6056395856981897, 0.5178370985212434, 0.8943275807428943, 0.9657874897859372, 0.9632183094600104, 0.8669319389396521, 0.8203026553770668, 0.9379869353251327, 0.9423806547201279, 0.5125447251955291, 0.6336199700406894, 0.6832034183414766, 0.607363621009056, 0.7319400487880183, 0.6655850057385082, 0.9688448447358833, 0.5665282114140241, 0.7453251265824534, 0.7960961578288981, 0.7742151704064646, 0.9511709473649064, 0.9252329964190595, 0.6539343559829813, 0.5391208097912679, 0.7302206845223498, 0.5358925784953117, 0.6105242434897172, 0.903830102265514, 0.6541822195524265, 0.7166172233936803, 0.9812827898923311, 0.9014105163125912, 0.7833210136095907, 0.7524349487505273, 0.5892425376933675, 0.8321436417314974, 0.5669995368466987, 0.7981550018744118, 0.6163970300775787, 0.6163245398802475, 0.6851213037980425, 0.6221799795635743, 0.8200215587543256, 0.8800228163434116, 0.6252504767446851, 0.9388372708142068, 0.6474065066154271, 0.5585177486673933, 0.9414591324677744, 0.7494190285321578, 0.8665670048851938, 0.6441625882369895, 0.5500864534070471, 0.5645315317349273, 0.9101297690499928, 0.914060329266259, 0.5370183630537323, 0.6062367316655717, 0.8123459107196331, 0.9139763783020962, 0.8401469997958583, 0.7680045167064222, 0.5236781655543681, 0.5127182912740973, 0.9844074043200753, 0.7028101781298418, 0.7750288273436847, 0.6028738265986948, 0.8017024253856293, 0.8217832925465804, 0.8669114038313149, 0.552438085395886, 0.7851719354908234, 0.9854974630650922, 0.5631888515722597, 0.8077333668559901, 0.805110839528868, 0.6808573491308427, 0.6210993576490422, 0.5582073155176236, 0.8886806979987446, 0.6346451473353911, 0.7047633982508874, 0.9828449970100417, 0.7779544679141983, 0.7244238444737225, 0.6602807303982986, 0.8489727051208034, 0.736280056727493, 0.6973148694446145, 0.9986240268748925, 0.5807384198079513, 0.9810149587070336, 0.6360983801168032, 0.7890882768283014, 0.7180638337133805, 0.5018512003036433, 0.803478032443756, 0.5179078761901904, 0.947212930362237, 0.8911588293935601, 0.9223407798106737, 0.6356717171145961, 0.9034008222528855, 0.8291715261646946, 0.9933956589015248, 0.5245271086617016, 0.8747290432021282, 0.975061617448933, 0.7408085622198496, 0.773796871774915, 0.5394209648105619, 0.5451148488755475, 0.6391951931191318, 0.776788243909841, 0.8261706644507478, 0.9047868831325021, 0.5481106526128778, 0.8047905630122272, 0.6934334265616842, 0.9951449361533032, 0.9008723073222187, 0.8303249530582817, 0.9614719279223345, 0.5939099457870023, 0.855212835446811, 0.5736026167526582, 0.5337544065275819, 0.5276587875341385, 0.9201768486160865, 0.921438288641341, 0.6116135180314728, 0.8887019191529395, 0.9724180433247355, 0.7894497591403742, 0.8836457043741688, 0.5130707105222323, 0.9008478611980439, 0.6465700465092636, 0.7981920691567168, 0.7860991150167704, 0.5188982865064249, 0.8121762235466801, 0.6930998638912795, 0.968866891125206, 0.7991567009751961, 0.6979732658740968, 0.7754261892340776, 0.8912489381979298, 0.9531509455817969, 0.675440420635226, 0.9249797978245234, 0.7000756166411202, 0.7932128659110149, 0.7441123206676281, 0.6853047031566151, 0.9982201398072461, 0.8100448187441223, 0.9654304013952628, 
0.8436742915516208, 0.7608565272353498, 0.7387329789121503, 0.6881608908337034, 0.7293064229397865, 0.6288318800765664, 0.6452694573946312, 0.745596481377305, 0.7052388202751451, 0.9398021384725406, 0.9936044527953776, 0.5053367455693586, 0.9397217810177876, 0.8727579917614927, 0.6305339096359578, 0.9701398564611007, 0.9036349319877326, 0.7360448214963455, 0.7667628545404437, 0.8908001704442619, 0.8709745942139613, 0.9829927471669158, 0.6638527760593609, 0.5323941673051724, 0.8632625240863816, 0.9488306849931083, 0.9625398788421153, 0.6234109570123262, 0.7058963191092794, 0.7956753117484123, 0.9979352047169103, 0.7582837579218544, 0.9767289083196253, 0.9777560204220709, 0.868573185281674, 0.5198583096881311, 0.7598794783061432, 0.9416989693185505, 0.9593428687847696, 0.969980158218346, 0.9130102007756591, 0.6332307042735479, 0.7803254954082977, 0.7295677033961345, 0.5794764109158461, 0.7712186890082855, 0.7029614180618526, 0.5771843279854566, 0.9381772811249327, 0.6503719945852242, 0.577763031307815, 0.7471884797307842, 0.7378569028354156, 0.9016599419918686, 0.588184676927477, 0.7508085875758775, 0.9413754730045316, 0.8830156722865798, 0.7183488445510054, 0.8345149441436648, 0.7196573979021692, 0.6598014882326657, 0.5753303679412307, 0.5979103739218312, 0.7487044684446251, 0.6941474295156114, 0.7083503731318006, 0.5839368425388733, 0.7544146040739399, 0.5850350519375349, 0.9592364248777274, 0.5388398060977094, 0.5691401513402823, 0.9789920570884498, 0.6208860235227177, 0.697479624994208, 0.9097855066223199, 0.8412782684620281, 0.6634511613869263, 0.6714209893235005, 0.833730302160608, 0.7431177031941008, 0.8940578270865556, 0.8466025019568351, 0.8390888910449469, 0.6518282611776997, 0.650333591307825, 0.8613617511772558, 0.6758351232236768, 0.6877408117881705, 0.5401824116330747, 0.7555265991133051, 0.7748705234614401, 0.9722725604800513, 0.573648991905642, 0.8900860682195646, 0.5151775873549511, 0.9613551364001022, 0.7821282303822312, 0.9179684193360822, 0.954375487520639, 0.6375117026710135, 0.9969647853257733, 0.8753013808757907, 0.8767860462910737, 0.710778000695732, 0.5742968745965789, 0.8162380259980637, 0.9158520010548006, 0.7807470860740728, 0.8979230455024267, 0.9936572275271202, 0.7083052038787085, 0.7902521609007434, 0.6434419180679427, 0.7357963130984683, 0.6065238357981039, 0.9800933376603832, 0.7692620281134402, 0.7579853441464142, 0.6683577412390953, 0.7543696139052103, 0.6638397343036877, 0.5727361366627709, 0.5332073131414556, 0.9311153557504835, 0.6679040016288984, 0.5278687396235189, 0.7877730191756949, 0.9039152602573776, 0.7982597461978624, 0.5589322450917251, 0.6458198255012435, 0.9025203932036321, 0.7319232465980791, 0.70737607160659, 0.5574214483185417, 0.7216494100920376, 0.7647229285542485, 0.5899485792738064, 0.7673460156090166, 0.9875782996529305, 0.98618450010204, 0.6010520895145822, 0.5526428261395229, 0.8709623139063545, 0.8710733729535005, 0.5064902455823086, 0.604306883624546, 0.7007514757499618, 0.8361132049149278, 0.8996417257909157, 0.7520941596671157, 0.5528428164716568, 0.8768181301526347, 0.8305087292664315, 0.7446329381927888, 0.7001460895625479, 0.5835099802245496, 0.8502155045387971, 0.6252382019572444, 0.6063793722532907, 0.8413295403100353, 0.914332814387289, 0.6816095934237016, 0.5534579898743051, 0.972733984563328, 0.5850269979411842, 0.7214791777950158, 0.5757249107479367, 0.5364819227699389, 0.7672358014717838, 0.6567572953392699, 0.5275593403499311, 0.5350875316422268, 0.7573155608493425, 0.9160726940427522, 0.9544130961581814, 
0.9077428702601527, 0.5449241445444004, 0.7823543751102859, 0.8966934734508378, 0.5880343360531544, 0.6688908267964186, 0.7003690216784018, 0.6180258104395422, 0.5460745121723951, 0.6952546253838328, 0.9476619625512321, 0.6699261023972627, 0.854368404283271, 0.9812526164066645, 0.6185533158716691, 0.7189393189370077, 0.7301822144253631, 0.5276956853595252, 0.7486260567960925, 0.7979547086510521, 0.5505542475907209, 0.5067118481511903, 0.6802081783472471, 0.5826107562880349, 0.9973960968257876, 0.5442625662027267, 0.5933856100950915, 0.8004133938603625, 0.6501603018478066, 0.9136498268435296, 0.973223525863065, 0.5340426222197758, 0.8334922180033413, 0.5680340101986648, 0.8483948479170726, 0.6976033081306539, 0.944263324299244, 0.6092948520069132, 0.9479844073486698, 0.7658755727841363, 0.9366904211777938, 0.8237204435444285, 0.6648709396861319, 0.6071892963529493, 0.9023091319716239, 0.6593248585704261, 0.7696253717257049, 0.8621859509734695, 0.8541978830798401, 0.7989712613329538, 0.822594641126195, 0.7849292712487492, 0.6826915901698933, 0.9625566265883636, 0.8747900625701517, 0.786501169606096, 0.5611417539844026, 0.7335236065041818, 0.7012644465242514, 0.8997867341425713, 0.5497858689040093, 0.56753509286922, 0.5545375466589157, 0.8377569370145683, 0.6003668191791804, 0.5692010022069849, 0.7787286890731056, 0.6696248960097018, 0.7004274113592284, 0.9058876092872112, 0.7550524871368689, 0.9491336146987548, 0.7638181162883693, 0.6683507302712779, 0.7563238899322937, 0.8181436562956627, 0.721340716306976, 0.7563906068387825, 0.5658492377708144, 0.6628779787012683, 0.5197302897117531, 0.7040943788443303, 0.7954878739004267, 0.8543947776280933, 0.7154412310828782, 0.8203192889255231, 0.7215129650498571, 0.5467154663022733, 0.9572352265178746, 0.7429350000460719, 0.9806098094503721, 0.9170709116151697, 0.9858593894620771, 0.6673810311766473, 0.6501687179182749, 0.8266081311834439, 0.5306604446954801, 0.6716790080972266, 0.7774885192180188, 0.9345563707193072, 0.6746297844713616, 0.9164054466096763, 0.8030842107816305, 0.5202398620730274, 0.6534579779571236, 0.7731552883763311, 0.8394929989774749, 0.9285368502247304, 0.8416179063204652, 0.572874709828308, 0.8422790217454043, 0.9170645353878004, 0.7914905149517789, 0.6183538300151361, 0.535795338483187, 0.943304840316853, 0.8005439159146318, 0.7112905374991182, 0.7443576891671735, 0.7710353683658455, 0.5921313216562778, 0.9566716460998531, 0.7661440351783944, 0.8224724469269697, 0.591354294756712, 0.6470669947876853, 0.5992991607180007, 0.7960556235589228, 0.8900459170531745, 0.9208182611947631, 0.7460974983454941, 0.9433293109296301, 0.9137401385463397, 0.51672588425317, 0.6108843491768661, 0.938590669229814, 0.5062922604442429, 0.9979074647141006, 0.7445964931827995, 0.6486125891994764, 0.9458553589203578, 0.6362973443860189, 0.550696422561825, 0.677478613167537, 0.7770316211587592, 0.5121534214133081, 0.5209739824821442, 0.8021146091983891, 0.9149970984341234, 0.900621548585335, 0.581359362457005, 0.6829404947328868, 0.6538135938967646, 0.6031098722170811, 0.5997375306113115, 0.7659769529842986, 0.7253929493668845, 0.7720305679776065, 0.5660331843801046, 0.9207277424036889, 0.5217060734655707, 0.9327566887731054, 0.7744635363223176, 0.8030699714017145, 0.5705856962071845, 0.9242749124292089, 0.7583590442944816, 0.902113197251282, 0.7348835333342476, 0.9333935326712411, 0.7667268289618221, 0.7560830480587016, 0.7058091809979673, 0.8152982252443272, 0.9929260438158979, 0.939069166399255, 0.8397961524188727, 0.8857540839785139, 
0.9029053761138512, 0.5185301212420292, 0.9768748983913915, 0.9415011457014427, 0.605667165162392, 0.5807487221579383, 0.5229972519469499, 0.8996238300777222, 0.5333523175178193, 0.6338312707718852, 0.7906595502622555, 0.7394776958855113, 0.8822647031012276, 0.5792456726207447, 0.6060289752084239, 0.59738722505868, 0.8036987054884588, 0.6318838918780572, 0.7542769303287236, 0.7371766288707337, 0.5317398004401181, 0.9859062415675864, 0.5090268555795172, 0.5953014524822569, 0.6845006038992987, 0.618116678501057, 0.8397584495301553, 0.8911417998315636, 0.7574370040360896, 0.8022049029347608, 0.7412617112073377, 0.8521415328975266, 0.7847448973135493, 0.9361824326521333, 0.6141933606996414, 0.5579393041666796, 0.743446264721986, 0.8218650490572034, 0.7638250078143927, 0.6037231985775295, 0.5646252241428036, 0.8413183953870893, 0.8763059540536184, 0.7659744179680156, 0.5764257574449114, 0.9540026281628655, 0.9459591208912934, 0.6740129097806586, 0.8174383407713175, 0.929506104553306, 0.8332831904528146, 0.663263566225297, 0.5013466267031965, 0.8810081628878481, 0.961015739295236, 0.7202279995184866, 0.8956031104786478, 0.6629094231665342, 0.6499454852891273, 0.5635704829501369, 0.949100252346629, 0.801409983664389, 0.8499217548992319, 0.7102412441342927, 0.5659466859462172, 0.8310819671728318, 0.64919690062132, 0.9745646407105648, 0.5353443784099708, 0.7807915025186756, 0.8944976796764404, 0.7322101169379684, 0.8929598254934342, 0.8225385148406966, 0.9877194806062892, 0.8519158779828959, 0.7490921416150285, 0.9910482718179279, 0.8629317263921277, 0.9324597259320305, 0.7523593035203828, 0.5337732995406582, 0.9381318318201499, 0.9534112778188844, 0.9901212873349984, 0.8453933032595844, 0.5665860762199117, 0.848745001468165, 0.7302542818847466, 0.5806048673755082, 0.8892291005016375, 0.5600656515812532, 0.6621505183869394, 0.9882551078284239, 0.6184017365398122, 0.7034795141286356, 0.5713806747775991, 0.9793210788772707, 0.518739591676302, 0.9770530336462055, 0.6238009551441674, 0.769552498182572, 0.9021421508179768, 0.5864536992558724, 0.6650409805719825, 0.7029192050307664, 0.9247051787419717, 0.630844010055782, 0.8251133014313583, 0.7433441661396432, 0.5007663927200829, 0.5657765378652244, 0.8560847084618797, 0.532407529824069, 0.6203428995684561, 0.8657224988368766, 0.9414847678534359, 0.8128096812842516, 0.9415149845042652, 0.9968675477337037, 0.6910928344764198, 0.862197167781477, 0.7900355818901039, 0.8144255043885911, 0.5715349855162275, 0.6451606351791037, 0.906487817882614, 0.7955509442071145, 0.6138461827193851, 0.8672158586412106, 0.9821305854356762, 0.6490146156074252, 0.7497819861332757, 0.6233209219296154, 0.9961367778355639, 0.8268389494579773, 0.5911445306477774, 0.8692570468595011, 0.606225822878868, 0.9071164031009722, 0.9763444065528841, 0.8330365267963592, 0.5816252915967688, 0.8034701004585916, 0.9815171685338404, 0.6407791119865044, 0.6705284390014794, 0.9895017351388478, 0.8043267796899647, 0.9053671578191433, 0.8501588407732036, 0.9299602133962361, 0.7797121335804715, 0.7580661891275962, 0.757995553462457, 0.9207085675003752, 0.6603775693497382, 0.9203211401929199, 0.8063039460173962, 0.5072220115588004, 0.5834564258945258, 0.926672586643553, 0.5733840626417026, 0.6725181326603895, 0.7627139978493577, 0.9188483425695912, 0.6588471441608188, 0.5489400419829952, 0.5530431436732157, 0.6926795995203132, 0.8336761107772268, 0.5353857019029865, 0.9135668443449176, 0.9404195846644339, 0.6045938732687836, 0.832102712481127, 0.7246067899538654, 0.5797261865561318, 
0.9086097530184468, 0.6794967576079536, 0.7279788100183431, 0.58352781439535, 0.9132851253243481, 0.7916021707578611, 0.9197166061178488, 0.6628595991537716, 0.8751491715264126, 0.6358378980923691, 0.6961644191741689, 0.8791648323764291, 0.7255280967969631, 0.8463327300462983, 0.6462634564166616, 0.654412280900885, 0.5028359673078285, 0.7320363872494591, 0.6738780657907539, 0.553314417855775, 0.8056666401066834, 0.5144258586110524, 0.620400666159552, 0.9618885420852021, 0.7212572393134558, 0.5921208623210962, 0.7307119192881724, 0.5658365851952072, 0.8744866290286397, 0.9094057340288264, 0.8727496131607384, 0.7963817783094735, 0.9445730122412959, 0.545107747796955, 0.7785372942342575, 0.5510461234613888, 0.6704647554854897, 0.6297175253271474, 0.9476322025069388, 0.6382585177372411, 0.6858951473332586, 0.506924472348355, 0.9711299642876424, 0.8416808932653932, 0.5171392145582858, 0.6070454066199347, 0.8181799947568047, 0.5949269235194905, 0.7011254287996236, 0.8851795644865501, 0.9336508387720961, 0.8114802756022452, 0.5335668795271751, 0.8477596579754565, 0.7839979889210541, 0.96357153760001, 0.7491378700375544, 0.8409461201072477, 0.5182573778086701, 0.983121330872997, 0.5727923494724874, 0.8733073479444509, 0.873732730809655, 0.5476747736734127, 0.8775909512349692, 0.6955266788524741, 0.6173700379279927, 0.5658567754437628, 0.6583144046193574, 0.5692106979715361, 0.8098959753159554, 0.9453743233580805, 0.5651749684071069, 0.940926259648261, 0.685107147700752, 0.9944575322647939, 0.8289494905652102, 0.809748986515362, 0.7199759114344715, 0.6477977122734841, 0.5008459124374651, 0.5458201066945627, 0.6673267427553381, 0.8082324602413753, 0.9251185181115764, 0.6593063601022977, 0.8616616372119741, 0.6480564029317369, 0.9928492532923685, 0.5858379800429006, 0.6648109554655944, 0.5843321664540182, 0.6036045936172416, 0.6213941508282957, 0.770544857122542, 0.6881063153758744, 0.9727727046133338, 0.5755144719462586, 0.7771927164667769, 0.547975611687048, 0.9100790258106277, 0.8693354264924416, 0.6873658692536446, 0.9375406398921834, 0.7125903578414812, 0.9670200877604898, 0.8996804982842057, 0.8270947704818606, 0.5220927459316825, 0.8331401696859071, 0.9262220868971276, 0.7662790279540495, 0.7277118081156889, 0.5429633465109894, 0.6499297467997416, 0.7774402106926288, 0.8690704463178767, 0.8746366409400838, 0.5732673225574458, 0.9005027649739645, 0.5942022846172836, 0.5016184431267685, 0.5724667566027446, 0.5679170853475457, 0.5571346161915087, 0.5359664606916478, 0.9042250327422112, 0.6119169740813302, 0.9537720500114317, 0.6052013758886547, 0.5649458789910851, 0.6698502872027516, 0.5943665694122892, 0.8420214281960239, 0.8922024667608199, 0.8834439172075004, 0.8585943863956393, 0.6545870971585632, 0.511864462734207, 0.7104262024285828, 0.8471447300162056, 0.5798181072903852, 0.9768848182634415, 0.7655062438199809, 0.9043508886239935, 0.5525912837584785, 0.8742135676270333, 0.5924430532702758, 0.7835322160675412, 0.9694952627970288, 0.6790714392487864, 0.7404036568514782, 0.6136874138894772, 0.7372237934320325, 0.5994036089057619, 0.9782860121655232, 0.6905422385653593, 0.7586769845549562, 0.5644560016279353, 0.5531692185323454, 0.9685674417118189, 0.7008834836166964, 0.8997264544497989, 0.8718475745698481, 0.7226049685942544, 0.9042912484634908, 0.6513439857000194, 0.5700677759815922, 0.5497150381648168, 0.6227497770911719, 0.7503174533864772, 0.9106822756873143, 0.9840962079114687, 0.7300989295293154, 0.6965864075687607, 0.7605939906386932, 0.6274927950607967, 0.7280673714160244, 
0.5789315699577973, 0.7844095538149041, 0.6364644021146414, 0.7439610644815688, 0.862522773149345, 0.8124127148999893, 0.7329070540894043, 0.5881468559225811, 0.5711736600977816, 0.6516721351146957, 0.9034669670756992, 0.9756058714140338, 0.5105552274536298, 0.7901895171755392, 0.6576826102206815, 0.5153097142342173, 0.990789303225108, 0.5088438668768593, 0.7995434358221443, 0.6931214716326113, 0.7634814188405696, 0.73911987430203, 0.5298346603877444, 0.671802121252893, 0.5833694748626387, 0.7760111993931386, 0.5629325953234372, 0.5625252355339437, 0.7731941368835478, 0.9959076126478873, 0.508411347241783, 0.8637280487632221, 0.849528047564218, 0.6153752783385149, 0.737419052442398, 0.9961788216051635, 0.9308677218503676, 0.8703340877994614, 0.962764480661111, 0.7766341278383857, 0.969699634462984, 0.9249277200849835, 0.8428072199301783, 0.6542399343112852, 0.7788041204479893, 0.5149808868552206, 0.7703701196956748, 0.6656170615685806, 0.5014842724418124, 0.7339314124414604, 0.7084390098495446, 0.8612823595873873, 0.5534892031891943, 0.5983864941820476, 0.5052883209214856, 0.850654267767163, 0.6037475957376495, 0.7281082593814012, 0.9544564991410622, 0.9605432076438106, 0.8343915022026079, 0.9921839173575588, 0.8659412169494761, 0.7741720891365971, 0.7472776350435302, 0.9417336132983741, 0.7625703370421142, 0.9702957075734349, 0.6520372094621814, 0.5853461211600434, 0.8312698947732468, 0.9517634394178764, 0.793792661047068, 0.9203309022242423, 0.7526004833442819, 0.8126399077384473, 0.5468337059865118, 0.6910297688184622, 0.7602136431901315, 0.8941452215693861, 0.6971265675618927, 0.9405182724430502, 0.6533103886769077, 0.9814364566749274, 0.8507623924593727, 0.7565945531783052, 0.6281461594141244, 0.5528693137681613, 0.9288626428132976, 0.9603078392148165, 0.6981603402405614, 0.7032064823913237, 0.7208791863083243, 0.7321398921344049, 0.982615419205818, 0.8889257198841766, 0.7967556972668262, 0.5800975864571819, 0.5675293292341701, 0.6014051578786626, 0.9793672301564219, 0.5889080862383254, 0.9485731740737803, 0.8619029917296794, 0.953946219999102, 0.9184200168732022, 0.8599682805617193, 0.8671002933337709, 0.9461143436862698, 0.7598611511349622, 0.922132352683315, 0.6421955777614081, 0.8356584099843797, 0.632279882794637, 0.831633128237651, 0.6855347294184113, 0.7310709300373774, 0.7526302259840572, 0.947448154782603, 0.8772582935443404, 0.9482422778697546, 0.7940726000511904, 0.7450961864831841, 0.7727237420772337, 0.6558486134198822, 0.7896300882656345, 0.8169735607765589, 0.9255731208067093, 0.7515772237941648, 0.5285892025573582, 0.9325548935754131, 0.6467259116353458, 0.5527276411995121, 0.6938334564574848, 0.9020166972932648, 0.9511910759048087, 0.850539562392973, 0.9830790204678698, 0.6114751978110489, 0.5696758379184861, 0.6073976614326303, 0.9835859482457012, 0.6815113601183531, 0.7149422699609673, 0.668469500576615, 0.8134721582485482, 0.9523961505524883, 0.7036764527741294, 0.940936459619827, 0.9203568050849684, 0.6047497360139672, 0.8909711892624385, 0.9276841192761078, 0.8159069790078146, 0.6510312797058044, 0.96457022368362, 0.8197751949050621, 0.6492127877581992, 0.8174404523827372, 0.6456050955745896, 0.5537637854404754, 0.803423481452022, 0.5935711113967153, 0.5656568526537833, 0.6946488551082611, 0.9208290721609491, 0.8285723699677446, 0.5039348227860196, 0.83807770884522, 0.94941060981521, 0.9255519929388483, 0.83151446768875, 0.704261165107587, 0.6705925131448701, 0.7954414904634906, 0.9102417497439719, 0.8973814848083963, 0.7027823156685774, 0.6809743161338941, 
0.8400930961410447, 0.6544825206912691, 0.5936191121475589, 0.6098432715716356, 0.6187910356982614, 0.5468658771089432, 0.6264343715214674, 0.5041356670127476, 0.9674962226078226, 0.9050277737885746, 0.7936593582349388, 0.6096407380274126, 0.613611000053517, 0.6238373968969477, 0.7029527730992118, 0.9370604268068581, 0.8806417952241865, 0.5360856084300725, 0.8656844068638787, 0.6668690507771546, 0.6417667250502159, 0.7738938003977714, 0.9426986965128328, 0.6056344566130712, 0.5230112096206755, 0.9159236840030894, 0.9937432348751171, 0.9345038135971047, 0.7683750727524779, 0.5775777960988799, 0.988611692752493, 0.9887296420955052, 0.536327618330819, 0.756566974451988, 0.8366075335427013, 0.9107928771245182, 0.5161937138360806, 0.6413721752920131, 0.8178308551283758, 0.5253290804143698, 0.8248561064031583, 0.5192251763088735, 0.7881903435787168, 0.8067590265203075, 0.8367853891313793, 0.900248399087032, 0.9629618712131891, 0.8197196871984488, 0.9571480519343173, 0.8696243147053886, 0.6024004004835963, 0.9940597346652376, 0.9152422533551574, 0.7880486411629548, 0.9414990986979996, 0.8970066882784551, 0.5453874358818824, 0.6969838763223108, 0.862193537591254, 0.7781637822417875, 0.5200707261894677, 0.9611839472602827, 0.8880188053232222, 0.8108581395730916, 0.9283254825625256, 0.8449422127229824, 0.6762651829236576, 0.8498937136682188, 0.8167577210771826, 0.979465973456586, 0.7685849046161162, 0.8311605190210216, 0.754767555670858, 0.6669684184801851, 0.9205544972427047, 0.6737882991367233, 0.6472369512866004, 0.8404396384940804, 0.7556380512059366, 0.943057687436327, 0.9452457419539588, 0.7540793253934455, 0.8134577598617327, 0.6870691929818225, 0.951508655185747, 0.6844400623811586, 0.5861785558268452, 0.5406831513303881, 0.7746811900699899, 0.5068925077969787, 0.5977354603726354, 0.9377364336913849, 0.7409974300096525, 0.6037342773254262, 0.8832145645567583, 0.5175076757744124, 0.7662699207204287, 0.9616248496438993, 0.9407397480028867, 0.8995356409653562, 0.6819297687111362, 0.6827526137152915, 0.5003349963411161, 0.7609182947481975, 0.788044318677101, 0.9109547547272163, 0.7778506670147598, 0.9214191628760735, 0.6461387854501006, 0.7102858055994863, 0.954014839734733, 0.6196056386352573, 0.5681670402891323, 0.5133303087650926, 0.5426087875466159, 0.5895786915594983, 0.7746487471172482, 0.8774653772073235, 0.5702753385572272, 0.7142300140535749, 0.6845494399315013, 0.7802740964462768, 0.5895505683992917, 0.7630675455299742, 0.9128899976811513, 0.9836318671591682, 0.685072030278494, 0.9928829912429464, 0.9107919622238143, 0.6295716059777767, 0.761727332751532, 0.7262962855497962, 0.7283901717213126, 0.766932388131865, 0.6357798368712051, 0.604507285867328, 0.6763687771279823, 0.6721865311114175, 0.5186769465341295, 0.8066526417101189, 0.737600245117265, 0.7574946253389045, 0.9822925363868376, 0.6400476720230552, 0.6269319627821257, 0.6952627196645965, 0.9166843147863557, 0.8773734656332947, 0.9900767689051748, 0.6135923163376094, 0.660832847432361, 0.8564925625261977, 0.968731827898049, 0.9807762483112663, 0.9727796581255344, 0.7561000628256942, 0.8481712049796453, 0.5565347548037309, 0.7459688846947418, 0.82472607104824, 0.8199137975632552, 0.9516790158451226, 0.9402416471917567, 0.9840108406153008, 0.8947776522944455, 0.774628391694426, 0.8986312324925527, 0.5314981070528024, 0.9625744961090141, 0.8911141130487985, 0.7132441127140182, 0.512990257284528, 0.579316618736835, 0.8620983300791822, 0.9833976751025184, 0.9527207830301339, 0.6298273625868904, 0.815495241343476, 0.5881015291527121, 
0.7913188640624692, 0.7099210530947955, 0.7522925591640094, 0.5944220340185769, 0.6988632483908601, 0.637266770148003, 0.9576553890178743, 0.9007946341426135, 0.7417935527913493, 0.591436046108186, 0.9998238071596031, 0.5506011167893692, 0.88525712334556, 0.80754877405888, 0.7699961638591879, 0.803929094757599, 0.5663042473531694, 0.8214355169222599, 0.5391587489341252, 0.7605221834560651, 0.5304718300472717, 0.717559255198535, 0.9443316979473941, 0.7943145054172224, 0.9050678036083267, 0.6151612639198532, 0.5602507667394581, 0.8042916469699162, 0.5655426798382674, 0.652548419767767, 0.7713290554797467, 0.7899289360152547, 0.637770175257261, 0.7233317545761551, 0.9089221483811096, 0.5117995326940902, 0.8324344031561803, 0.6153085617230847, 0.9083125477844938, 0.8324152383943755, 0.8090296580483545, 0.5001745848730008, 0.9962346890679281, 0.7373417662116579, 0.9951703386629501, 0.7395931315663136, 0.5696377912019076, 0.5917488874097298, 0.6748573579327317, 0.8769432891857535, 0.5691026729976516, 0.6761289211131405, 0.8314405814992777, 0.9134457819583013, 0.6225968020866421, 0.6135759445656412, 0.8311070898123274, 0.7283674419712469, 0.9486284002644159, 0.6844657630718356, 0.5707254250317484, 0.6909127166143363, 0.6445000342932923, 0.7456531283124668, 0.7859649317468724, 0.6010633225460422, 0.7406827147584976, 0.8724638258710198, 0.5153928851581455, 0.9988760099479499, 0.5669304755233022, 0.8095205407625105, 0.792622485041846, 0.7913910846377656, 0.5535494736315726, 0.6919053993657291, 0.6536061016632948, 0.7464073118075729, 0.6396588396613225, 0.5794470834528156, 0.6490326893473868, 0.690088561595623, 0.874372007474997, 0.8838552498683668, 0.9743723992673781, 0.7362385941938411, 0.852415921151637, 0.804573500790353, 0.7144546658608908, 0.9113633838322783, 0.8485944806382635, 0.9714298649279068, 0.909170510926882, 0.7429611394694472, 0.5803296224511241, 0.5976068794202294, 0.8940634121658386, 0.7269907695543304, 0.6643535658670378, 0.7882089909695664, 0.8682977316935927, 0.8771195191775105, 0.6633989309201578, 0.740784654049776, 0.7534958910455833, 0.5707755124321667, 0.7274099527553044, 0.5249795181708543, 0.8138896329575629, 0.6967900973580026, 0.5932684545176272, 0.7327506945953604, 0.5553576267162681, 0.77581738471256, 0.6526446923467705, 0.672476438486218, 0.8794851882331634, 0.5546299276591804, 0.6728883641211967, 0.6187927615823623, 0.9692284082564657, 0.9675316647841373, 0.6465906066702043, 0.8603072997320135, 0.6933337633834702, 0.9473599492476242, 0.7345802044687432, 0.8675181044301365, 0.8694198932878385, 0.7770807119429932, 0.9952964264644264, 0.775219941157125, 0.862614356753628, 0.604606272156893, 0.8709402513683127, 0.9857742099084161, 0.8360069393803965, 0.6566118588365555, 0.6170684800499598, 0.5363769203703678, 0.900759320763458, 0.6005496919475729, 0.5691091550441016, 0.7656958835970669, 0.8801159654922822, 0.9441994008703869, 0.798699052743302, 0.9364585577258434, 0.9681879163365615, 0.6611034011875769, 0.5679327352358461, 0.6501808973341667, 0.521372901249659, 0.5458059553407324, 0.6497833035818329, 0.7280676266542376, 0.7208274603999099, 0.5629405338872506, 0.8236189303830068, 0.8832237715362599, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
1, 3, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 493, 495, 497, 499, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 559, 561, 563, 565, 567, 569, 572, 574, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 699, 701, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 845, 847, 849, 851, 853, 855, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1062, 1064, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1132, 1134, 1136, 1138, 1141, 1143, 1145, 1147, 1150, 1152, 1156, 1158, 1161, 1163, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1184, 1186, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1210, 1212, 1215, 1217, 1220, 1222, 1225, 1227, 1230, 1232, 1238, 1240, 1243, 1245, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1357, 1359, 1361, 1363, 1367, 1369, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1393, 1395, 1399, 1401, 1404, 1406, 1409, 1411, 1414, 1416, 1419, 1421, 1424, 1426, 1429, 
1431, 1434, 1436, 1439, 1441, 1443, 1445, 1447, 1449, 1452, 1454, 1458, 1460, 1462, 1464, 1469, 1471, 1473, 1475, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1718, 1720, 1722, 1724, 1726, 1728, 1732, 1734, 1740, 1742, 1744, 1746, 1748, 1750, 1753, 1755, 1758, 1760, 1762, 1764, 1766, 1768, 1771, 1773, 1776, 1778, 1781, 1783, 1786, 1788, 1791, 1793, 1796, 1798, 1800, 1802, 1804, 1806, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1855, 1857, 1859, 1861, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1864, 1877, 1864, 1877, 1864, 1921, 1923, 1925, 1927, 1929, 1931, 1731, 1580, 1580, 1237, 1235, 1468, 1468, 1739, 1418, 1423, 1234, 1209, 1237, 1235, 1790, 1237, 1235, 1739, 1737, 1739, 1737, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 1555, 1237, 1235, 1641, 1555, 1237, 1235, 1237, 1235, 1752, 1694, 1731, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1757, 1752, 1643, 1877, 1209, 1237, 1235, 1234, 1237, 1235, 1234, 1209, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1061, 1060, 1808, 1643, 1641, 1643, 1790, 1736, 1808, 1641, 1237, 1235, 1757, 1694, 2285, 2287, 2289, 2291, 2294, 2296, 2298, 2300, 2303, 2305, 2307, 2309, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2349, 2351, 2353, 2355, 2357, 2359, 1456, 1451, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1237, 1235, 1061, 1060, 1237, 1235, 1418, 1423, 1451, 1451, 1717, 1775, 1752, 1757, 1757, 1752, 1785, 1785, 1757, 1752, 1757, 1752, 1775, 1757, 1752, 1737, 1737, 1757, 1752, 1717, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2644, 2646, 2649, 2651, 2653, 2655, 1061, 1060, 1214, 1219, 1229, 1224, 1237, 1235, 1214, 1219, 1229, 1224, 1149, 1229, 1224, 1237, 1235, 1149, 1155, 1237, 1235, 1237, 1235, 1456, 1451, 1438, 1456, 1451, 1467, 1423, 1418, 1423, 1433, 1418, 1433, 1438, 1456, 1451, 1457, 1398, 1398, 1457, 1467, 1877, 1643, 1641, 1770, 1770, 1739, 1737, 1739, 1737, 1877, 1864, 1877, 1864, 1877, 1864, 1877, 1864, 1864, 1864, 2979, 2981, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3065, 3067, 3070, 3072, 3074, 3076, 3078, 3080, 3083, 3085, 3089, 3091, 3094, 3096, 3100, 3102, 3104, 3106, 3108, 3110, 3113, 3115, 3119, 3121, 3124, 3126, 3130, 3132, 3134, 3136, 3139, 3141, 3098, 3093, 3146, 3144, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 2983, 2983, 3098, 3093, 3064, 3146, 3144, 3098, 3093, 3143, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 
3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3151, 3680, 3682, 3688, 3690, 3149, 3147, 3149, 3147, 3149, 3147, 2658, 3707, 3709, 3098, 3093, 3064, 3098, 3093, 3098, 3093, 3143, 2658, 2972, 2972, 4018, 4020, 3146, 3144, 4053, 4055, 4057, 4059, 4062, 4064, 3146, 3144, 3146, 3144, 3149, 3147, 3082, 3088, 3112, 3118, 3144, 3146, 3146, 3144, 3149, 3147, 3151, 4140, 4142, 4145, 4147, 4152, 4154, 4157, 4159, 4162, 4164, 4166, 4168, 4171, 4173, 4175, 4177, 4156, 4061, 4161, 4156, 4181, 4179, 4161, 4156, 4181, 4179, 4181, 4179, 4151, 4161, 4061, 4181, 4179, 4151, 4179, 4181, 4181, 4179, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6656, 6658, 6660, 6662, 6664, 6666, 6668, 6670, 6672, 6674, 6676, 6678, 6680, 6682, 6684, 6686, 6688, 6690, 6692, 6694, 6696, 6698, 6700, 6702, 6704, 6706, 6708, 6710, 6712, 6714, 6716, 6718, 6720, 6722, 6724, 6726, 6728, 6730, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6764, 6766, 6768, 6770, 6772, 6774, 6776, 6778, 6780, 6782, 6784, 6786, 6788, 6790, 6792, 6794, 6796, 6798, 6800, 6802, 6804, 6806, 6808, 6810, 6812, 6814, 6816, 6818, 6820, 6822, 6824, 6826, 6828, 6830, 6832, 6834, 6836, 6838, 6840, 6842, 6844, 6846, 6848, 6850, 6852, 6854, 6856, 6858, 6860, 6862, 6864, 6866, 6868, 6870, 6872, 6874, 6876, 6878, 6880, 6882, 6884, 6886, 6888, 6890, 6892, 6894, 6896, 6898, 6900, 6902, 6904, 6906, 6908, 6910, 6912, 6914, 6916, 6918, 6920, 6922, 6924, 6926, 6928, 6930, 6932, 6934, 6936, 6938, 6940, 6942, 6944, 6946, 6948, 6950, 6952, 6954, 6956, 6958, 6960, 6962, 6964, 6966, 6968, 6970, 6972, 6974, 6976, 6978, 6980, 6982, 6984, 6986, 6988, 6990, 6992, 6994, 6996, 6998, 7000, 7002, 7004, 7006, 7008, 7010, 7012, 7014, 7016, 7018, 7020, 7022, 7024, 7026, 7028, 7030, 7032, 7034, 7036, 7038, 7040, 7042, 7044, 7046, 7048, 7050, 7052, 7054, 7056, 7058, 7060, 7062, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 7080, 7082, 7084, 7086, 7088, 7090, 7092, 7094, 7096, 7098, 7100, 7102, 7104, 7106, 7108, 7110, 7112, 7114, 7116, 7118, 7120, 7122, 7124, 7126, 7128, 7130, 7132, 7134, 7136, 7138, 7140, 7142, 7144, 7146, 7148, 7150, 7152, 7154, 7156, 7158, 7160, 7162, 7164, 7166, 7168, 7170, 7172, 7174, 7176, 7178, 7180, 7182, 7184, 7186, 7188, 7190, 7192, 7194, 7196, 7198, 7200, 7202, 7204, 7206, 7208, 7210, 7212, 7214, 7216, 7218, 7220, 7222, 7224, 7226, 7228, 7230, 7232, 7234, 7236, 7238, 7240, 7242, 7244, 7246, 7248, 7250, 7252, 7254, 7256, 7258, 7260, 7262, 7264, 7266, 7268, 7270, 7272, 7274, 7276, 7278, 7280, 7282, 7284, 7286, 7288, 7290, 7292, 7294, 7296, 7298, 7300, 7302, 7304, 7306, 7308, 7310, 7312, 7314, 7316, 7318, 7320, 7322, 7324, 7326, 7328, 7330, 7332, 7334, 7336, 7338, 7340, 7342, 7344, 7346, 7348, 7350, 7352, 7354, 7356, 7358, 7360, 7362, 7364, 7366, 7368, 7370, 7372, 7374, 7376, 7378, 7380, 7382, 7384, 7386, 7388, 7390, 7392, 7394, 7396, 7398, 7400, 7402, 7404, 7406, 7408, 7410, 7412, 7414, 7416, 7418, 7420, 7422, 7424, 7426, 7428, 7430, 7432, 7434, 7436, 7438, 7440, 7442, 7444, 7446, 7448, 7450, 7452, 7454, 7456, 7458, 7460, 7462, 7464, 7466, 7468, 7470, 7472, 7474, 7476, 7478, 7480, 7482, 7484, 7486, 7488, 7490, 7492, 7494, 7496, 7498, 7500, 7502, 7504, 7506, 7508, 7510, 7512, 7514, 7516, 7518, 7520, 7522, 7524, 7526, 7528, 7530, 7532, 7534, 7536, 7538, 7540, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7550, 7552, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 
7564, 7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7584, 7585, 7586, 7587, 7588, 7589, 7590, 7591, 7592, 7593, 7594, 7595, 7596, 7597, 7598, 7599, 7600, 7601, 7602, 7603, 7604, 7605, 7606, 7607, 7608, 7609, 7610, 7611, 7612, 7613, 7614, 7615, 7616, 7617, 7618, 7619, 7620, 7621, 7622, 7623, 7624, 7625, 7626, 7627, 7628, 7629, 7630, 7631, 7632, 7633, 7634, 7635, 7636, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7649, 7651, 7653, 7655, 7657, 7659, 7661, 7663, 7665, 7667, 7669, 7671, 7673, 7675, 7677, 7679, 7681, 7683, 7684, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7760, 7761, 7762, 7763, 7764, 7765, 7766, 7767, 7768, 7769, 7770, 7771, 7772, 7773, 7774, 7775, 7776, 7777, 7778, 7779, 7780, 7781, 7782, 7783, 7784, 7785, 7786, 7787, 7788, 7789, 7790, 7791, 7792, 7793, 7794, 7795, 7796, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7804, 7805, 7806, 7807, 7808, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816, 7817, 7818, 7819, 7820, 7821, 7823, 7825, 7827, 7829, 7831, 7833, 7835, 7837, 7839, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903, 7904, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7917, 7918, 7919, 7920, 7921, 7922, 7923, 7924, 7925, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7944, 7945, 7946, 7947, 7948, 7949, 7950, 7951, 7952, 7953, 7954, 7955, 7956, 7958, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 7982, 7983, 7984, 7986, 7988, 7990, 7991, 7992, 7993, 7994, 7995, 7996, 7997, 7998, 7999, 8000, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8009, 8011, 8013, 8015, 8017, 8019, 8021, 8023, 8024, 8025, 8026, 8027, 8028, 8029, 8030, 8031, 8032, 8033, 8034, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8321, 8323, 1808, 8415, 8495, 8507, 8458, 8503, 8505, 1877, 8321, 8323, 1808, 8415, 8497, 8509, 8499, 8511, 8458, 8503, 8505, 1877, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8065, 1456, 1451, 1438, 8065, 1456, 1451, 8067, 576, 576, 576, 576, 1580, 1580, 1580, 8069, 1188, 1183, 8073, 1188, 1183, 1209, 1234, 8519, 8077, 1188, 1183, 698, 698, 698, 1785, 1785, 8426, 576, 1790, 1770, 1790, 1468, 1468, 1770, 8524, 8082, 1699, 1699, 1699, 1699, 1699, 1214, 1229, 1224, 8526, 8528, 1731, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 8096, 8450, 1736, 1731, 8099, 1752, 1736, 1731, 1739, 1737, 8099, 1752, 8097, 1752, 8099, 1752, 8426, 8102, 1188, 1183, 8106, 1188, 1183, 8110, 1757, 1752, 1775, 8113, 8115, 1808, 8117, 8119, 8121, 8123, 1757, 1209, 1234, 8531, 1438, 8127, 576, 1736, 1731, 8533, 1736, 1731, 8535, 576, 576, 576, 576, 8132, 8134, 1790, 1790, 1790, 1790, 1790, 1739, 1737, 8538, 8540, 8542, 8135, 8137, 1188, 1183, 8141, 1188, 1183, 1209, 1234, 8546, 
1780, 1785, 1785, 1785, 1785, 8148, 1757, 1752, 1785, 1785, 1785, 1785, 1641, 1234, 1209, 8550, 1209, 1234, 8552, 698, 1752, 8156, 8158, 1752, 8159, 8160, 698, 1752, 1214, 1224, 1229, 1234, 1209, 8557, 1219, 8559, 1219, 8561, 8330, 1188, 1183, 1234, 1209, 8563, 8314, 8565, 1699, 1694, 1643, 1877, 8172, 1188, 1183, 1214, 1224, 1229, 8570, 1214, 1224, 1229, 8573, 1214, 1224, 1229, 8575, 8577, 1219, 8579, 1219, 8581, 8583, 8585, 1219, 8587, 1219, 8589, 8591, 8593, 8188, 1188, 1183, 8595, 1188, 1183, 1408, 1403, 1408, 1403, 1371, 1438, 844, 8200, 857, 576, 8426, 8598, 576, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1468, 1468, 1468, 1468, 576, 8450, 576, 8426, 576, 576, 1209, 1234, 8605, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 1214, 1229, 1224, 1219, 1229, 1224, 1731, 1736, 1737, 1739, 698, 1757, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 698, 698, 8224, 8226, 8228, 8230, 8232, 698, 1757, 1736, 1731, 1739, 1737, 698, 1757, 8237, 8238, 1864, 8408, 8627, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8247, 1188, 1183, 1214, 1224, 1229, 1234, 1209, 8637, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8247, 1188, 1183, 1219, 8639, 1219, 8641, 1209, 1234, 8643, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8257, 1188, 1183, 1165, 1160, 8335, 1188, 1183, 1214, 1229, 1224, 1234, 1209, 8645, 1165, 1160, 8647, 1188, 1183, 1219, 1229, 1224, 1234, 1209, 8649, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 8651, 8270, 844, 8273, 857, 8276, 8278, 8280, 8282, 8284, 8286, 1456, 1456, 8297, 8426, 1736, 1731, 1736, 1731, 1739, 8305, 1757, 1752, 1699, 1694, 8469, 1717, 8291, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8294, 8296, 1736, 1731, 8318, 8659, 1699, 1694, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8297, 8663, 1736, 1731, 8318, 8665, 8469, 1717, 8299, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8305, 1757, 1752, 8481, 1775, 1770, 1780, 8307, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8314, 8668, 1736, 1731, 1739, 8318, 8672, 1699, 1694, 1790, 8321, 8323, 8458, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8685, 1188, 1183, 1165, 1160, 8335, 1188, 1183, 8687, 8689, 1234, 1209, 8691, 8693, 8695, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8330, 1188, 1183, 1165, 1160, 1165, 1160, 1155, 8335, 1188, 1183, 1219, 1214, 8698, 1234, 1209, 8700, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8349, 1188, 1183, 1219, 1214, 1229, 1224, 1209, 8704, 1219, 1214, 1229, 1224, 1234, 8706, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8408, 8412, 8362, 8708, 8412, 1408, 1403, 1408, 1403, 1413, 8408, 8711, 8412, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1413, 8715, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8391, 8721, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1371, 8394, 1456, 1451, 8412, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8408, 1456, 1451, 8412, 1468, 1468, 1468, 1468, 1468, 1468, 8413, 8414, 8495, 8455, 8458, 8503, 8505, 1478, 8415, 1739, 1737, 8419, 1757, 1752, 1699, 1694, 1717, 8426, 8428, 8430, 8432, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1736, 1731, 1739, 1737, 8477, 1757, 1752, 1694, 1699, 1770, 8469, 1775, 1717, 8450, 8481, 1775, 1770, 1780, 8452, 8454, 8729, 8493, 8495, 8455, 1877, 1864, 8458, 8503, 8505, 1736, 1731, 1737, 1736, 1731, 1739, 8477, 1757, 1752, 
1699, 1694, 1699, 1694, 8469, 1775, 1717, 1790, 1785, 1736, 1731, 8733, 1736, 1731, 8735, 8477, 1757, 1752, 8481, 1775, 1770, 1780, 1790, 1785, 1795, 8489, 1808, 8491, 8493, 8737, 8495, 8739, 8497, 8741, 8499, 8743, 8501, 8503, 8505, 1877, 8514, 8784, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8786, 8681, 8788, 8790, 8792, 8515, 8794, 8681, 8671, 8670, 8671, 8670, 8671, 8670, 8671, 8670, 8732, 8746, 8745, 8746, 8745, 8670, 2983, 8746, 8745, 8746, 8745, 8530, 8662, 8671, 8670, 8671, 8670, 8671, 8548, 8548, 8732, 8731, 8671, 8604, 2983, 2983, 8601, 8670, 8661, 8604, 8662, 8661, 8731, 8798, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8801, 8610, 8758, 8803, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 8626, 8616, 8806, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 8626, 8684, 8808, 8810, 8812, 8612, 8814, 8816, 8818, 8613, 8820, 8822, 8824, 8614, 8826, 8828, 8615, 8830, 8630, 8758, 3128, 3123, 8626, 8616, 8832, 3128, 3123, 3128, 3123, 8622, 3143, 8834, 8836, 8838, 8840, 8842, 3143, 8626, 8630, 3098, 3093, 3098, 3093, 3098, 3093, 8635, 8636, 8847, 8849, 8851, 8723, 8713, 8713, 8723, 8662, 8661, 2983, 8855, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8681, 8858, 8860, 3128, 3123, 8684, 8719, 8719, 2983, 2983, 2983, 3098, 3093, 3064, 3098, 3093, 8775, 3093, 3098, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3064, 3064, 3064, 8867, 8758, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 8769, 3098, 3093, 8763, 3098, 3093, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3064, 3064, 3064, 8872, 8758, 3093, 3098, 3098, 3093, 3098, 3093, 8763, 3128, 3123, 3064, 3064, 3064, 8874, 8876, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 8769, 3093, 3098, 3098, 3093, 3098, 3093, 8775, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 3143, 8884, 8886, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 8899, 8894, 8896, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 4149, 4144, 8845, 8894, 8896, 8901, 4149, 4144, 8903, 8894, 8896, 8905, 4149, 4144, 8845, 8896, 8907, 4149, 4144, 8845, 8894, 8896, 4061, 4061, 4151, 8883, 8882, 8883, 8882, 4149, 4144, 4156, 4156, 4156, 4149, 4144, 4161, 4161, 4161, 8912, 4149, 4144, 4161, 4156, 8894, 8896, 8917, 8916, 8915, 8916, 8915, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8960, 8961, 8962, 8963, 8964, 8966, 8967, 8968, 8969, 8970, 8971, 8972, 8973, 8974, 8976, 8978, 8979, 8980, 8981, 8982, 8983, 8984, 8985, 8986, 8987, 8988, 8989, 8990, 8991, 8992, 8993, 8994, 8995, 8996, 8997, 8998, 8999, 9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9016, 9017, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9025, 9026, 9027, 9028, 9029, 9030, 9031, 9033, 9034, 9035, 9036, 9037, 9038, 9039, 9040, 9041, 9044, 9045, 9046, 9047, 9048, 9049, 9050, 9051, 9052, 9053, 9054, 9055, 9056, 9057, 9058, 9059, 9060, 9061, 9062, 9063, 9064, 9065, 9066, 9067, 9068, 9069, 9070, 9071, 9072, 9073, 9074, 9075, 9076, 9077, 9078, 9079, 9080, 9081, 9082, 9083, 9084, 9085, 9086, 9087, 9088, 9089, 9090, 9091, 9092, 9094, 9095, 9096, 9097, 9098, 9100, 9101, 9103, 9104, 9105, 9106, 9107, 9108, 9109, 9110, 9111, 9112, 9113, 9114, 9115, 9119, 9120, 9121, 9122, 9123, 9124, 9125, 9126, 9127, 9129, 9130, 9131, 9132, 9133, 9134, 9135, 9136, 9137, 9138, 9139, 9140, 9141, 9142, 9143, 9145, 9146, 9148, 9149, 9150, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9160, 9161, 9163, 9165, 9167, 9168, 9169, 9170, 9171, 9173, 9175, 9176, 9177, 9178, 9179, 9180, 9181, 9182, 9183, 9184, 9186, 9187, 9188, 
9190, 9191, 9192, 9195, 9197, 9201, 9203, 9207, 9208, 9209, 9211, 9212, 9213, 9214, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9225, 9226, 9227, 9228, 9229, 9230, 9231, 9232, 9233, 9234, 9235, 9236, 9237, 9238, 9239, 9240, 9241, 9242, 9243, 9244, 9245, 9247, 9248, 9249, 9250, 9251, 9252, 9253, 9254, 9255, 9256, 9257, 9258, 9259, 9260, 9261, 9262, 9263, 9264, 9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9282, 9283, 9284, 9285, 9286, 9287, 9288, 9289, 9290, 9291, 9292, 9294, 9295, 9296, 9297, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 9322, 9324, 9325, 9327, 9328, 9329, 9330, 9331, 9332, 9333, 9334, 9335, 9336, 9337, 9338, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9348, 9349, 9351, 9352, 9353, 9354, 9355, 9356, 9357, 9359, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367, 9368, 9369, 9370, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9380, 9381, 9382, 9383, 9384, 9385, 9386, 9387, 9388, 9389, 9390, 9391, 9392, 9393, 9395, 9396, 9397, 9398, 9399, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9407, 9408, 9409, 9410, 9411, 9412, 9413, 9414, 9415, 9416, 9417, 9418, 9419, 9420, 9421, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9435, 9436, 9437, 9438, 9439, 9440, 9441, 9442, 9443, 9444, 9446, 9447, 9448, 9450, 9451, 9452, 9453, 9454, 9455, 9456, 9457, 9458, 9459, 9460, 9461, 9462, 9463, 9464, 9465, 9466, 9467, 9468, 9469, 9470, 9471, 9472, 9473, 9474, 9475, 9477, 9478, 9479, 9480, 9482, 9483, 9484, 9485, 9486, 9487, 9488, 9489, 9490, 9491, 9492, 9493, 9494, 9496, 9497, 9498, 9499, 9500, 9501, 9502, 9505, 9506, 9510, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9520, 9521, 9522, 9523, 9524, 9525, 9526, 9527, 9528, 9529, 9531, 9532, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9550, 9551, 9552, 9553, 9554, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9563, 9564, 9565, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9589, 9590, 9591, 9592, 9593, 9594, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9610, 9611, 9612, 9613, 9614, 9615, 9617, 9618, 9619, 9620, 9621, 9622, 9623, 9624, 9625, 9626, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9639, 9640, 9641, 9642, 9643, 9644, 9645, 9646, 9647, 9648, 9649, 9650, 9651, 9652, 9653, 9654, 9655, 9656, 9657, 9658, 9659, 9660, 9661, 9662, 9663, 9664, 9665, 9666, 9667, 9668, 9669, 9670, 9671, 9672, 9673, 9674, 9675, 9676, 9677, 9678, 9679, 9680, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 9688, 9689, 9690, 9691, 9692, 9693, 9694, 9695, 9696, 9697, 9698, 9699, 9700, 9701, 9702, 9703, 9704, 9705, 9707, 9708, 9709, 9710, 9711, 9712, 9713, 9714, 9715, 9716, 9717, 9718, 9719, 9720, 9721, 9722, 9723, 9724, 9725, 9726, 9727, 9728, 9729, 9730, 9731, 9732, 9733, 9734, 9736, 9737, 9739, 9740, 9741, 9742, 9743, 9744, 9745, 9746, 9747, 9748, 9749, 9750, 9751, 9752, 9754, 9756, 9758, 9760, 9761, 9762, 9763, 9764, 9766, 9767, 9768, 9769, 9770, 9771, 9772, 9774, 9778, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9787, 9788, 9789, 9790, 9791, 9792, 9793, 9794, 9795, 9796, 9797, 9798, 9799, 8523, 8523, 8523, 8719, 9043, 9800, 9801, 9802, 9803, 9804, 9805, 8544, 8544, 8544, 9806, 9807, 9808, 9809, 9810, 9811, 9185, 9189, 9194, 9200, 9206, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9823, 
9824, 9825, 9826, 9827, 9828, 9829, 9831, 9832, 9834, 9835, 9836, 9837, 9838, 9839, 9840, 9841, 9842, 9844, 9845, 9846, 9847, 9848, 9849, 9850, 9851, 9852, 9856, 9860, 9864, 9867, 9869, 9870, 9871, 9872, 9873, 9874, 9876, 9877, 9878, 9879, 9880, 9881, 9887, 9888, 8723, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 8719, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9919, 9920, 9921, 9504, 9509, 8723, 8723, 9922, 8719, 9923, 8723, 9924, 9925, 9926, 9927, 9928, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 9971, 9972, 9973, 9974, 9975, 9976, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9989, 9990, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 8879, 8878, 9773, 10022, 10023, 8879, 8878, 8881, 8880, 8879, 8878, 8881, 8880, 9779, 10024, 10025, 9947, 10026, 10027, 10020, 10028, 10029, 10031, 10032, 8879, 8878, 9830, 10033, 10034, 8879, 8878, 10020, 10035, 10036, 8879, 8878, 10020, 10037, 10038, 8879, 8878, 8881, 8880, 8879, 8878, 8881, 8880, 9868, 10039, 10040, 8879, 8878, 10020, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 8878, 8879, 10049, 10050, 10052, 10053, 8878, 8879, 8879, 8878, 10055, 10056, 10057, 10058, 8878, 8879, 8879, 8878, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 8853, 8853, 8853, 10067, 8879, 8878, 9947, 10068, 10069, 8879, 8878, 10020, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 8888, 8888, 10083, 10084, 10085, 10086, 10087, 10088, 8915, 10089, 10090, 10091, 8915, 8915, 10089, 10092, 10093, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 10131, 10133, 10136, 10138, 10142, 10146, 10157, 10160, 10162, 10165, 10187, 10189, 10191, 10193, 10195, 10197, 10203, 10207, 10209, 10219, 10222, 10225, 10236, 10241, 10243, 10256, 10260, 10263, 10265, 10273, 10280, 10282, 10294, 10296, 10301, 10303, 10306, 10311, 10314, 10317, 10320, 10327, 10329, 10331, 10333, 10335, 10344, 10361, 10363, 10365, 10367, 10371, 10374, 10376, 10378, 10382, 10384, 10386, 10399, 10401, 10409, 10411, 10413, 10417, 10420, 10422, 10424, 10426, 10428, 10432, 10436, 10438, 10440, 10442, 10446, 10448, 10451, 10454, 10456, 10458, 10460, 10463, 10465, 10467, 10469, 10471, 10474, 10476, 10478, 10481, 10484, 10486, 10488, 10491, 10493, 10495, 10498, 10500, 10516, 10518, 10522, 10524, 10529, 10531, 10533, 10538, 10541, 10543, 10545, 10547, 10551, 10557, 10559, 10561, 10565, 10568, 10572, 10574, 10576, 10580, 10584, 10590, 10592, 10594, 10597, 10599, 10602, 10604, 10606, 10608, 10610, 10614, 10616, 10618, 10622, 10624, 10626, 10628, 10630, 10632, 10636, 10638, 10640, 10643, 10645, 10648, 10650, 10652, 10659, 10661, 10666, 10668, 10670, 10673, 10675, 10678, 10680, 10682, 10685, 10688, 10690, 10692, 10695, 10697, 10699, 10701, 10705, 10707, 10709, 10712, 10714, 10718, 10721, 10723, 10725, 10728, 10730, 10734, 10752, 10755, 10757, 10765, 10774, 10776, 10779, 10781, 10789, 10797, 10802, 10805, 10809, 10811, 10813, 10818, 10820, 10822, 10825, 10828, 10831, 10113, 10115, 8965, 9753, 
10120, 8746, 8745, 10122, 10124, 8977, 8975, 10130, 8746, 8745, 10141, 8727, 10145, 8727, 10846, 10848, 10850, 10582, 10582, 10578, 10582, 8656, 10785, 10785, 10785, 10292, 8657, 8554, 10285, 10288, 10292, 10407, 10865, 8746, 8745, 8746, 8745, 10407, 10867, 10206, 8657, 8554, 10216, 10214, 8554, 8657, 8554, 10170, 10179, 10170, 10171, 8746, 8745, 10358, 10763, 8604, 8746, 8745, 8746, 8745, 8746, 8745, 10172, 10356, 8554, 8554, 8554, 8554, 8554, 8554, 8554, 8554, 10179, 10174, 10407, 10871, 10175, 10176, 8746, 8745, 10179, 10407, 10873, 10875, 10876, 10877, 10878, 10783, 8658, 8657, 8731, 8732, 8731, 10783, 10783, 10879, 8657, 8657, 8554, 8657, 8554, 10288, 10202, 10206, 10217, 10212, 10217, 10214, 10216, 10217, 10227, 8746, 8745, 10227, 10229, 10231, 8746, 8745, 8658, 8658, 8607, 10398, 8658, 8607, 10235, 10398, 10239, 10404, 10381, 8658, 8607, 8658, 8607, 8746, 8745, 10398, 10586, 10586, 10251, 10252, 10252, 10253, 10254, 10255, 10578, 10582, 10886, 10887, 10888, 10258, 10279, 10275, 10276, 10268, 10269, 10270, 10271, 10275, 10276, 10277, 10278, 10279, 8657, 10285, 8657, 8554, 8746, 8745, 8746, 8745, 10288, 10783, 10783, 8746, 8745, 10292, 10783, 9166, 9164, 9174, 8568, 8728, 8568, 8728, 10844, 8568, 8728, 10309, 10844, 10308, 8568, 8728, 10309, 10844, 8568, 8728, 10309, 10844, 8567, 8568, 8728, 10309, 10844, 10895, 10896, 10897, 9198, 9196, 10898, 9204, 9202, 10899, 10339, 8657, 10763, 8746, 8745, 8746, 8745, 8746, 8745, 8657, 10341, 10763, 9224, 8746, 8745, 8657, 10358, 8657, 10392, 8600, 8746, 8745, 8746, 8745, 8746, 8745, 8600, 8746, 8745, 8746, 8745, 8657, 10356, 8657, 10358, 8657, 8657, 10763, 8746, 8745, 8746, 8745, 8746, 8745, 10407, 8746, 8745, 10381, 8746, 8745, 10407, 8746, 8745, 8658, 10907, 8658, 8607, 10392, 8746, 8745, 8746, 8745, 8746, 8745, 10398, 8746, 8745, 10407, 8746, 8745, 10404, 8746, 8745, 10407, 8746, 8745, 10910, 10912, 10914, 10919, 10921, 10923, 10928, 10930, 10932, 10943, 10947, 10949, 10408, 10955, 10957, 10959, 10961, 9323, 9321, 10965, 10505, 10503, 10507, 10509, 10968, 10511, 8713, 8723, 9476, 10515, 8656, 10528, 8658, 8657, 9434, 10970, 9445, 9449, 8667, 10556, 10571, 9476, 9481, 10586, 10588, 10751, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10973, 10975, 10977, 10981, 10984, 10985, 9549, 9555, 10655, 8713, 10717, 8713, 10986, 10664, 8713, 10987, 10989, 10704, 10991, 10717, 8727, 10733, 8727, 9755, 9753, 10844, 8746, 8745, 10785, 10760, 10761, 10763, 9706, 9755, 8728, 8745, 10844, 8746, 8745, 10763, 10751, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10785, 10760, 10761, 10763, 9755, 8728, 8745, 10844, 8746, 8745, 10785, 10787, 10792, 10834, 9706, 9755, 9753, 10844, 8746, 8745, 10816, 10834, 10836, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10995, 10998, 11001, 11003, 11005, 11007, 11009, 11016, 11018, 11020, 11022, 11027, 11030, 11032, 11034, 11036, 11038, 11045, 11047, 11049, 11052, 11057, 11059, 11061, 11063, 11068, 11070, 11072, 11075, 11077, 11079, 8844, 8863, 8844, 8844, 11084, 11085, 11074, 11086, 8863, 11089, 11090, 10854, 10940, 11091, 11092, 11093, 11094, 10854, 10940, 11095, 11096, 11097, 8863, 11100, 11103, 10880, 10881, 10903, 10905, 11074, 11108, 11109, 11110, 8844, 8863, 11074, 11113, 11114, 11115, 8863, 8844, 11074, 11118, 11119, 11120, 8863, 8844, 10937, 11123, 11124, 10938, 11125, 11126, 10939, 11127, 11128, 10940, 11129, 11130, 11131, 8844, 8863, 11074, 11134, 11135, 11136, 8863, 8844, 11139, 11144, 11145, 11000, 8883, 8882, 9947, 8844, 8863, 11146, 11150, 11151, 11000, 8883, 8882, 9947, 8844, 8863, 11074, 11152, 11153, 10020, 8883, 
8882, 8863, 8844, 11154, 11158, 11159, 11000, 8844, 8863, 11074, 11160, 11161, 8863, 8844, 11162, 9947, 8883, 8882, 8844, 8863, 10020, 8883, 8882, 8863, 8844, 8853, 8853, 8853, 8853, 11169, 11170, 11171, 11173, 11174, 11000, 11175, 8863, 11074, 11178, 11179, 11180, 8863, 8883, 8882, 9947, 11183, 9947, 8883, 8882, 8888, 10020, 8883, 8882, 8888, 11188, 9977, 8883, 8882, 8888, 9991, 8883, 8882, 11193, 10020, 8883, 8882, 8888, 10020, 8883, 8882, 11194, 11195, 11197, 10082, 8916, 8915, 10089, 8916, 11201, 11202, 10089, 8916, 10089, 8916, 11205, 10048, 8916, 8915, 10054, 8916, 11206, 10059, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 11207, 10082, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 11463, 11464, 11465, 11466, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11474, 11475, 11476, 10135, 8725, 8724, 11267, 11477, 11478, 8726, 11479, 11480, 8726, 11484, 11289, 11447, 11318, 11485, 11486, 11487, 11442, 11488, 11489, 11490, 11491, 11492, 11493, 11494, 11495, 11496, 11497, 11498, 11500, 11501, 11502, 11503, 11504, 11270, 11271, 9015, 11273, 11506, 10199, 8671, 8670, 11507, 11508, 11509, 11510, 11511, 10199, 8671, 8670, 11512, 10582, 8671, 8670, 11513, 11514, 11515, 11516, 11517, 11518, 11519, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 11529, 11530, 10582, 8671, 8670, 11531, 11532, 10582, 8671, 8670, 11533, 11534, 10582, 8671, 8670, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11543, 11544, 11545, 11546, 11547, 11548, 11554, 11555, 11556, 11557, 11558, 11559, 11560, 11561, 11274, 11563, 10582, 8671, 8670, 10199, 8671, 8670, 11564, 11565, 11566, 11567, 11568, 11569, 11289, 11570, 11571, 11282, 11572, 11573, 11574, 11442, 11575, 11576, 11283, 11284, 11285, 11577, 11578, 11579, 11580, 11581, 11582, 11583, 11584, 11585, 11586, 11587, 11588, 11589, 11590, 11591, 11592, 9093, 11593, 11594, 9102, 9099, 11595, 11596, 11597, 11598, 11599, 11600, 11601, 11602, 11603, 11604, 11605, 11606, 11607, 11608, 11609, 11610, 11318, 11447, 11442, 11289, 11611, 11612, 11616, 11617, 11290, 11291, 9128, 11618, 11619, 11620, 11621, 11622, 11623, 11293, 11624, 11625, 11626, 11627, 11628, 9144, 9147, 11629, 11630, 11631, 11632, 11633, 11634, 11635, 11636, 11637, 11638, 11639, 11640, 11641, 11642, 11643, 11296, 9162, 11644, 11645, 11298, 9172, 11646, 8731, 11647, 11648, 11649, 11650, 11651, 11652, 11653, 11654, 11655, 11656, 11657, 11658, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 11666, 11667, 11668, 11669, 11301, 11302, 11303, 11304, 11673, 11674, 11676, 11677, 11305, 11306, 10677, 8725, 8724, 11679, 10337, 11680, 11681, 11682, 11683, 11684, 11685, 11686, 11687, 11688, 11689, 11690, 11691, 11692, 11693, 11694, 11695, 11310, 11696, 11697, 11698, 11699, 11700, 11701, 11702, 11703, 11704, 11705, 11706, 11707, 11708, 11709, 11710, 11711, 11712, 11713, 11714, 11715, 11716, 11717, 11718, 11719, 11720, 9246, 10369, 8697, 8702, 11315, 11316, 11721, 11722, 11723, 11724, 11725, 11318, 11726, 11727, 11728, 11729, 11730, 11731, 10388, 8671, 8670, 11732, 11734, 11735, 11736, 11737, 11738, 11739, 11740, 11741, 11742, 11743, 11744, 11745, 11746, 11747, 11748, 11323, 11749, 11750, 11751, 11752, 11753, 11754, 11767, 10415, 8703, 8702, 11327, 11328, 9309, 10430, 8703, 8702, 11333, 11772, 11773, 9326, 10444, 8697, 8702, 11338, 10620, 8697, 8702, 11340, 11341, 9347, 10612, 8697, 8702, 11344, 
11345, 9358, 10473, 8725, 8724, 10480, 8725, 8724, 10483, 10490, 8725, 8724, 10497, 8725, 8724, 8717, 10677, 8725, 8724, 11775, 11776, 11777, 11778, 11780, 11781, 11782, 11783, 11784, 10520, 8671, 8670, 11364, 11785, 8732, 11786, 10535, 8671, 8670, 11787, 11788, 10582, 8671, 8670, 11789, 8731, 10549, 8671, 8670, 11791, 10582, 8671, 8670, 11792, 11793, 11794, 10563, 8671, 8670, 11378, 11379, 11795, 10578, 8671, 8670, 11796, 10582, 8671, 8670, 11797, 8731, 11798, 11799, 11800, 11801, 11802, 11803, 11804, 11805, 11806, 11807, 10596, 8697, 8702, 11388, 10620, 8697, 8702, 11390, 9507, 10612, 8697, 8702, 11395, 10620, 8697, 8702, 11398, 9530, 9533, 10634, 8703, 8702, 11404, 11406, 11814, 11408, 11815, 10654, 8725, 8724, 11816, 11817, 11818, 11819, 10663, 8725, 8724, 11821, 11822, 10672, 8725, 8724, 10677, 8725, 8724, 10684, 8725, 8724, 10687, 10694, 8725, 8724, 8717, 10703, 8725, 8724, 11825, 10711, 8725, 8724, 11434, 11827, 11828, 8726, 10727, 8725, 8724, 11440, 11829, 11830, 8726, 11445, 11831, 11832, 11451, 11833, 11834, 11835, 11442, 11443, 11836, 10783, 11837, 11450, 11838, 11839, 11840, 11841, 11842, 11843, 11451, 11844, 11845, 11846, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11442, 11443, 11856, 10783, 11857, 11450, 11858, 11859, 11445, 11860, 11861, 11862, 11451, 11863, 11864, 11865, 11447, 11448, 11866, 10783, 11867, 11450, 11868, 11869, 11870, 11871, 11872, 11451, 11873, 11874, 11875, 10807, 10804, 11454, 11876, 8732, 8731, 11457, 9738, 9735, 11460, 11461, 11462, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 11917, 11918, 11919, 11920, 11923, 11921, 10852, 8881, 8880, 11924, 11925, 11928, 11926, 11929, 11934, 11932, 11935, 11938, 11939, 11940, 11941, 11733, 10993, 8864, 10993, 8864, 10993, 8864, 10993, 8864, 11942, 11943, 11733, 11733, 11944, 11945, 11733, 11946, 10916, 8881, 8880, 11949, 11950, 11951, 11952, 10925, 8881, 8880, 11955, 11956, 11957, 11958, 10934, 8881, 8880, 11961, 11962, 11963, 11964, 11967, 11970, 11973, 11976, 11977, 11978, 11979, 11081, 8881, 8880, 11982, 11983, 11984, 11988, 11986, 10979, 8881, 8880, 11989, 11990, 11991, 11992, 11993, 11997, 11995, 10979, 8881, 8880, 11998, 11999, 12000, 12001, 12002, 12003, 11024, 8881, 8880, 12006, 12007, 12008, 12009, 12010, 12014, 12012, 10979, 8881, 8880, 12015, 12016, 12017, 12020, 12021, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 10963, 8879, 8878, 12036, 12042, 12040, 10979, 8881, 8880, 12043, 12044, 12045, 11024, 8881, 8880, 12048, 12049, 8879, 8878, 11000, 11011, 8881, 8880, 12050, 12051, 12052, 8879, 8878, 11000, 11011, 8881, 8880, 12054, 12055, 12056, 12057, 11074, 8879, 8878, 11024, 8881, 8880, 12058, 12059, 12060, 12061, 8878, 8879, 11029, 11040, 8881, 8880, 12063, 12064, 12065, 12066, 11051, 8879, 8878, 11081, 8881, 8880, 12067, 12068, 12069, 11074, 8879, 8878, 11065, 8881, 8880, 12071, 12072, 12073, 12074, 11074, 8879, 8878, 11081, 8881, 8880, 12075, 12076, 12077, 11167, 12081, 12082, 12083, 8914, 12084, 12085, 8914, 12087, 8898, 8898, 8909, 12088, 12089, 8915, 12090, 12091, 8914, 12093, 12094, 12095, 8909, 12096, 12097, 8914, 12099, 12100, 12101, 8909, 12102, 12103, 12104, 8914, 12105, 12106, 12107, 11167, 12108, 12109, 12110, 11168, 11172, 12111, 12112, 8915, 8914, 12113, 8911, 12114, 12115, 12116, 8911, 12117, 12118, 12119, 8914, 12120, 12121, 8915, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 12162, 12164, 12169, 12171, 12174, 12175, 12176, 12177, 12180, 12183, 12184, 12185, 12186, 12187, 12188, 12189, 12190, 12191, 12203, 
12205, 12208, 12209, 12210, 12211, 12213, 12214, 12215, 12221, 12222, 12223, 12225, 12226, 12227, 12233, 12238, 12240, 12242, 12246, 12247, 12248, 12251, 12252, 12253, 12256, 12257, 12258, 12268, 12273, 12280, 12282, 12283, 12284, 12285, 12286, 12287, 12294, 12297, 12301, 12304, 12305, 12306, 12308, 12313, 12323, 12326, 12327, 12333, 12344, 12345, 12346, 12347, 12348, 12349, 12352, 12353, 12354, 12361, 12367, 12368, 12373, 12375, 12380, 12384, 12385, 12386, 12388, 12389, 12391, 12392, 12394, 12397, 12402, 12406, 12411, 12415, 12416, 12417, 12418, 12419, 12421, 12423, 12424, 12425, 12426, 12427, 12429, 12432, 12434, 12436, 12442, 12446, 12450, 12452, 12454, 12457, 12459, 12468, 12470, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12481, 12483, 12485, 12488, 12490, 12491, 12492, 12497, 12499, 12501, 12504, 12507, 12509, 12511, 12514, 12517, 12518, 12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527, 12529, 12530, 12531, 12532, 12533, 12534, 12535, 12536, 12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545, 12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563, 12568, 12572, 12573, 12574, 12575, 12577, 12579, 12580, 12581, 12582, 12584, 12585, 12586, 12588, 12589, 12590, 12591, 12593, 12594, 12595, 12599, 12600, 12601, 12602, 12603, 12605, 12606, 12607, 12609, 12610, 12611, 12613, 12617, 12619, 12621, 12624, 12625, 12626, 12627, 12628, 12629, 12630, 12631, 12632, 12633, 12634, 12635, 12636, 12637, 12638, 12639, 12640, 12641, 12642, 12643, 12644, 12645, 12646, 12647, 12649, 12651, 12652, 12653, 12657, 12658, 12659, 12660, 12662, 12663, 12664, 12665, 12666, 12667, 12668, 12669, 12670, 12671, 12672, 12673, 12674, 12675, 12676, 12677, 12678, 12679, 12681, 12682, 12683, 12684, 12687, 12688, 12689, 12690, 12691, 12694, 12695, 12696, 12698, 12699, 12702, 12703, 12705, 12707, 12711, 12714, 12715, 12720, 12722, 12724, 12727, 12728, 12730, 12732, 12735, 12736, 12739, 12740, 12743, 12744, 12746, 12748, 12752, 12754, 12755, 12758, 12759, 12760, 12762, 12763, 12764, 12765, 12766, 12767, 12768, 12769, 12772, 12774, 12776, 12161, 12168, 12779, 12784, 12785, 12786, 12787, 12788, 12791, 12792, 12794, 12795, 12796, 12798, 12799, 11779, 10967, 12439, 12441, 12445, 8865, 10900, 12449, 11779, 10967, 12578, 12598, 12312, 12706, 12710, 8865, 12731, 8865, 12747, 11779, 10967, 12706, 12710, 8865, 12800, 12578, 12312, 12731, 8865, 12578, 12731, 12706, 12747, 11779, 10967, 12801, 8865, 12802, 12264, 12267, 12616, 12803, 8865, 12804, 11779, 10967, 12439, 12441, 12449, 12445, 8865, 12237, 11779, 10967, 12229, 12710, 8865, 12231, 8865, 12232, 12616, 12365, 11779, 10967, 12463, 12466, 12444, 12445, 8865, 12438, 12439, 12441, 12447, 12449, 12461, 12465, 12430, 10900, 12235, 12237, 12244, 12245, 11779, 10967, 10900, 12449, 12445, 8865, 12439, 12441, 12264, 12805, 8865, 12806, 12267, 12616, 12343, 12807, 8865, 12808, 12463, 12447, 12466, 12444, 12465, 12430, 12461, 12438, 11779, 10967, 12731, 8865, 12614, 11733, 12578, 12571, 12312, 12706, 12710, 8865, 12747, 12293, 12296, 12299, 12303, 12733, 12749, 12708, 8865, 12604, 12312, 11779, 10967, 12706, 12710, 8865, 12731, 8865, 12811, 12616, 10891, 12337, 10900, 12447, 12461, 12463, 12430, 12444, 12438, 12465, 12466, 11779, 10967, 12430, 10900, 12438, 12447, 12449, 12444, 12445, 12461, 12465, 12466, 12463, 10906, 12351, 10890, 10890, 12351, 11779, 10967, 12366, 10891, 12358, 10900, 10972, 10891, 10993, 8865, 12366, 12614, 12812, 12578, 12571, 12747, 12706, 8865, 12731, 11779, 10967, 
12447, 12449, 12438, 12439, 12441, 12463, 12444, 12445, 8865, 12430, 10900, 12465, 12466, 12461, 12598, 12456, 10972, 12401, 10992, 8865, 12401, 10993, 8865, 12410, 10993, 8865, 12410, 10993, 8865, 11779, 10967, 12430, 10900, 12438, 12439, 12441, 12444, 12445, 8865, 12447, 12449, 12456, 12461, 12462, 12463, 12464, 8865, 12465, 12466, 10906, 11779, 10967, 8865, 12731, 8865, 12614, 12815, 12571, 12616, 12747, 8865, 12706, 12710, 8865, 12816, 12817, 12818, 12819, 12820, 12821, 12823, 12824, 12825, 12826, 12827, 12828, 12830, 12831, 12832, 12833, 12834, 12835, 12837, 12838, 12839, 12840, 12841, 12842, 12844, 12845, 12846, 12847, 12848, 12849, 12852, 12853, 12854, 12855, 12856, 12859, 12862, 12863, 12864, 12865, 12866, 12869, 12871, 12872, 12873, 12874, 12875, 12878, 12881, 12882, 12883, 12884, 12885, 12887, 12888, 12890, 12893, 12895, 12898, 11768, 12903, 12904, 12905, 11779, 10967, 12598, 12616, 12908, 12909, 12910, 12911, 12912, 12914, 12915, 12916, 12917, 12918, 12655, 11826, 12710, 12719, 12751, 12771, 12920, 12921, 12922, 12923, 12924, 12925, 12926, 12929, 12930, 12931, 12932, 12933, 12934, 12935, 12939, 12940, 12941, 12942, 12943, 12944, 12945, 12949, 12950, 12951, 12952, 12953, 12954, 12955, 12959, 12960, 12961, 12962, 12963, 12964, 12965, 12968, 12969, 12970, 12971, 12972, 12973, 12974, 12978, 12979, 12980, 12981, 12982, 12983, 12984, 12987, 12988, 12991, 12992, 12994, 12995, 12996, 12997, 12998, 13001, 12999, 13002, 13004, 13005, 13008, 13009, 13011, 13012, 13015, 13016, 13019, 13020, 13023, 13024, 13027, 13028, 13031, 13029, 13032, 13033, 13034, 13035, 13038, 13039, 13042, 13045, 13043, 122, 123, 124, 125, 126, 127, 13057, 13059, 13060, 12179, 12182, 13066, 13070, 13071, 13072, 13080, 13083, 13086, 13093, 13096, 13099, 13105, 13108, 13120, 13127, 13128, 13158, 12428, 13175, 13185, 13196, 13202, 13208, 13212, 13218, 13224, 13227, 13231, 13234, 13238, 13243, 12576, 13248, 13252, 13256, 13259, 13262, 13267, 13270, 13276, 13277, 13281, 13286, 13290, 13296, 13302, 13306, 13310, 13313, 13316, 13320, 13324, 13327, 12686, 13332, 12693, 13340, 12704, 13345, 13347, 13350, 12729, 13356, 13358, 12745, 13365, 13366, 12761, 13372, 13379, 13380, 8864, 13381, 8865, 13384, 13389, 13391, 11672, 11671, 11670, 11678, 11675, 13242, 13395, 13396, 12438, 13397, 13398, 8796, 8864, 12461, 12444, 13399, 13166, 10992, 13400, 8864, 13401, 8864, 10901, 8865, 13402, 8864, 10902, 8865, 12463, 13139, 13131, 13142, 13223, 13242, 13403, 13404, 10966, 13405, 13406, 13407, 8865, 8864, 10972, 13132, 13408, 13409, 10993, 13410, 8864, 13116, 13411, 13355, 10992, 13412, 8864, 13413, 13133, 13134, 13142, 13223, 13242, 13414, 13415, 12196, 13416, 13417, 10993, 13418, 8864, 13420, 13421, 8864, 10972, 8865, 12200, 12201, 13422, 13355, 10992, 13423, 8864, 13201, 13424, 13425, 13426, 13427, 13139, 13142, 13223, 13242, 13428, 13429, 10966, 13431, 13433, 13434, 13435, 10972, 8865, 8864, 13437, 11672, 11671, 11670, 13078, 13217, 13223, 13242, 13439, 13440, 12212, 13441, 13442, 8796, 8864, 13443, 8864, 10902, 8865, 12218, 13444, 13355, 10992, 13445, 8864, 13446, 8864, 10870, 8865, 13139, 13131, 13142, 13223, 13242, 13447, 13448, 10966, 13132, 13449, 13450, 10993, 13451, 8864, 13116, 13452, 13166, 10992, 13453, 8864, 13454, 13455, 10972, 8865, 8864, 13456, 11672, 11671, 11670, 11678, 11675, 13217, 13242, 13457, 13458, 13459, 13460, 13461, 13462, 13166, 10992, 13463, 8864, 13464, 13465, 13466, 8796, 8864, 13467, 13468, 8864, 10902, 8865, 13469, 13470, 13471, 13472, 8864, 10901, 8865, 13473, 13474, 8864, 10870, 8865, 13475, 13476, 
11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13477, 13478, 13479, 8864, 10901, 8865, 12250, 13480, 8864, 10902, 8865, 12260, 13481, 13166, 10992, 13482, 8864, 12261, 13483, 13484, 8796, 8864, 12262, 13485, 13487, 13489, 13490, 10972, 8865, 8864, 13491, 13493, 13495, 13496, 13497, 13498, 13499, 13500, 13501, 13502, 13133, 13134, 13142, 13223, 13242, 13503, 13504, 10966, 13352, 13505, 13355, 10992, 13506, 8864, 13507, 13508, 13509, 13510, 13511, 8865, 8864, 10972, 13342, 13512, 13513, 10993, 13514, 8864, 13515, 11672, 11562, 12292, 13516, 12295, 13517, 12298, 13518, 12300, 12302, 13519, 13116, 13520, 13521, 13522, 10993, 13523, 8864, 13524, 13525, 8865, 8864, 10972, 12321, 12321, 12322, 13174, 13119, 13217, 13223, 13242, 13526, 13527, 12325, 13528, 13529, 10993, 13530, 8864, 13531, 13355, 10992, 13532, 8864, 13534, 8864, 8865, 10972, 12335, 12341, 13535, 13536, 12338, 12339, 13537, 12341, 12343, 12444, 12461, 12463, 12438, 13538, 13539, 13540, 13541, 13542, 13543, 13544, 13545, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13546, 13547, 13548, 13549, 8864, 10901, 8865, 13550, 13551, 13552, 8864, 10902, 8865, 13553, 13554, 13166, 13555, 13556, 13557, 13558, 13131, 12350, 13559, 13560, 13132, 13561, 13562, 13563, 13139, 13131, 13142, 13223, 13242, 13564, 13565, 10966, 12356, 13566, 13567, 12357, 13568, 11790, 12360, 13569, 13570, 8865, 8864, 13132, 12363, 13571, 13572, 13573, 8864, 12365, 13574, 13133, 13134, 13575, 13577, 13578, 8865, 8864, 10972, 12377, 13579, 12382, 13580, 10993, 13581, 8864, 12382, 13582, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13583, 13584, 13585, 13586, 8864, 10902, 8865, 13587, 13588, 13589, 8796, 8864, 13590, 13591, 13592, 13166, 10992, 13593, 8864, 13594, 13595, 8864, 10901, 8865, 13596, 13597, 13598, 13139, 13207, 13142, 11790, 12604, 13599, 13600, 13601, 8865, 8864, 13352, 12733, 13602, 13603, 13604, 8864, 13605, 13606, 13607, 8864, 13342, 12708, 13608, 13609, 13610, 8864, 13611, 13612, 13613, 8864, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13614, 13615, 13616, 13617, 8864, 10901, 8865, 13618, 13619, 13620, 8796, 8864, 13621, 13622, 13166, 10992, 13623, 8864, 13624, 13625, 8864, 10902, 8865, 13626, 8864, 10993, 8865, 13627, 13628, 13629, 13630, 8797, 13631, 13632, 13633, 13634, 8864, 10993, 8865, 13174, 13207, 13217, 13223, 13242, 13635, 13636, 10966, 10993, 13637, 8864, 12484, 13638, 13355, 10992, 13639, 8864, 13640, 13642, 13643, 8864, 10972, 8865, 12503, 13644, 10993, 13645, 8864, 12510, 13646, 13647, 10993, 13648, 8864, 13649, 13650, 13655, 13656, 13661, 13662, 13667, 13668, 13669, 13670, 13673, 13674, 13680, 13683, 13686, 13689, 13691, 13692, 13695, 13698, 13702, 13704, 13706, 13708, 13709, 13201, 13207, 13217, 13223, 13242, 13712, 13713, 10966, 12571, 11790, 13714, 12604, 12614, 13715, 8865, 8864, 13717, 13721, 13722, 13285, 13295, 13295, 12650, 12648, 13726, 13309, 13305, 13309, 13727, 13337, 8865, 8864, 13342, 12708, 13728, 8865, 13729, 8865, 8864, 13352, 12733, 13355, 8865, 13360, 12749, 13730, 8865, 8864, 13376, 13731, 8865, 8864, 13732, 13735, 13738, 13739, 13742, 13745, 13746, 13749, 13752, 13753, 13756, 13759, 13760, 13763, 13766, 13767, 13770, 13773, 13774, 13777, 13780, 13782, 12781, 12782, 13784, 12789, 12797, 12919, 13791, 13705, 13707, 13792, 13654, 13660, 13666, 13672, 13678, 13794, 13796, 13798, 13800, 13802, 13804, 13808, 12913, 12919, 13812, 13814, 13817, 13786, 13810, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13826, 13833, 13834, 13835, 13836, 13837, 13838, 13839, 13840, 13844, 13846, 13847, 13848, 13849, 13850, 13851, 13852, 13853, 13854, 13855, 13856, 13857, 13858, 13860, 13861, 13862, 13863, 13864, 13865, 13866, 13868, 13869, 13870, 13871, 13872, 13873, 13874, 13875, 13876, 13877, 13878, 13879, 13880, 13882, 13895, 8796, 13899, 8797, 13901, 13827, 13828, 13902, 13905, 13906, 13907, 13908, 13909, 13910, 10966, 12466, 13913, 13916, 13917, 13918, 13919, 13921, 13922, 13924, 12430, 13926, 13927, 13928, 12465, 12447, 13930, 13931, 13932, 13933, 13934, 13935, 13936, 13937, 13883, 13938, 13941, 13945, 13946, 13947, 13948, 13951, 13953, 13954, 13956, 13957, 13959, 13961, 13962, 13963, 13964, 13883, 13965, 10966, 13968, 13971, 13973, 13976, 13977, 13978, 13979, 13980, 13982, 13983, 13985, 13986, 13991, 13992, 13993, 13883, 13994, 13997, 13430, 14002, 14003, 14004, 13436, 14006, 14007, 14008, 14009, 14010, 14011, 13883, 14012, 10966, 14015, 14018, 14019, 14021, 14022, 14023, 14024, 12219, 14026, 14027, 14029, 14031, 14032, 14033, 14034, 14035, 14036, 14037, 13883, 14038, 14041, 14042, 14045, 14047, 14048, 14050, 14051, 14053, 14056, 14057, 14058, 14060, 14061, 14062, 14063, 14064, 14065, 13883, 14066, 10966, 14073, 14074, 14076, 14080, 14081, 14084, 14085, 14086, 14091, 14092, 14093, 14096, 14097, 14098, 14101, 14102, 14103, 14104, 14105, 14106, 14107, 13883, 14108, 10966, 14112, 14113, 14114, 14115, 14117, 14118, 14119, 14120, 14122, 14123, 14125, 14126, 14129, 14130, 14131, 13486, 14136, 14137, 14138, 13492, 14149, 14150, 14151, 14152, 13883, 14153, 14156, 14157, 14159, 14160, 14162, 14168, 14169, 14170, 14171, 14174, 14176, 14178, 14179, 14180, 14182, 14184, 14186, 14187, 14189, 14193, 14195, 14198, 14199, 14200, 14201, 14202, 14203, 14204, 14205, 14206, 14207, 13883, 14208, 10966, 14211, 14214, 14216, 12328, 14218, 14219, 14221, 14223, 14224, 14225, 14226, 14227, 12340, 12336, 14230, 14231, 14233, 12340, 14234, 12342, 12447, 14235, 14236, 14237, 12466, 14238, 12465, 12430, 14247, 14248, 14249, 14250, 14251, 14252, 14253, 14254, 10966, 14259, 14260, 14261, 14265, 14266, 14267, 14270, 14275, 14276, 14279, 14283, 14284, 14285, 14286, 13883, 14287, 14290, 14291, 12355, 14294, 14296, 12359, 14297, 14300, 14301, 14302, 14303, 12362, 14307, 14305, 14308, 12364, 14310, 14311, 14315, 14316, 14317, 14318, 14320, 14322, 14324, 14325, 14327, 14328, 14329, 14330, 14331, 14332, 14333, 14334, 10966, 14339, 14340, 14341, 14345, 14346, 14350, 14351, 14353, 14356, 14357, 14358, 14362, 14363, 14364, 14365, 14366, 12578, 14370, 14371, 14372, 14373, 12731, 14377, 14375, 14381, 14379, 14382, 14383, 12706, 14387, 14385, 14391, 14389, 14392, 14393, 14394, 14395, 14396, 14397, 14398, 14399, 10966, 14404, 14405, 14406, 14410, 14411, 14414, 14415, 14417, 14420, 14421, 14422, 14424, 14425, 14426, 14431, 14436, 14437, 14438, 14439, 14440, 14441, 14442, 13883, 14443, 14446, 14447, 14449, 14450, 14452, 14453, 14455, 14459, 14460, 14461, 14462, 14464, 14466, 14467, 14470, 14472, 14474, 14476, 14478, 14484, 14485, 14487, 14490, 14492, 14497, 14498, 14499, 14500, 14501, 14502, 14505, 14506, 12578, 14507, 14509, 14510, 10972, 14512, 14513, 14514, 14516, 14517, 
14518, 14519, 14520, 14521, 14523, 14524, 14525, 13881, 13883, 14527, 10993, 14528, 14529, 14530, 14531, 12706, 10993, 14533, 8864, 10993, 14535, 14536, 14537, 14538, 12731, 14539, 10992, 14540, 8864, 14541, 14542, 12747, 10993, 14544, 14545, 13368, 13374, 14546, 10994, 14548, 14549, 14550, 14551, 14553, 14554, 14556, 14557, 14559, 14560, 14562, 14563, 14565, 14566, 14568, 14569, 13382, 13703, 14572, 14573, 14575, 13904, 13903, 14576, 14577, 12958, 12070, 12977, 12078, 14579, 14580, 14430, 12814, 12813, 14072, 14078, 12814, 12813, 14428, 12814, 12813, 14430, 12814, 12813, 12814, 12813, 14269, 12814, 12813, 14428, 14408, 12814, 12813, 14428, 14430, 12814, 12813, 14269, 14408, 12814, 12813, 12814, 12813, 14408, 12814, 12813, 14269, 14428, 12814, 12813, 14430, 12814, 12813, 14343, 14430, 14349, 12814, 12813, 12814, 12813, 14428, 12814, 12813, 14408, 14413, 12814, 12813, 14428, 14430, 12814, 12813, 14582, 14583, 14584, 14482, 14480, 14585, 14586, 13684, 13690, 13696, 13701, 13703, 13705, 13707, 12900, 12901, 12902, 12906, 12037, 12038, 12039, 14594, 14595, 12938, 12938, 12948, 12958, 12070, 12977, 12078, 14571, 14574, 14599, 14596, 14597, 14578, 14581, 14587, 14588, 14589, 14590, 14591, 14592, 14597, 14593, 14600, 14596, 14597, 14598, 121, 122, 123, 124, 125, 126, 127, 14765, 14767, 13063, 14769, 14770, 13199, 14772, 13205, 14775, 13237, 10990, 11774, 10988, 13230, 14778, 14777, 14779, 14781, 14786, 14788, 14789, 14792, 14793, 14794, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14802, 13237, 11774, 10990, 13230, 10988, 14803, 13940, 13246, 12596, 12592, 14805, 14809, 14813, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14819, 13230, 10990, 10988, 11553, 13237, 14821, 14820, 14823, 12198, 12197, 12199, 12255, 12465, 14825, 14831, 13076, 13215, 13141, 13293, 13221, 13335, 14837, 13237, 10988, 11774, 13230, 10990, 14838, 13996, 14840, 13265, 14841, 14844, 13076, 14845, 13077, 13215, 13079, 13293, 13221, 13335, 14851, 11774, 13237, 13230, 10988, 10990, 14853, 14852, 14855, 12217, 12216, 14857, 14861, 14863, 12220, 12288, 12228, 12224, 14865, 13129, 13130, 13215, 13141, 13293, 13221, 13335, 14872, 10988, 13237, 11774, 10990, 13230, 14873, 14040, 14876, 14880, 13265, 14882, 13150, 14885, 13205, 14888, 13215, 13156, 13335, 14891, 10988, 11774, 13237, 10990, 13230, 14893, 14892, 14895, 14897, 14899, 14902, 14905, 13199, 14908, 13205, 14911, 13215, 13156, 13293, 13157, 13335, 14915, 13230, 13237, 11553, 10990, 10988, 14917, 14916, 12249, 14918, 12254, 12255, 12259, 14922, 14927, 14930, 14933, 14934, 14937, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14942, 13237, 10988, 10990, 13230, 11553, 14943, 14155, 14947, 12587, 13103, 13246, 14949, 14953, 14955, 12291, 12281, 12289, 12288, 12466, 12291, 12290, 13114, 13115, 14963, 14965, 12320, 12315, 12317, 12316, 12318, 12320, 12319, 13129, 13130, 13215, 13211, 13293, 13221, 13335, 14975, 11774, 10988, 10990, 13237, 13230, 14977, 14976, 14979, 14981, 14983, 12330, 12329, 12332, 12331, 14985, 13129, 13130, 13215, 13141, 14990, 14991, 14995, 14997, 14998, 15002, 15004, 15005, 15006, 15009, 13215, 13156, 13293, 13157, 10990, 11774, 13237, 10988, 13230, 15014, 15013, 15015, 15018, 13215, 13211, 13129, 13130, 13215, 13141, 13293, 13221, 13335, 15029, 10988, 10990, 13230, 13237, 11774, 15030, 14289, 15033, 13265, 12596, 12592, 12587, 12390, 13246, 15036, 14299, 15042, 15044, 15046, 12372, 12369, 12370, 12372, 12371, 15049, 15054, 13199, 15057, 13205, 15060, 13215, 13156, 13293, 13157, 10990, 13237, 13230, 10988, 11774, 15065, 15064, 15066, 15069, 15072, 
15074, 13215, 13141, 12587, 12390, 13265, 12596, 12592, 13246, 15082, 14369, 15087, 15089, 15091, 15094, 15096, 15098, 13150, 15099, 13205, 15102, 13215, 13156, 13293, 13157, 10990, 10988, 11774, 13230, 13237, 15107, 15106, 15108, 15111, 15114, 15116, 15119, 15122, 15123, 13199, 13205, 13215, 13211, 13293, 13221, 13335, 15130, 10988, 10990, 11774, 13237, 13230, 15131, 14445, 15133, 15137, 12495, 12493, 12495, 12494, 15139, 15143, 15146, 13199, 13205, 13215, 13211, 13293, 13221, 10988, 10990, 13237, 13230, 11774, 15161, 14504, 12612, 12570, 13246, 15164, 12587, 13251, 12596, 12592, 13265, 12612, 12608, 15168, 13284, 13280, 13293, 13289, 13299, 15176, 13319, 10988, 11824, 13323, 10990, 11824, 10988, 13319, 13323, 10990, 13319, 10990, 13323, 10988, 11824, 13330, 15181, 13335, 15182, 15184, 15189, 15190, 15192, 15193, 15198, 15200, 15202, 15205, 15206, 15209, 13371, 15210, 15212, 15216, 15229, 15230, 15171, 15172, 14771, 15234, 15235, 15172, 15222, 15238, 15224, 15239, 15226, 15240, 15228, 15241, 13914, 14428, 13920, 14430, 13949, 13955, 13960, 13969, 14319, 13981, 13988, 13989, 13990, 13999, 14016, 14430, 14428, 14043, 14049, 14059, 15244, 15245, 15246, 15247, 15248, 15249, 15250, 15251, 15252, 15253, 14094, 14094, 14099, 14100, 14430, 14121, 14127, 14428, 14132, 14139, 15254, 15255, 15256, 15257, 15258, 15259, 15260, 15261, 15262, 15263, 14158, 14172, 14177, 14181, 14185, 14183, 14188, 14188, 14190, 14191, 14192, 14326, 14321, 14319, 14212, 14463, 14269, 14428, 14430, 14408, 15264, 15265, 15266, 15267, 15268, 15269, 15270, 15271, 15272, 15273, 15274, 15275, 15276, 15277, 15278, 15279, 15280, 15281, 15282, 15283, 14319, 14321, 14326, 15284, 15285, 15286, 15287, 15288, 15289, 15290, 15291, 15292, 15293, 15294, 15295, 15296, 15297, 15298, 15299, 15300, 15301, 15302, 15303, 14451, 14463, 14468, 15148, 15149, 15150, 15307, 15308, 15151, 15152, 15311, 15153, 15312, 15154, 15313, 15155, 15314, 15220, 15315, 15171, 15316, 15172, 15317, 15216, 15318, 15319, 15320, 15222, 15321, 15224, 15322, 15226, 15323, 15228, 15324, 15171, 15172, 15216, 15327, 15218, 15328, 15220, 15329, 15222, 15330, 15224, 15331, 15226, 15332, 15228, 15333, 15334, 15335, 15337, 15338, 15339, 15340, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15350, 15351, 15352, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 15360, 15361, 15362, 15365, 15366, 15367, 15369, 15370, 15371, 15372, 15373, 13912, 15378, 15380, 15383, 15384, 15385, 15386, 15387, 15388, 15389, 15390, 15392, 15393, 15394, 15395, 15396, 15397, 15399, 15400, 15401, 15402, 15403, 15404, 15405, 15406, 15407, 15408, 15409, 15410, 15411, 15413, 15414, 15415, 15416, 15417, 13967, 15420, 15421, 15422, 15423, 15424, 15425, 15426, 15427, 15428, 15429, 15430, 15431, 15432, 15433, 15435, 15436, 15437, 15438, 15439, 15440, 15443, 15444, 15446, 15447, 15448, 15449, 15450, 15451, 15452, 15453, 15455, 15456, 15457, 15458, 15459, 14014, 15463, 15464, 15465, 15467, 15468, 15469, 15470, 15471, 15472, 15473, 15474, 15475, 15476, 15477, 15478, 15479, 15481, 15482, 15483, 15484, 15485, 15486, 15488, 15489, 15490, 15491, 15492, 15493, 
15494, 15496, 15497, 15498, 15500, 15501, 15502, 15503, 15504, 14068, 15507, 15509, 15510, 15511, 15512, 15513, 15514, 15516, 15517, 15518, 15519, 15520, 15522, 15523, 15524, 15525, 15526, 14110, 15529, 15530, 15531, 15532, 15533, 15534, 15535, 15538, 15540, 15541, 15542, 15543, 15544, 15545, 15546, 15548, 15549, 15550, 15551, 15552, 15553, 15555, 15556, 15557, 15558, 15559, 15560, 15562, 15563, 15564, 15565, 15566, 15567, 15568, 15569, 15570, 15571, 15572, 15573, 15574, 15575, 15576, 15577, 15578, 15579, 15580, 15581, 15582, 15583, 15584, 15585, 15586, 15588, 15589, 15590, 15591, 15592, 14210, 15595, 15597, 15598, 15599, 15600, 15601, 15602, 15603, 15604, 15605, 15606, 14989, 14994, 14996, 15615, 15617, 15618, 15619, 15620, 15621, 15622, 15623, 15624, 15625, 14256, 15628, 15629, 15630, 15631, 15632, 15633, 15634, 15635, 15636, 15637, 15638, 15640, 15641, 15642, 15643, 15644, 15645, 15032, 15648, 15649, 15650, 15651, 15652, 15653, 15655, 15041, 15045, 15659, 15660, 15661, 15662, 15663, 15664, 15665, 15666, 15667, 15668, 15670, 15671, 15672, 15673, 15674, 15675, 15676, 15677, 15678, 14336, 15681, 15683, 15684, 15685, 15686, 15687, 15688, 15689, 15690, 15691, 15692, 15694, 15086, 15093, 15701, 15702, 15703, 15705, 15706, 15707, 15708, 15709, 15710, 15711, 15712, 15713, 14401, 15716, 15718, 15719, 15720, 15722, 15723, 15724, 15725, 15726, 15727, 15728, 15729, 15731, 15732, 15733, 15734, 15735, 15736, 15738, 15739, 15740, 15741, 15742, 15743, 15744, 15745, 15746, 15747, 15748, 15749, 15750, 15751, 15752, 15753, 15754, 15755, 15756, 15757, 15758, 15760, 15761, 15762, 15764, 15765, 15766, 15767, 15768, 15769, 15770, 15771, 15772, 15773, 15774, 15775, 15776, 15778, 15779, 15780, 15781, 15782, 15783, 15784, 15785, 15786, 15787, 15788, 15789, 15790, 15791, 15792, 15793, 15795, 15797, 15188, 15799, 15801, 15197, 15803, 15204, 15806, 15808, 15810, 15811, 15814, 15815, 15816, 15817, 15819, 15211, 15820, 15822, 15824, 15826, 12814, 12813, 15828, 15377, 15829, 15830, 12814, 12813, 12814, 12813, 15831, 15832, 15833, 15834, 15835, 15836, 15837, 15838, 15839, 15840, 15442, 15841, 15445, 15842, 15462, 15843, 15844, 14025, 15845, 15846, 15847, 15849, 15508, 15853, 15856, 15858, 15859, 15860, 15861, 15862, 15863, 15864, 15536, 15865, 15866, 15537, 15867, 15539, 15869, 15871, 15874, 15878, 15879, 15880, 15881, 15882, 15883, 15884, 15885, 15886, 15887, 15888, 15889, 15890, 15891, 15892, 14217, 15893, 15699, 12814, 12813, 15894, 15895, 15896, 12814, 12813, 15897, 12814, 12813, 15898, 15902, 15906, 15908, 15911, 15915, 15696, 15699, 15700, 15697, 15696, 15700, 15657, 15697, 15918, 15919, 15920, 15921, 15682, 15926, 15928, 15696, 15697, 15699, 15700, 15931, 15717, 15935, 15721, 15939, 15941, 15942, 15943, 15944, 15945, 15946, 15947, 15949, 15950, 15952, 15954, 15956, 15958, 15960, 15962, 15211, 15964, 15968, 15970, 15972, 15974, 15976, 15977, 15211, 15978, 15980, 15982, 15984, 15986, 15988, 15990, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16134, 16136, 15375, 16145, 16147, 16150, 16152, 16157, 16164, 16166, 16169, 16171, 15419, 16176, 16179, 16184, 16186, 16189, 16191, 16200, 16202, 16205, 
16207, 15461, 16211, 16215, 16217, 16222, 16224, 16227, 16229, 16240, 16243, 16245, 15506, 16256, 16258, 16261, 16263, 15528, 16277, 16279, 16282, 16284, 16289, 16294, 16296, 16299, 16305, 16307, 16310, 16314, 16316, 16319, 16321, 15594, 16327, 16329, 16334, 16340, 16342, 16344, 16346, 15627, 16352, 16356, 16358, 16361, 16363, 16369, 16371, 16377, 16380, 16387, 16389, 16391, 16393, 15680, 16400, 16402, 16405, 16414, 16416, 16418, 16420, 15715, 16431, 16433, 16436, 16438, 16444, 16446, 16453, 16455, 16457, 16459, 16463, 16466, 16468, 16471, 16473, 16474, 16476, 16479, 16481, 16484, 16486, 16489, 16491, 16496, 16498, 16499, 16501, 16503, 16505, 15777, 15794, 15796, 15166, 15763, 16129, 16128, 16497, 16500, 15158, 15157, 15794, 15364, 15363, 16500, 15763, 15166, 16497, 15158, 15157, 15794, 15796, 16497, 15763, 15166, 16500, 15777, 15794, 15796, 16512, 16504, 15368, 16132, 15794, 15730, 16517, 16518, 16520, 16140, 16523, 16524, 16141, 16525, 16526, 16142, 14799, 14798, 15794, 15391, 13942, 16159, 16160, 16161, 14816, 14815, 15794, 15412, 16175, 13974, 16181, 16182, 15078, 14833, 13987, 15022, 14834, 15794, 15434, 16537, 14000, 16196, 16539, 14848, 16198, 15794, 15454, 16541, 16213, 16544, 16214, 16219, 14869, 14868, 15794, 15480, 16233, 16234, 14054, 16236, 15495, 16238, 15794, 15499, 16249, 16549, 16250, 16251, 16252, 15515, 16254, 15794, 15521, 12814, 12813, 16268, 12814, 12813, 12814, 12813, 16272, 16273, 16559, 15022, 15077, 16562, 14134, 16274, 16564, 14939, 14938, 15794, 15547, 16288, 14165, 16292, 16293, 15127, 15561, 16572, 15022, 15077, 16303, 14196, 16304, 14313, 14972, 14971, 15794, 15587, 16325, 16583, 16326, 16331, 15022, 15077, 16336, 16585, 14992, 15608, 16408, 16337, 16338, 16586, 16587, 16591, 16592, 16594, 16595, 15616, 16339, 15794, 15730, 16350, 16351, 16443, 15022, 15077, 15693, 15023, 16408, 16602, 16410, 16603, 16604, 16605, 15026, 15025, 15794, 15639, 16367, 16606, 16607, 15034, 15654, 16374, 16375, 16608, 16376, 16609, 15048, 15047, 14313, 16382, 16383, 15669, 16385, 15794, 15730, 16397, 16614, 16398, 16399, 15078, 15077, 15693, 15081, 16408, 16409, 16617, 16618, 16410, 16619, 16620, 15704, 16412, 15794, 15730, 16424, 16622, 16425, 16426, 16427, 16624, 16428, 15127, 15126, 15794, 15730, 16442, 16443, 16448, 16449, 16450, 15158, 15157, 15794, 15796, 16500, 15763, 15166, 16497, 15777, 15794, 15796, 16500, 16497, 16641, 16504, 15158, 15157, 15794, 15796, 16497, 16500, 15166, 15763, 15777, 15794, 15796, 16497, 16500, 16502, 16649, 16504, 15813, 15812, 15813, 15957, 15232, 15231, 15237, 15236, 15233, 15827, 15825, 15823, 15821, 15243, 15242, 15305, 15306, 15304, 15310, 15309, 15959, 15951, 15955, 15953, 15959, 15957, 15963, 15961, 15967, 15965, 15967, 15966, 15975, 15973, 15971, 15969, 15326, 15325, 15983, 15979, 15983, 15981, 15991, 15989, 15987, 15985, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16768, 16773, 16778, 16785, 16789, 16797, 16800, 16805, 16810, 16814, 16821, 16829, 16835, 16843, 16851, 16856, 16862, 16871, 16873, 16875, 16883, 15160, 15159, 16884, 16885, 16886, 15165, 15167, 15163, 16887, 14508, 16888, 16889, 16890, 16878, 16891, 16880, 15160, 15159, 16892, 16893, 16894, 16895, 16896, 16897, 16880, 15163, 15167, 16898, 15165, 16899, 
14508, 16868, 16900, 16878, 15160, 15159, 16901, 16902, 16903, 16904, 16905, 16878, 15167, 15165, 15163, 16906, 14508, 16907, 16868, 16908, 16880, 16909, 15175, 15174, 15173, 16910, 16911, 16913, 16882, 15012, 15011, 16914, 16915, 16916, 16917, 16918, 16921, 16922, 16924, 16925, 16927, 14801, 14800, 16928, 16929, 16930, 16931, 13943, 16932, 16933, 16934, 16935, 14818, 14817, 16936, 16937, 16938, 16939, 16940, 14314, 16941, 13419, 14312, 16942, 16943, 15129, 15079, 16944, 16945, 16946, 14836, 14835, 16947, 16948, 16949, 16950, 16952, 16953, 14850, 14849, 16955, 16956, 16957, 16958, 12814, 12813, 16960, 16962, 12814, 12813, 12814, 12813, 16963, 14871, 14870, 16964, 16965, 16966, 16967, 16968, 16969, 16970, 16971, 15012, 14890, 16972, 16973, 16974, 16975, 16976, 16978, 16979, 16980, 14099, 14914, 14913, 16981, 16982, 16983, 16984, 16985, 16986, 16987, 16988, 16989, 16990, 16991, 16992, 16993, 15129, 15079, 16995, 16996, 16998, 16999, 14941, 14940, 17001, 17002, 17003, 17004, 17005, 14166, 17006, 14164, 14163, 17007, 17008, 15012, 15128, 17009, 17010, 14185, 12809, 15129, 15079, 17012, 17013, 17014, 17015, 17016, 14314, 17017, 13576, 14312, 14974, 14973, 17018, 17019, 17020, 17021, 17022, 17024, 14457, 13533, 14456, 17025, 15129, 15079, 17026, 17027, 17028, 14993, 17030, 15167, 14229, 17031, 17032, 17033, 17034, 17035, 17037, 17039, 15012, 15011, 17041, 17042, 17043, 17044, 17045, 17046, 17047, 15129, 15079, 17048, 17049, 17050, 14367, 17051, 15163, 15167, 15080, 17052, 17054, 15028, 15027, 17058, 17059, 17060, 17061, 17062, 17065, 15035, 15037, 14295, 17066, 17067, 17068, 17070, 15129, 15079, 17072, 17073, 14314, 17074, 13576, 14312, 17075, 17076, 15063, 15062, 17077, 17078, 17079, 17080, 17081, 17083, 17084, 15129, 15079, 17085, 17086, 17087, 14367, 15163, 17088, 15167, 15080, 17089, 17090, 17093, 15105, 15104, 17096, 17097, 17098, 17099, 17100, 17102, 17103, 17104, 17106, 15129, 15128, 17107, 17108, 17109, 17110, 17111, 17112, 14457, 13641, 14456, 17113, 17114, 17115, 15160, 15159, 17116, 17117, 17118, 17119, 17120, 16880, 17121, 14508, 17122, 15165, 15163, 15167, 16868, 17123, 16878, 17124, 15175, 15174, 15173, 17125, 17126, 17127, 16880, 17128, 16878, 17130, 16882, 15160, 15159, 17131, 17132, 17133, 17134, 17135, 16878, 17136, 16880, 15165, 17137, 14508, 15167, 17138, 15163, 16868, 17139, 15175, 15174, 15173, 17140, 17141, 16877, 17142, 16878, 16879, 17143, 16880, 17144, 16881, 17146, 16882, 17147, 17148, 17149, 17150, 17151, 17152, 17153, 17154, 17155, 17156, 17157, 17158, 17159, 17160, 17161, 16920, 17105, 16954, 16951, 17105, 16959, 17105, 16977, 17105, 17101, 17105, 16994, 17000, 16997, 17101, 17105, 17101, 17105, 17101, 17105, 17105, 17101, 17101, 17105, 17105, 17082, 17105, 17101, 17162, 17163, 17164, 17165, 17166, 17167, 17168, 17169, 17170, 17171, 17172, 17173, 17174, 17175, 17176, 17177, 17178, 17179, 17180, 17181, 17182, 17183, 17184, 17185, 17186, 17187, 17188, 17189, 17190, 17191, 17192, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17280, 17281, 17282, 17283, 17284, 17285, 17286, 17287, 17288, 17290, 17291, 17292, 17293, 17294, 17295, 17296, 17297, 17298, 17299, 17301, 17302, 17306, 17307, 17308, 17310, 17311, 17314, 17316, 17317, 17318, 17319, 17322, 17325, 17326, 17327, 17329, 17331, 17332, 17334, 17335, 17336, 17337, 17342, 17343, 17344, 17345, 17347, 17349, 17351, 17353, 17354, 17355, 16912, 17359, 17360, 17361, 17362, 17372, 17373, 
17374, 17378, 17383, 17384, 17385, 17390, 17392, 17393, 17396, 17397, 17398, 17401, 17402, 17403, 17409, 17410, 17411, 17415, 17416, 17419, 17420, 17421, 17422, 17424, 17425, 17426, 17434, 17435, 17436, 17444, 17445, 17446, 17447, 17451, 17454, 17456, 17460, 17461, 17462, 17466, 17467, 17468, 17473, 17475, 17476, 17479, 17480, 17481, 17483, 12810, 17484, 17485, 17486, 17487, 17492, 17494, 17495, 17496, 17497, 17498, 17504, 17505, 17506, 17508, 17509, 17510, 17513, 17515, 17516, 17524, 17525, 17526, 17533, 17534, 17535, 17538, 17540, 17541, 17542, 17545, 17546, 17547, 17553, 17554, 17555, 17560, 17561, 17562, 17564, 17566, 17567, 17570, 17571, 17572, 17579, 17580, 17581, 17584, 17585, 17587, 17588, 17592, 17593, 17594, 17603, 17604, 17605, 17611, 17612, 17613, 17617, 17618, 17619, 17624, 17626, 17628, 17629, 17630, 17631, 17633, 17635, 17636, 17637, 17641, 17643, 17129, 17645, 17646, 17647, 17648, 17653, 17655, 17656, 17658, 17659, 17661, 17662, 17664, 17665, 17666, 17669, 17671, 17672, 17674, 17676, 17145, 17678, 17679, 17681, 17683, 17685, 17688, 17690, 17692, 17369, 17367, 17609, 17602, 17615, 17694, 17695, 17371, 17601, 17382, 17615, 17609, 17381, 17609, 17389, 17615, 17395, 17568, 17569, 17609, 17610, 17615, 17696, 17408, 17610, 17697, 17698, 17615, 17699, 17418, 17609, 17431, 17615, 17433, 17609, 17430, 17609, 17700, 17601, 17615, 17441, 17442, 17440, 17602, 17701, 17702, 17703, 17610, 17609, 17615, 17704, 17459, 17609, 17705, 17615, 17706, 17465, 17615, 17707, 17602, 17531, 17708, 17532, 17609, 17615, 17530, 17709, 17601, 17609, 17472, 17615, 17478, 17615, 17710, 17711, 17609, 17610, 17615, 17610, 17491, 17489, 17609, 17615, 17569, 17610, 17609, 17502, 17615, 17503, 17609, 17029, 17064, 17071, 17063, 17712, 17601, 17609, 17530, 17532, 17531, 17615, 17713, 17602, 17601, 17714, 17531, 17615, 17530, 17609, 17715, 17602, 17532, 17716, 17609, 17532, 17602, 17531, 17530, 17717, 17601, 17615, 17057, 17055, 17056, 17053, 17071, 17063, 17064, 17069, 17569, 17615, 17610, 17609, 17578, 17601, 17577, 17576, 17718, 17615, 17719, 17602, 17609, 17094, 17095, 17092, 17091, 17600, 17615, 17602, 17599, 17601, 17720, 17721, 17598, 17609, 17610, 17609, 17615, 17616, 17722, 17724, 17727, 17729, 17731, 17733, 17735, 17737, 17739, 17741, 17743, 17745, 17747, 17749, 17751, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17300, 16462, 17305, 17814, 17309, 17820, 16462, 17825, 17328, 17330, 17831, 16462, 17835, 17837, 17838, 17352, 17842, 15178, 15180, 14522, 14496, 15179, 17846, 16770, 17849, 16155, 17852, 17853, 16780, 17856, 17857, 17859, 17862, 16194, 17865, 16791, 17868, 17870, 17872, 17874, 16232, 17877, 16802, 17881, 16807, 17887, 17890, 16287, 17893, 17894, 17896, 17900, 17902, 17905, 17906, 17908, 16823, 17911, 17914, 17917, 17918, 17920, 16831, 17923, 17537, 17539, 17928, 17930, 16366, 17552, 17934, 17936, 17939, 17940, 17942, 16845, 17945, 17583, 17949, 17950, 17952, 16853, 17955, 16441, 17958, 17961, 16462, 17625, 17627, 17967, 17634, 17972, 15179, 14496, 15180, 15178, 14522, 17978, 16462, 17983, 17984, 17660, 17663, 17989, 14522, 
15180, 15179, 15178, 14526, 17819, 17995, 17818, 17991, 17995, 17830, 17991, 17824, 17834, 17995, 17840, 17991, 18001, 17993, 17970, 17991, 17964, 17995, 17845, 18002, 18005, 18006, 18007, 18008, 18009, 18012, 18013, 18014, 18015, 18016, 18017, 18018, 18019, 18020, 18021, 18022, 18023, 18024, 18025, 18026, 18028, 18029, 18032, 18034, 18035, 18036, 18037, 18038, 18039, 18040, 18041, 18043, 18044, 18045, 18046, 18047, 18048, 17601, 17443, 18052, 18053, 18054, 17531, 18050, 17602, 17458, 18056, 18057, 18059, 17453, 17601, 18061, 18062, 18064, 18065, 18067, 18068, 18069, 18070, 18072, 18073, 18074, 18075, 18076, 18077, 18080, 18081, 18078, 18082, 18083, 18084, 18085, 18086, 18087, 18088, 18089, 18090, 18091, 18092, 18093, 18094, 18095, 18096, 18097, 18098, 18100, 18101, 18102, 18103, 18104, 18105, 18107, 18108, 18110, 18111, 18112, 18113, 18115, 18116, 18118, 18119, 18120, 18121, 18122, 18124, 18125, 18126, 18127, 18128, 18129, 18130, 18131, 18132, 18133, 18134, 18135, 18136, 18137, 18138, 18139, 18140, 18141, 18143, 18145, 18146, 18147, 18148, 18149, 18150, 18151, 18152, 18153, 18154, 18155, 18158, 18159, 18160, 18161, 18162, 18163, 18164, 17995, 17970, 17991, 17964, 17975, 17993, 17995, 17974, 17977, 17991, 18172, 17991, 17995, 17982, 17981, 17992, 17991, 17993, 17995, 17997, 17994, 18177, 15996, 16004, 15992, 15993, 15995, 15994, 15997, 16001, 16000, 15999, 16002, 16004, 16003, 15349, 16007, 16006, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18304, 18305, 18306, 18309, 18310, 18311, 18314, 18315, 18316, 18319, 18321, 18322, 18323, 18324, 18325, 18326, 18327, 18328, 18329, 18331, 18332, 18333, 18335, 18336, 18337, 18338, 18339, 18343, 18344, 18345, 18346, 18347, 18348, 18349, 18350, 18351, 18352, 18354, 17899, 18356, 18357, 18359, 18360, 18361, 18362, 18363, 18365, 18366, 18367, 18368, 18371, 18372, 18373, 18375, 18376, 18378, 18379, 18380, 18381, 18384, 18385, 18386, 18387, 18388, 18389, 18390, 18391, 18394, 18396, 18397, 18398, 18399, 18400, 18401, 18402, 18403, 18406, 18408, 18409, 18410, 18411, 18412, 18413, 18414, 18415, 18416, 18417, 18418, 18419, 18420, 18421, 18422, 18423, 18424, 18426, 18427, 18428, 18429, 18430, 18431, 18433, 18435, 18437, 18011, 17380, 18440, 18442, 18445, 18448, 18450, 18027, 18454, 17601, 17417, 17423, 18031, 18456, 18458, 18460, 18463, 18464, 18466, 18468, 18470, 18471, 18475, 18472, 18477, 18478, 18482, 18483, 18480, 18060, 18485, 18486, 18066, 18489, 18491, 18494, 18498, 18501, 18503, 18507, 18511, 18515, 18099, 18519, 18521, 18523, 18525, 18526, 18528, 18114, 18117, 18533, 18535, 18123, 18540, 18544, 18547, 18551, 18553, 18142, 18144, 18558, 18562, 18564, 18566, 18157, 18570, 18573, 18574, 18575, 18576, 18577, 18578, 18579, 18580, 18581, 18582, 18583, 18585, 18586, 18587, 18588, 18589, 18590, 18591, 18592, 18593, 18594, 18596, 18597, 18598, 18599, 15336, 15996, 18600, 18601, 18602, 18603, 18604, 18605, 18606, 16005, 18607, 18608, 18609, 16008, 18610, 18611, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18689, 18690, 18692, 18693, 18695, 18696, 18698, 18699, 18701, 18704, 18706, 18708, 18712, 18714, 18716, 18718, 
18720, 18723, 18726, 18730, 18733, 18735, 18737, 18739, 18740, 18744, 18746, 18748, 18750, 18753, 18754, 18756, 18757, 17639, 18762, 18763, 18765, 18767, 17667, 18770, 18772, 18774, 18776, 18778, 18782, 18784, 18786, 18788, 18790, 18792, 18793, 17394, 18796, 18798, 18800, 18801, 18802, 18805, 18807, 18809, 18811, 18474, 18815, 18816, 18481, 18820, 18822, 18824, 17477, 18828, 17568, 17507, 18833, 18835, 18837, 18839, 18841, 18843, 17568, 18848, 18850, 18853, 18855, 17614, 18860, 18863, 18865, 18867, 18870, 18873, 18875, 18877, 18880, 18883, 18884, 15998, 18888, 18892, 18896, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18944, 18946, 18948, 18950, 18952, 18953, 18954, 18955, 18956, 18957, 18958, 18959, 18960, 18961, 18963, 18965, 18967, 18969, 18971, 18972, 18973, 18975, 18977, 18978, 18980, 18982, 17817, 18983, 17829, 18985, 17839, 18988, 18991, 18994, 18995, 18996, 18998, 18999, 19001, 19002, 18476, 18814, 19006, 18819, 19010, 19012, 17614, 19013, 19014, 19015, 17518, 19016, 19018, 19020, 17543, 17557, 19022, 19023, 17589, 19025, 19027, 17969, 19029, 17987, 19033, 19038, 19039, 19041, 19042, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19075, 19093, 19096, 19072, 19098, 19073, 19100, 19074, 19102, 19103, 19077, 19104, 19078, 19079, 18444, 19091, 19080, 19081, 18803, 18804, 19082, 19083, 19111, 19112, 19084, 19114, 19091, 19116, 19085, 18493, 19087, 19118, 19091, 18506, 19086, 18510, 19091, 19122, 19123, 19124, 19087, 19125, 19091, 19126, 19088, 19127, 19091, 19128, 19089, 19129, 19091, 19130, 19090, 19131, 19091, 18569, 19092, 19133, 19134, 19095, 19135, 19136, 19137, 18890, 19139, 19140, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19203, 19099, 19205, 19101, 19207, 18780, 19200, 19210, 19212, 19213, 19214, 19215, 19216, 19217, 19218, 19220, 19221, 19224, 19225, 19226, 19228, 19229, 19230, 18497, 19232, 19233, 19234, 19235, 19236, 18514, 19240, 19242, 19243, 19244, 19245, 19246, 18847, 19248, 19250, 18560, 19252, 19254, 19255, 19256, 18859, 19201, 19259, 19260, 19202, 19040, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18987, 19334, 19338, 19349, 19351, 19353, 19355, 19357, 19360, 19362, 19364, 18852, 19370, 19372, 19373, 19375, 19376, 19329, 19331, 19241, 19211, 19227, 19249, 19253, 19009, 19105, 19110, 19346, 19223, 19238, 19239, 19119, 19222, 19107, 19342, 18997, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19456, 
19460, 19463, 19464, 19465, 19467, 19469, 19471, 19473, 19474, 19209, 19475, 19476, 19477, 19461, 19478, 19479, 19480, 19481, 19458, 19468, 19482, 19483, 19484, 19459, 19466, 19485, 19486, 19462, 19487, 19488, 19489, 19490, 19491, 19258, 19261, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19584, 19594, 19598, 19603, 19588, 19604, 19608, 19586, 19587, 19589, 19609, 19612, 19585, 19595, 19599, 19601, 19606, 19613, 19616, 19590, 19618, 19591, 19619, 18882, 19036, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19712, 19716, 19719, 19720, 19721, 19724, 19597, 19717, 19722, 19611, 19726, 19731, 19733, 19265, 19735, 19736, 19264, 19262, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19715, 19718, 19843, 19845, 19725, 19849, 19853, 18879, 18887, 18891, 18895, 19856, 19037, 19857, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19968, 19728, 19970, 19971, 19972, 19975, 19976, 19977, 19978, 19980, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20096, 20098, 19974, 20102, 19855, 20104, 20105, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 
127, 20100, 20225, 20226, 20228, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20352, 20354, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19377, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20608, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20230, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20481, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
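// h_C: large precomputed host-side integer table (presumably index/reference data
// consumed elsewhere in this file; its exact role is not documented at this point).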
int h_C[] = {
16621, 15937, 16626, 16627, 16627, 16628, 17723, 17725, 17728, 17730, 17732, 17734, 17736, 17738, 17740, 17742, 17744, 17746, 17748, 17750, 17752, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17811, 17807, 17813, 17815, 17816, 17821, 17807, 17826, 17827, 17828, 17832, 17807, 17836, 17346, 17348, 17841, 17843, 17809, 17809, 17808, 17810, 17809, 17847, 17792, 17850, 17793, 17379, 17854, 17794, 17391, 17858, 17860, 17863, 17795, 17866, 17796, 17869, 17871, 17873, 17875, 17797, 17878, 17798, 17882, 17799, 17888, 17891, 17800, 17474, 17895, 17897, 5791, 17903, 17493, 17907, 17909, 17801, 17912, 17915, 17514, 17919, 17921, 17802, 17924, 17926, 17927, 17929, 17931, 17803, 17933, 17935, 17937, 17565, 17941, 17943, 17804, 17946, 17948, 17586, 17951, 17953, 17805, 17956, 17806, 17959, 17962, 17807, 17965, 17966, 17968, 17971, 17973, 17809, 17810, 17809, 17809, 17808, 17979, 17807, 17657, 17985, 17986, 17988, 17990, 17808, 17809, 17809, 17809, 17810, 17315, 17675, 17313, 17675, 17675, 17333, 17675, 17324, 17341, 17675, 17350, 17675, 17687, 17675, 17632, 17675, 17623, 17675, 17844, 18003, 6238, 6239, 6240, 6241, 6242, 6245, 6246, 6248, 6249, 6250, 6251, 6254, 6256, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6267, 6268, 6272, 6275, 6276, 6280, 6281, 6282, 6283, 6284, 6286, 6288, 6289, 6290, 6291, 6292, 6293, 17880, 17880, 6299, 6300, 6301, 17880, 18051, 17885, 17886, 6307, 6308, 6310, 17884, 17885, 6315, 6316, 6318, 6319, 6321, 6322, 6323, 6324, 6326, 6328, 6330, 6331, 6332, 6334, 6338, 6339, 18079, 6341, 6342, 6343, 6344, 6345, 6346, 6348, 6349, 6350, 6352, 6354, 6355, 6356, 6358, 6360, 6361, 6362, 6364, 6365, 6366, 6367, 6368, 6369, 6371, 6372, 6374, 6375, 6376, 6377, 6379, 6380, 6383, 6384, 6385, 6386, 6387, 6389, 6390, 6393, 6394, 6395, 6396, 6399, 6400, 6401, 6402, 6404, 6405, 6407, 6408, 6410, 6411, 6412, 6413, 6415, 6417, 6418, 6420, 6421, 6422, 6424, 6426, 6427, 6428, 6429, 6430, 6433, 6434, 6436, 6438, 6439, 6440, 18165, 17675, 17632, 17675, 17623, 17642, 17675, 17675, 17640, 17976, 17675, 18173, 17675, 17675, 17654, 17652, 17670, 17675, 17675, 17675, 17996, 17673, 18178, 18169, 17999, 17998, 18000, 18176, 18175, 18004, 18168, 18167, 18166, 18169, 18171, 18170, 18174, 18176, 18175, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17812, 5482, 18307, 17822, 5508, 18312, 17833, 5535, 18317, 18320, 5564, 5566, 5567, 5568, 5570, 17848, 5597, 17851, 5618, 17855, 5633, 18334, 17861, 17864, 5658, 17867, 5670, 17876, 5691, 17879, 5705, 17883, 5732, 17889, 17892, 5773, 18353, 17898, 18355, 17904, 18358, 17910, 5819, 17913, 17916, 18364, 17922, 5869, 17925, 18369, 17932, 5905, 18374, 17938, 18377, 17944, 5938, 17947, 18382, 17954, 5976, 17957, 6000, 17960, 17963, 6055, 18392, 18395, 6081, 6083, 6084, 6086, 6087, 17980, 6120, 18404, 18407, 6146, 6147, 6148, 
6149, 6151, 6191, 6192, 6193, 6194, 6202, 6203, 6204, 6205, 6211, 6212, 6213, 6215, 6221, 6222, 6223, 6224, 6225, 6226, 18434, 18436, 18010, 18438, 18330, 18441, 18443, 18446, 18449, 18451, 18453, 18030, 18341, 18340, 18342, 18455, 18457, 18459, 18461, 18042, 18465, 18467, 18469, 6297, 6298, 6302, 18473, 6304, 6306, 6311, 6312, 18058, 18484, 18063, 18487, 18488, 18490, 18071, 18495, 18499, 18502, 18504, 18508, 18512, 18516, 18518, 18520, 18522, 18106, 18109, 18527, 18529, 18530, 18532, 18534, 18536, 18537, 18541, 18545, 18548, 18552, 18554, 18555, 18556, 18559, 18563, 18565, 18156, 18567, 18571, 17726, 6457, 6459, 6460, 6461, 6466, 6467, 6468, 6469, 6470, 6471, 6485, 6486, 6487, 6488, 6493, 6494, 6495, 6496, 6497, 6498, 6510, 6512, 6513, 6515, 18425, 18432, 6520, 6521, 6522, 6551, 6552, 6553, 6556, 18584, 6559, 6560, 6562, 18595, 6565, 6566, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17303, 18308, 17321, 18313, 17339, 18318, 17356, 18700, 17357, 17364, 17376, 17387, 17405, 17413, 17428, 17438, 17449, 17470, 17901, 17500, 17517, 17528, 18370, 17549, 17556, 17574, 18383, 17596, 17607, 17621, 18393, 17638, 18758, 18759, 17650, 18405, 18766, 18768, 18769, 18771, 18773, 18775, 18777, 18779, 18783, 18785, 18787, 18789, 18791, 6252, 18794, 18709, 18797, 18799, 6273, 6277, 6278, 18806, 18808, 18810, 18812, 18813, 18055, 18479, 18817, 18821, 18823, 18825, 18724, 18829, 18728, 18731, 18834, 18836, 18838, 18840, 18842, 18844, 18742, 18849, 18851, 18854, 18856, 18751, 18861, 18864, 18866, 18868, 18871, 18874, 18876, 18878, 18881, 6517, 6519, 18858, 18889, 6558, 6564, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17304, 17823, 17340, 18951, 18702, 17365, 17377, 17388, 17406, 17414, 17429, 17439, 17450, 17471, 17501, 17529, 17550, 17575, 17597, 17608, 17622, 18976, 18760, 17651, 18981, 17668, 18945, 18984, 18947, 18986, 18949, 18989, 18992, 18993, 6255, 18452, 18033, 19000, 18462, 19003, 19004, 19005, 19007, 19008, 19011, 6329, 18962, 18505, 6347, 6353, 18964, 19017, 19019, 19021, 18966, 18968, 6406, 19024, 18970, 19026, 6437, 18974, 19030, 18979, 19034, 18885, 6554, 18893, 18897, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19076, 19094, 19097, 18688, 6195, 18691, 6206, 18694, 6214, 18990, 18703, 18439, 18705, 18707, 19106, 18710, 18711, 18713, 19108, 19109, 18715, 18717, 18049, 19113, 18719, 19115, 18721, 18492, 18722, 19117, 18725, 6335, 18727, 19120, 18729, 19121, 18732, 6359, 18524, 18531, 18734, 18538, 18736, 6392, 18738, 6398, 18741, 18549, 18743, 18557, 18745, 6423, 18747, 18568, 18749, 19132, 18752, 6458, 19031, 18761, 6484, 19035, 18886, 19138, 18894, 18898, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6190, 19204, 6201, 19206, 6210, 19208, 18697, 6237, 6247, 6253, 18795, 6259, 6265, 6270, 19219, 6279, 6285, 6303, 18818, 6313, 6327, 18826, 6333, 19231, 6340, 18830, 6351, 18831, 6357, 19237, 
6381, 6391, 18539, 6397, 18543, 6403, 19247, 6409, 6419, 19251, 6425, 6435, 18857, 6456, 19257, 18755, 6483, 18869, 18764, 19263, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19333, 6220, 18447, 18496, 18500, 18509, 18513, 18832, 18845, 18846, 18550, 19367, 18572, 19028, 6465, 19032, 6492, 19328, 19330, 19358, 19335, 19358, 19365, 19368, 19347, 19336, 19343, 19345, 19350, 19358, 19358, 19352, 19344, 19339, 19341, 19340, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18781, 18827, 18517, 18542, 18546, 18561, 18862, 18872, 6511, 6514, 19457, 6523, 6524, 6525, 19363, 6527, 6528, 6529, 6530, 19337, 19369, 6534, 6535, 6536, 19348, 19363, 6542, 6543, 19354, 6545, 6546, 6548, 6549, 6550, 19470, 19472, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19332, 6518, 6526, 6531, 19361, 6533, 6537, 19356, 19359, 19366, 6541, 6544, 19350, 19596, 19600, 19602, 19607, 19614, 19617, 19371, 6557, 19374, 6563, 19593, 19592, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6516, 6532, 6538, 6539, 6540, 6547, 19714, 19605, 19610, 19723, 19727, 6555, 6561, 19734, 6570, 6571, 19732, 19713, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19841, 19842, 19844, 19615, 19846, 19729, 6567, 19851, 19851, 19851, 19852, 6574, 19840, 6576, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19847, 19969, 19848, 19730, 19850, 6568, 
6569, 6572, 6573, 6575, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20097, 19973, 20101, 19854, 20103, 19979, 19981, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20224, 20099, 20227, 20229, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20353, 20355, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20480, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20736, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 
82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20864, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 
0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
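// Launch geometry and problem sizes for the device function ac() below:
// one block of 128 threads, 52 circuit inputs per thread
// (SIZE_OF_IN = 52 * 128 = 6656) and 113 circuit nodes per thread
// (SIZE_OF_AC = 113 * 128 = 14464).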
#define THREADS_PER_BLOCK 128
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 6656
#define SIZE_OF_AC 14464
// Evaluates a fixed feed-forward arithmetic circuit in shared memory: A holds
// 52 input values per thread, B and C hold operand indices into the scratch
// array R, and Op picks the operation per node (true = multiply, false = add).
// n_iter repeats the evaluation, presumably so the kernel can be timed.
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Per-thread scratch: 165 slots (52 inputs + 113 node results). Note that
// 165 * 128 * 4 bytes ~= 82.5 KiB, which exceeds the static shared-memory
// limit of many GPUs (typically 48-64 KiB per block).
__shared__ float R[165*THREADS_PER_BLOCK];
const int t = THREADS_PER_BLOCK;
__shared__ float final;
final = 0; // every thread stores the same value; benign race
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
__syncthreads();
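// Each iteration below re-evaluates the whole circuit. The assignments inside
// the loop appear to be grouped into dependency levels by the code generator:
// the __syncthreads() calls between groups make the results of one level
// visible to every thread before the next level reads them through B and C.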
for (int iter=0; iter< n_iter; iter++) {
R[i + 52*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 53*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 54*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 55*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 56*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 57*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 58*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 59*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 60*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 61*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 62*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
__syncthreads();
R[i + 63*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 64*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 65*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 66*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 67*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 68*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 69*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
R[i + 70*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 71*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 72*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 73*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 74*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 75*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 76*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 77*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 78*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
__syncthreads();
R[i + 79*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 80*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 81*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 82*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 83*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 84*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 85*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 86*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 87*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
__syncthreads();
R[i + 88*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 89*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 90*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 91*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 92*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 93*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 94*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
__syncthreads();
R[i + 95*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 96*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 97*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 98*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 99*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 100*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 101*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
__syncthreads();
R[i + 102*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 103*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 104*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 105*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 106*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 107*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
__syncthreads();
R[i + 108*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 109*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 110*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 111*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 112*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 113*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 114*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
__syncthreads();
R[i + 115*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 116*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 117*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 118*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 119*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
__syncthreads();
R[i + 120*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 121*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 122*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 123*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 124*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 125*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
__syncthreads();
R[i + 126*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 127*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 128*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 129*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 130*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
R[i + 131*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 132*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 133*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 134*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
__syncthreads();
R[i + 135*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 136*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 137*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 138*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
__syncthreads();
R[i + 139*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 140*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 141*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 142*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
__syncthreads();
R[i + 143*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 144*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 145*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
__syncthreads();
R[i + 146*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 147*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
__syncthreads();
R[i + 148*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
__syncthreads();
R[i + 149*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
__syncthreads();
R[i + 150*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
__syncthreads();
R[i + 151*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
__syncthreads();
R[i + 152*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
__syncthreads();
R[i + 153*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
__syncthreads();
R[i + 154*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
__syncthreads();
R[i + 155*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
__syncthreads();
R[i + 156*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
__syncthreads();
R[i + 157*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
__syncthreads();
R[i + 158*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
__syncthreads();
R[i + 159*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
__syncthreads();
R[i + 160*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
__syncthreads();
R[i + 161*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
__syncthreads();
R[i + 162*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
__syncthreads();
R[i + 163*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
__syncthreads();
R[i + 164*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
if (i==0) { final += R[164*t]; } // thread 0 accumulates its final circuit value across iterations
__syncthreads();
}
if (i==0) { A[0] = final; } // write the accumulated result back to global memory
}
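
// ---------------------------------------------------------------------------
// Illustrative only: a minimal, hedged sketch of how ac() could be wrapped in
// a kernel and driven from the host. Nothing below is part of the generated
// benchmark; the wrapper name ac_demo_kernel, the helper run_ac and its
// parameters are assumptions, and the sketch assumes <hip/hip_runtime.h> is
// included at the top of this file so the hipMalloc/hipMemcpy/launch calls
// are available.
// ---------------------------------------------------------------------------
__global__ void ac_demo_kernel(float *A, const int *B, const int *C,
                               const bool *Op, int n_iter) {
  // One block of THREADS_PER_BLOCK threads evaluates the whole circuit.
  ac(A, B, C, Op, n_iter);
}

static void run_ac(const float *h_in, const int *h_B, const int *h_C,
                   const bool *h_Op, int n_iter, float *result) {
  float *d_A; int *d_B; int *d_C; bool *d_Op;
  hipMalloc((void**)&d_A,  SIZE_OF_IN * sizeof(float)); // 52 inputs per thread
  hipMalloc((void**)&d_B,  SIZE_OF_AC * sizeof(int));   // left operand indices
  hipMalloc((void**)&d_C,  SIZE_OF_AC * sizeof(int));   // right operand indices
  hipMalloc((void**)&d_Op, SIZE_OF_AC * sizeof(bool));  // true = multiply, false = add

  hipMemcpy(d_A,  h_in, SIZE_OF_IN * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_B,  h_B,  SIZE_OF_AC * sizeof(int),   hipMemcpyHostToDevice);
  hipMemcpy(d_C,  h_C,  SIZE_OF_AC * sizeof(int),   hipMemcpyHostToDevice);
  hipMemcpy(d_Op, h_Op, SIZE_OF_AC * sizeof(bool),  hipMemcpyHostToDevice);

  hipLaunchKernelGGL(ac_demo_kernel, dim3(BLOCKS_PER_GRID),
                     dim3(THREADS_PER_BLOCK), 0, 0,
                     d_A, d_B, d_C, d_Op, n_iter);
  hipDeviceSynchronize();

  // ac() stores its accumulated output in A[0].
  hipMemcpy(result, d_A, sizeof(float), hipMemcpyDeviceToHost);

  hipFree(d_A); hipFree(d_B); hipFree(d_C); hipFree(d_Op);
}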
| a4adc3f5839d585deae0277d42c7f3b7052fb154.cu |
float h_A[]= {
0.646300533086186, 0.6891034119322159, 0.5468255896007155, 0.6042228186164886, 0.8659380581803113, 0.6300291449865434, 0.6636944471272259, 0.9882951548595007, 0.6352107108241554, 0.5790636985735749, 0.8804145795069749, 0.9456035439132031, 0.6321246094793169, 0.5520083637849034, 0.8193643662644936, 0.948699220113753, 0.6755087191072062, 0.8452024670159349, 0.5158472479991425, 0.7454278577521886, 0.8203518918008311, 0.8306414037192553, 0.9102755274193095, 0.8049150489951427, 0.6634987536615461, 0.5516742816892066, 0.6842642708230713, 0.7483998039947184, 0.8051003412268876, 0.5649583199862422, 0.8121027556323586, 0.5450967323115479, 0.6219450160218438, 0.5105097521704045, 0.9137357556898562, 0.5150533504856335, 0.9355026026464295, 0.710832721093494, 0.9629822013245587, 0.5863652172884737, 0.9265505203829214, 0.5420760069497614, 0.6783567622586935, 0.8976679836225981, 0.5509090210473756, 0.6604391659811224, 0.999366552142813, 0.9348916843328499, 0.713477120025813, 0.7305105281555632, 0.5508255633550583, 0.5329064212395214, 0.6742118985756717, 0.689140376023022, 0.7270457963615451, 0.7209463549118231, 0.7283645311972975, 0.7472317688709345, 0.8427026709428014, 0.8917006197702075, 0.7860696907282438, 0.8998261806382524, 0.774140322305406, 0.7407395850512472, 0.7016144680644383, 0.9513347541186932, 0.9539583490820657, 0.8055369082163983, 0.66731931499848, 0.9269077839786752, 0.8036882303399886, 0.8353559289667416, 0.7487273959642642, 0.95231413311441, 0.52936400623473, 0.6622843724305907, 0.7865911951337959, 0.8490486059918574, 0.5654667231844523, 0.902222672460675, 0.7377938242893363, 0.845162901466018, 0.6178930176516815, 0.8820423918233746, 0.819658695927562, 0.7297992824653494, 0.8608408011644345, 0.7814267405834245, 0.5451303358395813, 0.8364497176294705, 0.9476071729161337, 0.8683920694866987, 0.6120014563881961, 0.7324781077435785, 0.952295205463342, 0.6058991359641921, 0.8291900507261571, 0.5226152595564822, 0.5947825439255605, 0.8782021043314617, 0.7569168372950734, 0.5797010072157455, 0.6594462100662504, 0.8337467492618065, 0.9914618549442806, 0.9321282144535272, 0.7246478245290418, 0.7161212795026455, 0.6016851675753103, 0.5125244716188995, 0.9299996842565255, 0.9903492897808992, 0.6237940900552181, 0.8205895991703788, 0.9884414871779998, 0.5866948961638769, 0.998033951487114, 0.5295990006307705, 0.5611045875923815, 0.961245424281093, 0.7226827601352674, 0.9640509189881881, 0.6519866100403702, 0.7892687497473407, 0.8167014390840873, 0.6765396258366096, 0.8449615379127254, 0.596268393959178, 0.7939249923913629, 0.5168816989873475, 0.9345299250253124, 0.6309463095185301, 0.8986805331645678, 0.7523313838450412, 0.82531344079664, 0.7627569522371702, 0.9031553492515547, 0.9494476034448109, 0.6288373163385834, 0.7924580168091493, 0.721495346147775, 0.834300321380002, 0.6469884013241809, 0.6472322377172521, 0.7984745212053365, 0.962294073457627, 0.7886130868301862, 0.8776482969543955, 0.6478837103419697, 0.9882165119301165, 0.8740486150400817, 0.5056143331065995, 0.6618047410295208, 0.7610666592760644, 0.5702625328895041, 0.9369221577593562, 0.9494164874124904, 0.59238444587363, 0.8780965038804809, 0.5218639139258541, 0.6812401728541819, 0.9328253167831007, 0.5161381775199221, 0.9100393851884749, 0.9728293591126462, 0.811344502001907, 0.782823841572214, 0.8658958032470887, 0.663719411218762, 0.9692889930153497, 0.6467599014694876, 0.589304535120637, 0.5094633344034718, 0.7025147226816439, 0.6598503212467648, 0.7020746471976945, 0.8830918473238974, 0.7866485844831004, 0.6345775079170256, 
0.5615367280865449, 0.8802227833065868, 0.8582600706563485, 0.9155482170781064, 0.9530293740421751, 0.8182483372937428, 0.8524389803132264, 0.7241233536334677, 0.5151834741836199, 0.7385812199918054, 0.7943893265125952, 0.9051641860383268, 0.619534572253894, 0.8242822542479566, 0.6413536058059588, 0.9807819497947537, 0.9898101031902062, 0.8415733555438634, 0.9867989644513635, 0.9373926397421499, 0.8237322958318012, 0.9271544178576562, 0.8356995743720048, 0.5658178563673646, 0.9705983473416786, 0.6736511025432669, 0.7830998587352098, 0.7045935293009279, 0.6834898270240016, 0.6499489142941706, 0.8808467511064504, 0.6642293957183821, 0.8653745173498084, 0.6267646816753698, 0.8074151052755552, 0.6799619044150402, 0.9942692009440288, 0.8625681533776105, 0.9257538399244084, 0.9914011126522407, 0.7917287747201345, 0.6046048508747064, 0.532450046417468, 0.6437265828207415, 0.8897046260829842, 0.9224802213438084, 0.6057855632559244, 0.9499914588921554, 0.611727504863876, 0.7587968793908222, 0.67209262512403, 0.9950600556677005, 0.7501592342573983, 0.8822302791460712, 0.562604449598759, 0.9938232800091651, 0.9950277163985779, 0.7850563426271466, 0.6291752867355491, 0.5837153379176236, 0.7493907741017607, 0.6658782563135046, 0.6476146082689006, 0.5974297752374016, 0.7034458626620241, 0.5161588039335538, 0.7186483385553928, 0.9061181053411442, 0.6296220803731616, 0.8344587501610189, 0.7658368512919322, 0.5193911487477161, 0.5254419830916216, 0.7504808479462405, 0.8552544477499019, 0.8289137948682707, 0.5426242175782335, 0.858845508474556, 0.8252356216134121, 0.8866364015504669, 0.5073032774858128, 0.74148567685559, 0.5809190407335006, 0.8382147766638192, 0.5021179405425454, 0.5612965191774761, 0.521704780892861, 0.9620418862791433, 0.6967427399939414, 0.6533446492141379, 0.7147919014055153, 0.7887651897338765, 0.8217711569279046, 0.5366901108437196, 0.9734989556630906, 0.5485794158722644, 0.5458920483132449, 0.9415910985632716, 0.6905162757784671, 0.8177732442367671, 0.5193192818261485, 0.904439960839182, 0.577484626417917, 0.5023125567163751, 0.5351238363408092, 0.5506641464567381, 0.9097008547341774, 0.5028782459886247, 0.9775999371965542, 0.8480896041264325, 0.9524433141692397, 0.8790167956693373, 0.6918264294189349, 0.6610097567204785, 0.5590353481221483, 0.9055975628804205, 0.6238987671821737, 0.9890972864990741, 0.6749305158850749, 0.6388066974704508, 0.9249096968119721, 0.9237097208162639, 0.9956186647783947, 0.7502845085261427, 0.9157536785718855, 0.6367148161459021, 0.8914383120371315, 0.7754815852778648, 0.7442070581925427, 0.7168533964646541, 0.8035208845828656, 0.9058793058946397, 0.5506057302703941, 0.5610586777236432, 0.6198991192228714, 0.6759727566157296, 0.6521536736152977, 0.8911054170392861, 0.8730066061369885, 0.9052165427830005, 0.6290577933163359, 0.6266432294048905, 0.5833044339268814, 0.542572680556954, 0.709871771808865, 0.9961238310508744, 0.5220897050603603, 0.8772828170016069, 0.8770640256265352, 0.6734715416008624, 0.66448493340621, 0.711886014564672, 0.8948545491540754, 0.742454808358824, 0.5031309948396989, 0.998105761408189, 0.7416349611897435, 0.9833498748501672, 0.9434160912644086, 0.9287361899928851, 0.5668358498590604, 0.5516216715871469, 0.9180811238230364, 0.5003209498989232, 0.9919408995756567, 0.6098195086655246, 0.8529329865006654, 0.5483923087170157, 0.5108091169433435, 0.9310974593281147, 0.5131543331047703, 0.5522897530875988, 0.6135861087993936, 0.942225130594302, 0.7883109048664032, 0.980358430652991, 0.7427552974292404, 0.9008592468210845, 
0.8330182916915136, 0.5116266438107839, 0.7155765952485353, 0.5586222664249273, 0.8094091386607725, 0.8137341760476213, 0.596062013591621, 0.6233668771354632, 0.6220904258017181, 0.7729922623950242, 0.9242654659438391, 0.9918232862707279, 0.7038756613345727, 0.6955031237560236, 0.7330651825711396, 0.9814429451532296, 0.6637713448493832, 0.6273600806376864, 0.7920615560597659, 0.7560570649825917, 0.8667792441759616, 0.9311245995511708, 0.7390681612865757, 0.8622775259815559, 0.6482748715498996, 0.599471939542426, 0.5779101969326355, 0.801001618896622, 0.7871239215733595, 0.8926484924268543, 0.7436028538658448, 0.5052795513219919, 0.963326129083858, 0.6198395865526083, 0.7441529681401635, 0.9601662315681015, 0.7548356711809994, 0.9954837830129526, 0.6759079213898986, 0.9621800756631611, 0.89322570586708, 0.7735070667526001, 0.6915282225910707, 0.9341423848626801, 0.6211207388152615, 0.6819132804430946, 0.9004959920486495, 0.6869188767615144, 0.8235250113200907, 0.9166578872951766, 0.9013150686850961, 0.8380839858962025, 0.9073157003461221, 0.9543454184467036, 0.9055858124509073, 0.8918637925715706, 0.9903134675953051, 0.5127986114584449, 0.696692956130758, 0.5370523981116533, 0.7001724629171293, 0.9916611642074329, 0.9857366515184061, 0.924747395057823, 0.9445137331888381, 0.7766195394457958, 0.6195252022646915, 0.9546686663614874, 0.9844751038378801, 0.844027852820263, 0.7447830762001548, 0.6791783356204665, 0.5337497689443171, 0.7495252125656233, 0.7131514817215079, 0.877101450045404, 0.945751398349572, 0.8683084143497066, 0.8372134741459116, 0.6646904302471112, 0.8462850394417225, 0.5136995543425608, 0.7093735655361126, 0.8695010981991498, 0.7935643355642961, 0.5975979145080428, 0.6512819838748201, 0.5073914397414905, 0.7782127409874147, 0.6518832024877972, 0.9932076929834982, 0.5595762061403449, 0.9922246720679966, 0.5687109423220303, 0.838105358258217, 0.969625314094954, 0.7190620443864618, 0.8664081309733891, 0.667132130714411, 0.6353479152159843, 0.9881038444464245, 0.9749456914050801, 0.6056862445310439, 0.7651350765547513, 0.7111211950747746, 0.7268386451680536, 0.6345617335482233, 0.520889051962435, 0.9564077666058417, 0.5944431116457711, 0.7802461524486353, 0.9072284896017788, 0.7054058059340611, 0.5258608417539039, 0.9747301898084519, 0.8116918485805862, 0.5947262934805981, 0.835470561834067, 0.8289930620314478, 0.5932868967061289, 0.712644147073302, 0.6305110950712968, 0.8225485490400115, 0.8741217768921016, 0.6296711447821639, 0.706668544980442, 0.9063993574246532, 0.6289844799422348, 0.9689829115352273, 0.8146775882388788, 0.9554286901233602, 0.5417569350671105, 0.805744277346577, 0.6248071416806387, 0.6357804381341573, 0.5849076492790561, 0.9432718005552067, 0.8528011364197232, 0.9393180145037578, 0.9055210552178747, 0.6218196701805875, 0.5630041801771573, 0.9057041149454138, 0.9699522473736628, 0.6410947775990148, 0.9467625040225263, 0.8152812695819185, 0.7962021187861228, 0.943125023708686, 0.593552831309881, 0.8989205954589661, 0.5432046020734231, 0.8479840409511219, 0.8508701651831969, 0.5299197849521209, 0.5325591497756108, 0.8793828608768641, 0.9369757353802319, 0.5594073029268327, 0.5286711607741894, 0.8548877866552773, 0.5686180685915301, 0.7391274856651364, 0.7926810868411738, 0.8954541553960164, 0.5655129506186884, 0.9109372852850768, 0.8388292670887891, 0.6047967180900435, 0.6235155630673881, 0.8682310872676411, 0.9662344277289328, 0.6665840484803136, 0.9948095299771151, 0.7204924530281454, 0.6999509968934565, 0.5766348487013278, 0.7367778800867257, 
0.6837518469566681, 0.7770362604491128, 0.7587593463886848, 0.6310152500067889, 0.9919350377155323, 0.5287101907299846, 0.6341323580528309, 0.6416557527648712, 0.5883926591073251, 0.631425571085183, 0.8420540781161465, 0.6143637234888837, 0.8104077645102102, 0.9959475858013923, 0.9714705072445354, 0.6585921059526243, 0.8276218042252581, 0.5236214010776717, 0.8033802033078954, 0.7285054761100016, 0.5029381516906044, 0.9234000025643223, 0.6620917674867863, 0.561503064154029, 0.5771265064791333, 0.8742754298885033, 0.7971833382563311, 0.7199981465522782, 0.7252244800011279, 0.5969156324580289, 0.7381505609258481, 0.5680191990851209, 0.5729645938848937, 0.7039117180288094, 0.9133157526199824, 0.5354540604279023, 0.5974301685518403, 0.5038891564217153, 0.8157942322558649, 0.7859410990553226, 0.8056504129361373, 0.6700358006832727, 0.6973785780151314, 0.5778551791276771, 0.5271360770943279, 0.8813298017380735, 0.5704358736540642, 0.7085428617195324, 0.9275680222175162, 0.5098610056870094, 0.9834861477586689, 0.5938433356175594, 0.6385932490945254, 0.6407794033101999, 0.9894985463088162, 0.6947984656850965, 0.6908835745820855, 0.9434479881043976, 0.6400819937464182, 0.6227332389056729, 0.9317305409527508, 0.7490244259085336, 0.7080759286228255, 0.7261418110356387, 0.9849611732531696, 0.8275907799859956, 0.7877709069472671, 0.6635787742348993, 0.9566498365513382, 0.9748628490405655, 0.6920318310713454, 0.9891349039024118, 0.6207854977713125, 0.5846708458760457, 0.638580615990205, 0.6107995937311764, 0.6273644935634082, 0.8791650679890575, 0.548205927966527, 0.9068404986513314, 0.7206877308174136, 0.8159804032398001, 0.7058470702694106, 0.7782382475467775, 0.554249281387349, 0.9329381142030011, 0.5511034131330732, 0.7350473494414951, 0.9682932885624354, 0.7124734961916888, 0.9885473291650108, 0.6051389970719803, 0.6532977034077603, 0.7299236953606625, 0.7255637558553876, 0.7654754536758199, 0.5954706519063397, 0.5918197380827519, 0.9673961499233998, 0.8059289887491525, 0.9788048055672876, 0.6030938291157355, 0.7844111512535903, 0.5347588790075453, 0.7080473758010537, 0.6515005741162019, 0.7964049788091725, 0.8410201353420025, 0.6714313442320763, 0.5910490044351887, 0.8995951419154249, 0.5184526686036783, 0.7925424835796833, 0.7679126300955836, 0.9590203996334217, 0.9903091706139668, 0.7216147584909365, 0.9976929437673969, 0.9704877980644011, 0.5470410188986154, 0.5186496911750648, 0.7275990372036649, 0.6748393260356575, 0.7191963173930802, 0.7788636130564965, 0.7043338505652308, 0.9406509282040869, 0.52560660969902, 0.9616897082844407, 0.5269839631434159, 0.6345471970871202, 0.7327621134132786, 0.5424223826722891, 0.6168325444359117, 0.6391903370210928, 0.8675913802283597, 0.7232739802939148, 0.6507550219605913, 0.9393934233789198, 0.8263999233020167, 0.7485394947870259, 0.5132102800989462, 0.625481521007652, 0.7739774047696706, 0.8857528228584339, 0.9238937325350356, 0.6833594764663675, 0.5085098423318805, 0.60230946300701, 0.9636284306289673, 0.7561520464326359, 0.5270883050805939, 0.7334739362727096, 0.812623690762135, 0.7789196515887583, 0.664852634521558, 0.879214888978636, 0.7926990751198626, 0.5970731570931582, 0.5355408806670174, 0.8625834422501488, 0.8591529471731963, 0.8343916360589914, 0.5093196955880548, 0.9616363595445148, 0.5602164363946163, 0.7172587103636402, 0.8135567537750678, 0.5115644623041465, 0.6116482269828576, 0.6993011884296083, 0.6353127926219422, 0.6707370605525017, 0.6775098986182565, 0.5641301865518293, 0.9806327958605061, 0.6565364439300072, 0.9204489360294551, 
0.8897248477441302, 0.5042206987979128, 0.6867821143085405, 0.8326952648842246, 0.8133394645759977, 0.631755831093546, 0.7168534543337715, 0.553815544401435, 0.9210250470711574, 0.8693782939314261, 0.707530539955956, 0.8280387791335548, 0.9898684171960364, 0.9106199707727254, 0.8917367641107529, 0.9175156167204768, 0.6570701428088337, 0.8850069006100854, 0.9016916422038186, 0.6217220978517903, 0.732174065703552, 0.9311739233989099, 0.7568504517733975, 0.8992529268458326, 0.8349936529456221, 0.7594875346301262, 0.9825630141201046, 0.8352988135145627, 0.9576918526608564, 0.7281027500440791, 0.7017470713526053, 0.8891857370680291, 0.5002872327542913, 0.94839759897853, 0.5867946200920484, 0.7109245157620054, 0.7900136686828181, 0.7454451494369139, 0.5634953251365167, 0.780902394602301, 0.7360328369367453, 0.9592633173685323, 0.6611483647614069, 0.6183278808848897, 0.5651687423987624, 0.9233209866077092, 0.8569320658136195, 0.5587178571907216, 0.9146668880118916, 0.5304406517948714, 0.7397743026504903, 0.5984947276197737, 0.7800889432751461, 0.9562199258305095, 0.6432837513122842, 0.5550505863181256, 0.9235852230924513, 0.733640926191228, 0.776085602341642, 0.9524542079848878, 0.847250075104929, 0.7383027441400019, 0.7390817430532659, 0.5934418555980321, 0.939762183617721, 0.7243231328197473, 0.8502483578899043, 0.5133099121559599, 0.7053783530245934, 0.8376415716976677, 0.6373816462943671, 0.8866852726164371, 0.6866767594367265, 0.9713340705964297, 0.8351969079006495, 0.5776190440980556, 0.5764313107857315, 0.5384416761182539, 0.53363574776288, 0.5034229345108567, 0.5980528030498259, 0.5110302205892395, 0.8948816982126281, 0.8736692914770265, 0.6978317046133657, 0.99010958406235, 0.7502160632818542, 0.6153720826043493, 0.5256339570888131, 0.9980145093725308, 0.7755385200777405, 0.569001590154208, 0.9176724785790076, 0.8181647947535928, 0.7374480098332676, 0.6909665020438541, 0.8142226548304321, 0.9742959309490044, 0.7369373381019042, 0.5827434624790624, 0.9271939313775199, 0.9317717788450606, 0.7216238185386424, 0.6985416016974761, 0.5807711852783854, 0.5361875519031234, 0.5535722388142401, 0.9228446765858227, 0.9872259771598613, 0.7961943683173853, 0.7509096841824182, 0.8920629777940434, 0.7196990780883037, 0.9465682735955805, 0.7023041340593035, 0.5003835914835418, 0.9551501012151069, 0.567378116214371, 0.6789007117283913, 0.8131317297967569, 0.5157030155040458, 0.8903269497329267, 0.9698394707342216, 0.880610216618767, 0.5638125233271751, 0.9439176534406095, 0.6617599887583265, 0.5971275743875617, 0.8433006006231972, 0.6361631341613987, 0.9314640172014954, 0.6003004748354532, 0.9335735031331582, 0.6512360228208864, 0.5086987276822852, 0.9289508271411779, 0.7764175949496209, 0.7217065396368492, 0.524910731412012, 0.5528080380926884, 0.6064559602195418, 0.5868470659969289, 0.6635625851282483, 0.9960085972417181, 0.9708803572636373, 0.7949211055726071, 0.9311976496496084, 0.5058496039334064, 0.9849933979285113, 0.754603430565185, 0.6020953284255771, 0.8881080637470343, 0.5444215224718545, 0.9790234088872531, 0.9507974984957426, 0.6060991653926988, 0.5812583351180527, 0.9352654402216442, 0.6986846296293825, 0.9168548851724783, 0.5943415536972141, 0.7972851114931117, 0.9238458252041004, 0.7785439940736236, 0.8027079420177673, 0.625666199588418, 0.9878265887939884, 0.8684623032413582, 0.9657038529429582, 0.6748846472509984, 0.6763728035482574, 0.863897236201751, 0.8878137025210504, 0.882000334699977, 0.9874521190492971, 0.9968497286219995, 0.7085143622349079, 0.7502563166466953, 
0.5100596894405709, 0.7221069810109304, 0.5119915802691737, 0.6772169647506838, 0.7944568379199017, 0.8985029224042136, 0.6440184616163815, 0.637449236824535, 0.8830754347167523, 0.7009911817687886, 0.7345796154807748, 0.8674924373519134, 0.5915263587943979, 0.6006873299642819, 0.935101364309366, 0.5916840133813797, 0.7025417008931176, 0.8738420055138969, 0.8515053284533634, 0.532849192641057, 0.8372844013505963, 0.6928445088878702, 0.5000481438995283, 0.5884335221398116, 0.5995670630808899, 0.9888530084238121, 0.5438025285085197, 0.5095021646983758, 0.8894768148169612, 0.6466027312844618, 0.5842592246182025, 0.6407079406106068, 0.9032205059340827, 0.8851738069269643, 0.7745756049713829, 0.6454741220170227, 0.6795858748123376, 0.6269660238146295, 0.5474855981954658, 0.5414390310120987, 0.9741726869608864, 0.5810304537700886, 0.7099185038655249, 0.5934671282518265, 0.6398698522874915, 0.5002762153443079, 0.5859937767770008, 0.6670655649237398, 0.9185268232951198, 0.9105261178683872, 0.572102526223022, 0.6327333283895704, 0.9320586638263583, 0.9026164478514013, 0.7180313118623922, 0.660405559974598, 0.5149313797841719, 0.7054232144392834, 0.5740223050858865, 0.6275890571803284, 0.9267748250226557, 0.9800441654951677, 0.507829382203053, 0.6419830505612427, 0.7088084545395703, 0.5335691036102399, 0.9141766177125922, 0.6341179915055504, 0.5884302188764479, 0.9210203431912694, 0.7434708823921008, 0.5987877133257116, 0.6492463885149824, 0.9479386335243074, 0.5840048168805851, 0.6965573314801549, 0.8494532049049655, 0.6078019087261085, 0.7384128836762065, 0.5950362018635402, 0.9937877541408366, 0.780514396725051, 0.7698684040692368, 0.976694951920666, 0.7459299494984142, 0.8168788335134516, 0.6234539464714894, 0.6198843520593409, 0.9697927309373819, 0.8316396562914714, 0.7592195918822549, 0.5077289855037546, 0.5098327402585117, 0.8774091203916243, 0.9091281027415556, 0.614181245033647, 0.5810847215641792, 0.7892407118312466, 0.7772499067663932, 0.984650564958578, 0.9091021548873972, 0.6740446906572493, 0.8293863877721068, 0.6226249360311578, 0.6547289407526626, 0.6042360601159997, 0.65963962983253, 0.8538765541782942, 0.6417756251984567, 0.7496272766906809, 0.6157990105244564, 0.6963683703890142, 0.8878627934136596, 0.9434391660816033, 0.9286000386024276, 0.6547148468210243, 0.9386945753296556, 0.7776746037920286, 0.5410738256834172, 0.5541537887220906, 0.566096810007787, 0.7358519830139285, 0.7612404901406558, 0.6523714579294894, 0.5605225243157361, 0.6525721726253293, 0.6322496631504118, 0.8822743815096532, 0.6608110422999461, 0.5329385729201039, 0.580515496665571, 0.5779912079140113, 0.87997203223194, 0.9898542697442663, 0.8837662614684308, 0.8544232781478822, 0.5022842180548204, 0.5773064792910672, 0.7000346056691509, 0.718178137054416, 0.5619155200597288, 0.7235558961907185, 0.9406133465773018, 0.6031276692009929, 0.7910496614542846, 0.5308397615651719, 0.7846357213502575, 0.732955045034702, 0.660689603613501, 0.624289347522082, 0.8362875255492506, 0.7137812227450171, 0.5647727715887381, 0.9218689726515423, 0.9018539978672373, 0.6124883956060718, 0.5444309741330681, 0.5961502031060293, 0.6645214096905985, 0.9141330615507679, 0.7775357416815067, 0.5995049883443608, 0.8604483111182484, 0.5541032546241027, 0.8089363999340876, 0.9964760805106907, 0.506888214034129, 0.6832218426476613, 0.8301052263479682, 0.5965132647446971, 0.7986420428667425, 0.8468177817819622, 0.8562725128157398, 0.9310770114279951, 0.6744064769329418, 0.6243212131028824, 0.6708758667597288, 0.7834429795058873, 
0.9432635554966639, 0.5957322054153149, 0.852522052426096, 0.9840865714593958, 0.6284795713964796, 0.8577619768166257, 0.7834187556268557, 0.7598640599146151, 0.9503367574361032, 0.8626399322323969, 0.64924784869908, 0.8446978433049003, 0.8411085966844416, 0.8925151032605001, 0.7091735431702804, 0.5597521801285236, 0.5502308175904501, 0.5254614794048132, 0.9605476270430058, 0.951637702110574, 0.8604801880713773, 0.990710364114952, 0.6655172596096852, 0.8501810642029749, 0.6001422674077659, 0.7603824968044223, 0.6406260944029176, 0.5169377858960995, 0.8967587379119223, 0.7394718814613863, 0.6484652171756973, 0.5438339034064437, 0.9935483396714682, 0.5849071752608563, 0.627438105749589, 0.9748620047026393, 0.9257332841564356, 0.8878997598941276, 0.5256609082863422, 0.9064511396772408, 0.9587194048007492, 0.5081764783610119, 0.8249905737305162, 0.6496397513801804, 0.7195955682880676, 0.7359959541681667, 0.9703071744057263, 0.8722490060693451, 0.9769402537669138, 0.5094019439460136, 0.6126200264330806, 0.8684780898967995, 0.5956058159270011, 0.634553677925165, 0.8848127447141709, 0.5484706883921411, 0.8679089029485725, 0.9546972653518806, 0.8794620109801896, 0.5279504419038505, 0.7126180146048886, 0.9102055908771131, 0.8644315606962762, 0.8247123860600846, 0.5921112363602581, 0.6083673824929476, 0.5141519057771338, 0.9760424747773796, 0.7388198003770832, 0.9030473608551833, 0.904359911597262, 0.8638327120164218, 0.7382776034258496, 0.7094500429092381, 0.8287307558957766, 0.8447362311752091, 0.9785323302753333, 0.736732262464013, 0.7646723240894204, 0.7237446545726518, 0.5312982298304042, 0.7374050603540891, 0.8661083699118752, 0.9404874446075214, 0.6414711394173191, 0.6645194727047316, 0.7493180113974431, 0.7034505260472272, 0.6518367147635957, 0.6108364199269274, 0.7823246082941627, 0.7544465339385489, 0.5395698291431562, 0.6508190074543254, 0.5253117886085376, 0.8826920181853335, 0.9128581699322444, 0.8775904303667588, 0.9530524152776587, 0.757540024053555, 0.7970528714870762, 0.8405165121084591, 0.7283701977135559, 0.8671076989061002, 0.7822913892834322, 0.9427750325895521, 0.9600203869932834, 0.5396420948196136, 0.7307336987601604, 0.5959344008466603, 0.5776775177983159, 0.819142990425868, 0.6944146798811895, 0.7832859900074962, 0.6801847757931612, 0.9562701633899062, 0.7073889663331205, 0.5371517686907452, 0.5848038209178932, 0.6938954583876427, 0.9675153814133772, 0.9106809336078343, 0.7797237871405178, 0.9076233577730968, 0.6225333434628848, 0.9360833778123654, 0.755369345876056, 0.8287246287544336, 0.7832527602513946, 0.6830294948575468, 0.8217939616674648, 0.9727466352953125, 0.7098410595636506, 0.5030784525369147, 0.5099524752704464, 0.699598836281111, 0.555056886140102, 0.7944902259103886, 0.5355412850764948, 0.8045953576527118, 0.6490594403989591, 0.5023053789482935, 0.5481470344054418, 0.6399705982155802, 0.7844154284709528, 0.99014092490572, 0.9394735331043571, 0.6345423902977484, 0.6345885277603347, 0.6059020120817644, 0.6162847990429718, 0.9938693460348769, 0.9328400015594591, 0.6165395760986283, 0.9594682926040896, 0.6622919370118638, 0.9726975942649864, 0.5879807594666935, 0.5088247547089808, 0.6965997150463837, 0.9811608422948641, 0.8840675507065754, 0.9461812951681865, 0.8351248119731944, 0.5258843901761987, 0.6435590303497869, 0.8202875258008769, 0.7188447487617302, 0.7537645472558716, 0.8602731300035864, 0.8213784766089811, 0.6233752684188447, 0.7483878865072116, 0.50831758545153, 0.6575499844823798, 0.5380055033765019, 0.973742952448867, 0.8789710918614744, 
0.7859915308789611, 0.7795509374148415, 0.5649060990073578, 0.7342098766891465, 0.564522965051357, 0.8676055501459201, 0.8472994826441227, 0.541154935013825, 0.8442759512833237, 0.5504082923093758, 0.6013676037224776, 0.9132968095667775, 0.9090459752866911, 0.7523346988693529, 0.5744768965300744, 0.9071978714393114, 0.995205231231019, 0.7015666490876219, 0.6860956478368885, 0.9301163684069222, 0.5976847519892772, 0.8298652469795309, 0.7348524460887784, 0.742158085690625, 0.6770698920585714, 0.860925612589416, 0.8873602465364756, 0.8437063814421497, 0.6209881732403102, 0.829286545633026, 0.5194834129005317, 0.9906188454842377, 0.5093668333896422, 0.5288771726621817, 0.6753817717410937, 0.5058557013786278, 0.8566538681674771, 0.8723433921961546, 0.986852463115012, 0.7027213269100716, 0.8460417142725801, 0.8369293454511714, 0.8996028741129123, 0.8070300480397428, 0.6395918389393982, 0.6247988069825273, 0.5217237552768497, 0.9342462247028214, 0.5253919575192875, 0.9815169780012409, 0.7333905685896838, 0.9754139904718684, 0.7543786396478807, 0.5553422065483762, 0.8605854785466969, 0.7134281059264829, 0.8283217475427667, 0.7679318837676344, 0.9064655279993168, 0.5340062039068005, 0.5967458090600182, 0.7449451895754696, 0.7009630626585777, 0.8229852180533714, 0.7710466568238832, 0.7893926797078262, 0.817593298383644, 0.6617725579564899, 0.8374289808524042, 0.8111016365128783, 0.533743690882045, 0.5921943670802837, 0.8660611424836043, 0.6305624362282244, 0.9549253702967202, 0.6785916275513784, 0.756979539974934, 0.8996012053945419, 0.8146714034375477, 0.6805102217565409, 0.9352072622350791, 0.6090949891172568, 0.5912686591571317, 0.6971505625008763, 0.5071837255359728, 0.7374163333106882, 0.8523875481946477, 0.56776876698454, 0.6248777695410921, 0.8450663188772065, 0.6095041964288364, 0.8442595393557512, 0.9643969221663399, 0.8917021871612274, 0.7489457698695341, 0.5814112596780918, 0.7915501323598708, 0.8864148194083357, 0.6043682732411422, 0.7757927970030094, 0.8414617032413634, 0.5127137823903312, 0.8567724441958259, 0.6096130293164774, 0.6764684849289472, 0.8460181956771147, 0.8696529308729957, 0.7251476480008732, 0.5954690025886057, 0.6568670955123213, 0.6944252959426949, 0.5745244305618826, 0.8282796889683786, 0.5631043299831184, 0.7496262715990387, 0.9874906496118887, 0.8966278353205451, 0.5386878772027497, 0.7192172797212988, 0.9524368515562635, 0.948137475377858, 0.5836522477410689, 0.6172856380623571, 0.5136821094715727, 0.5776015301502713, 0.9325720853696757, 0.9358075816466761, 0.6922749689595328, 0.7852498892451859, 0.912267458881405, 0.6742255335694698, 0.7825304761528185, 0.8650554793338581, 0.616915032070197, 0.7810817168499768, 0.9473839014056948, 0.833115484908957, 0.8620544746165284, 0.5446811432723602, 0.6800025829987993, 0.5371002649252101, 0.8073457868335473, 0.5203839902214875, 0.8879935715586273, 0.9050492337238809, 0.6244224538063633, 0.8954581886045854, 0.6749155672495473, 0.5327180582267492, 0.6068500446531124, 0.6181917388588856, 0.928827225520807, 0.8706357918505327, 0.7569962458361807, 0.632191131482521, 0.8804432451958644, 0.9068103574280871, 0.7156312683798343, 0.551462738681055, 0.7527167245345066, 0.6769902031144203, 0.8491450711886029, 0.8869128730515499, 0.6857741949438243, 0.5183713894200075, 0.6597834749918241, 0.649392365841013, 0.6327994903390479, 0.5499856606413038, 0.8876766433343859, 0.7804487525561608, 0.7796656778818778, 0.6484136414898243, 0.8160956501029327, 0.6423040468014047, 0.5671073335602459, 0.5526960361178777, 0.5553425527673415, 
0.9755499600629666, 0.9928124220227508, 0.6058651779313444, 0.6747882667387863, 0.8467797893684955, 0.6685222062966949, 0.5294215942221532, 0.8551626982140346, 0.6960009500385091, 0.7945150136104191, 0.8325705370376602, 0.5777289361329725, 0.5814658204500128, 0.8327616354819651, 0.7958688395679077, 0.7701728647558552, 0.5924808022506777, 0.5462275854987788, 0.522210665552151, 0.6056395856981897, 0.5178370985212434, 0.8943275807428943, 0.9657874897859372, 0.9632183094600104, 0.8669319389396521, 0.8203026553770668, 0.9379869353251327, 0.9423806547201279, 0.5125447251955291, 0.6336199700406894, 0.6832034183414766, 0.607363621009056, 0.7319400487880183, 0.6655850057385082, 0.9688448447358833, 0.5665282114140241, 0.7453251265824534, 0.7960961578288981, 0.7742151704064646, 0.9511709473649064, 0.9252329964190595, 0.6539343559829813, 0.5391208097912679, 0.7302206845223498, 0.5358925784953117, 0.6105242434897172, 0.903830102265514, 0.6541822195524265, 0.7166172233936803, 0.9812827898923311, 0.9014105163125912, 0.7833210136095907, 0.7524349487505273, 0.5892425376933675, 0.8321436417314974, 0.5669995368466987, 0.7981550018744118, 0.6163970300775787, 0.6163245398802475, 0.6851213037980425, 0.6221799795635743, 0.8200215587543256, 0.8800228163434116, 0.6252504767446851, 0.9388372708142068, 0.6474065066154271, 0.5585177486673933, 0.9414591324677744, 0.7494190285321578, 0.8665670048851938, 0.6441625882369895, 0.5500864534070471, 0.5645315317349273, 0.9101297690499928, 0.914060329266259, 0.5370183630537323, 0.6062367316655717, 0.8123459107196331, 0.9139763783020962, 0.8401469997958583, 0.7680045167064222, 0.5236781655543681, 0.5127182912740973, 0.9844074043200753, 0.7028101781298418, 0.7750288273436847, 0.6028738265986948, 0.8017024253856293, 0.8217832925465804, 0.8669114038313149, 0.552438085395886, 0.7851719354908234, 0.9854974630650922, 0.5631888515722597, 0.8077333668559901, 0.805110839528868, 0.6808573491308427, 0.6210993576490422, 0.5582073155176236, 0.8886806979987446, 0.6346451473353911, 0.7047633982508874, 0.9828449970100417, 0.7779544679141983, 0.7244238444737225, 0.6602807303982986, 0.8489727051208034, 0.736280056727493, 0.6973148694446145, 0.9986240268748925, 0.5807384198079513, 0.9810149587070336, 0.6360983801168032, 0.7890882768283014, 0.7180638337133805, 0.5018512003036433, 0.803478032443756, 0.5179078761901904, 0.947212930362237, 0.8911588293935601, 0.9223407798106737, 0.6356717171145961, 0.9034008222528855, 0.8291715261646946, 0.9933956589015248, 0.5245271086617016, 0.8747290432021282, 0.975061617448933, 0.7408085622198496, 0.773796871774915, 0.5394209648105619, 0.5451148488755475, 0.6391951931191318, 0.776788243909841, 0.8261706644507478, 0.9047868831325021, 0.5481106526128778, 0.8047905630122272, 0.6934334265616842, 0.9951449361533032, 0.9008723073222187, 0.8303249530582817, 0.9614719279223345, 0.5939099457870023, 0.855212835446811, 0.5736026167526582, 0.5337544065275819, 0.5276587875341385, 0.9201768486160865, 0.921438288641341, 0.6116135180314728, 0.8887019191529395, 0.9724180433247355, 0.7894497591403742, 0.8836457043741688, 0.5130707105222323, 0.9008478611980439, 0.6465700465092636, 0.7981920691567168, 0.7860991150167704, 0.5188982865064249, 0.8121762235466801, 0.6930998638912795, 0.968866891125206, 0.7991567009751961, 0.6979732658740968, 0.7754261892340776, 0.8912489381979298, 0.9531509455817969, 0.675440420635226, 0.9249797978245234, 0.7000756166411202, 0.7932128659110149, 0.7441123206676281, 0.6853047031566151, 0.9982201398072461, 0.8100448187441223, 0.9654304013952628, 
0.8436742915516208, 0.7608565272353498, 0.7387329789121503, 0.6881608908337034, 0.7293064229397865, 0.6288318800765664, 0.6452694573946312, 0.745596481377305, 0.7052388202751451, 0.9398021384725406, 0.9936044527953776, 0.5053367455693586, 0.9397217810177876, 0.8727579917614927, 0.6305339096359578, 0.9701398564611007, 0.9036349319877326, 0.7360448214963455, 0.7667628545404437, 0.8908001704442619, 0.8709745942139613, 0.9829927471669158, 0.6638527760593609, 0.5323941673051724, 0.8632625240863816, 0.9488306849931083, 0.9625398788421153, 0.6234109570123262, 0.7058963191092794, 0.7956753117484123, 0.9979352047169103, 0.7582837579218544, 0.9767289083196253, 0.9777560204220709, 0.868573185281674, 0.5198583096881311, 0.7598794783061432, 0.9416989693185505, 0.9593428687847696, 0.969980158218346, 0.9130102007756591, 0.6332307042735479, 0.7803254954082977, 0.7295677033961345, 0.5794764109158461, 0.7712186890082855, 0.7029614180618526, 0.5771843279854566, 0.9381772811249327, 0.6503719945852242, 0.577763031307815, 0.7471884797307842, 0.7378569028354156, 0.9016599419918686, 0.588184676927477, 0.7508085875758775, 0.9413754730045316, 0.8830156722865798, 0.7183488445510054, 0.8345149441436648, 0.7196573979021692, 0.6598014882326657, 0.5753303679412307, 0.5979103739218312, 0.7487044684446251, 0.6941474295156114, 0.7083503731318006, 0.5839368425388733, 0.7544146040739399, 0.5850350519375349, 0.9592364248777274, 0.5388398060977094, 0.5691401513402823, 0.9789920570884498, 0.6208860235227177, 0.697479624994208, 0.9097855066223199, 0.8412782684620281, 0.6634511613869263, 0.6714209893235005, 0.833730302160608, 0.7431177031941008, 0.8940578270865556, 0.8466025019568351, 0.8390888910449469, 0.6518282611776997, 0.650333591307825, 0.8613617511772558, 0.6758351232236768, 0.6877408117881705, 0.5401824116330747, 0.7555265991133051, 0.7748705234614401, 0.9722725604800513, 0.573648991905642, 0.8900860682195646, 0.5151775873549511, 0.9613551364001022, 0.7821282303822312, 0.9179684193360822, 0.954375487520639, 0.6375117026710135, 0.9969647853257733, 0.8753013808757907, 0.8767860462910737, 0.710778000695732, 0.5742968745965789, 0.8162380259980637, 0.9158520010548006, 0.7807470860740728, 0.8979230455024267, 0.9936572275271202, 0.7083052038787085, 0.7902521609007434, 0.6434419180679427, 0.7357963130984683, 0.6065238357981039, 0.9800933376603832, 0.7692620281134402, 0.7579853441464142, 0.6683577412390953, 0.7543696139052103, 0.6638397343036877, 0.5727361366627709, 0.5332073131414556, 0.9311153557504835, 0.6679040016288984, 0.5278687396235189, 0.7877730191756949, 0.9039152602573776, 0.7982597461978624, 0.5589322450917251, 0.6458198255012435, 0.9025203932036321, 0.7319232465980791, 0.70737607160659, 0.5574214483185417, 0.7216494100920376, 0.7647229285542485, 0.5899485792738064, 0.7673460156090166, 0.9875782996529305, 0.98618450010204, 0.6010520895145822, 0.5526428261395229, 0.8709623139063545, 0.8710733729535005, 0.5064902455823086, 0.604306883624546, 0.7007514757499618, 0.8361132049149278, 0.8996417257909157, 0.7520941596671157, 0.5528428164716568, 0.8768181301526347, 0.8305087292664315, 0.7446329381927888, 0.7001460895625479, 0.5835099802245496, 0.8502155045387971, 0.6252382019572444, 0.6063793722532907, 0.8413295403100353, 0.914332814387289, 0.6816095934237016, 0.5534579898743051, 0.972733984563328, 0.5850269979411842, 0.7214791777950158, 0.5757249107479367, 0.5364819227699389, 0.7672358014717838, 0.6567572953392699, 0.5275593403499311, 0.5350875316422268, 0.7573155608493425, 0.9160726940427522, 0.9544130961581814, 
0.9077428702601527, 0.5449241445444004, 0.7823543751102859, 0.8966934734508378, 0.5880343360531544, 0.6688908267964186, 0.7003690216784018, 0.6180258104395422, 0.5460745121723951, 0.6952546253838328, 0.9476619625512321, 0.6699261023972627, 0.854368404283271, 0.9812526164066645, 0.6185533158716691, 0.7189393189370077, 0.7301822144253631, 0.5276956853595252, 0.7486260567960925, 0.7979547086510521, 0.5505542475907209, 0.5067118481511903, 0.6802081783472471, 0.5826107562880349, 0.9973960968257876, 0.5442625662027267, 0.5933856100950915, 0.8004133938603625, 0.6501603018478066, 0.9136498268435296, 0.973223525863065, 0.5340426222197758, 0.8334922180033413, 0.5680340101986648, 0.8483948479170726, 0.6976033081306539, 0.944263324299244, 0.6092948520069132, 0.9479844073486698, 0.7658755727841363, 0.9366904211777938, 0.8237204435444285, 0.6648709396861319, 0.6071892963529493, 0.9023091319716239, 0.6593248585704261, 0.7696253717257049, 0.8621859509734695, 0.8541978830798401, 0.7989712613329538, 0.822594641126195, 0.7849292712487492, 0.6826915901698933, 0.9625566265883636, 0.8747900625701517, 0.786501169606096, 0.5611417539844026, 0.7335236065041818, 0.7012644465242514, 0.8997867341425713, 0.5497858689040093, 0.56753509286922, 0.5545375466589157, 0.8377569370145683, 0.6003668191791804, 0.5692010022069849, 0.7787286890731056, 0.6696248960097018, 0.7004274113592284, 0.9058876092872112, 0.7550524871368689, 0.9491336146987548, 0.7638181162883693, 0.6683507302712779, 0.7563238899322937, 0.8181436562956627, 0.721340716306976, 0.7563906068387825, 0.5658492377708144, 0.6628779787012683, 0.5197302897117531, 0.7040943788443303, 0.7954878739004267, 0.8543947776280933, 0.7154412310828782, 0.8203192889255231, 0.7215129650498571, 0.5467154663022733, 0.9572352265178746, 0.7429350000460719, 0.9806098094503721, 0.9170709116151697, 0.9858593894620771, 0.6673810311766473, 0.6501687179182749, 0.8266081311834439, 0.5306604446954801, 0.6716790080972266, 0.7774885192180188, 0.9345563707193072, 0.6746297844713616, 0.9164054466096763, 0.8030842107816305, 0.5202398620730274, 0.6534579779571236, 0.7731552883763311, 0.8394929989774749, 0.9285368502247304, 0.8416179063204652, 0.572874709828308, 0.8422790217454043, 0.9170645353878004, 0.7914905149517789, 0.6183538300151361, 0.535795338483187, 0.943304840316853, 0.8005439159146318, 0.7112905374991182, 0.7443576891671735, 0.7710353683658455, 0.5921313216562778, 0.9566716460998531, 0.7661440351783944, 0.8224724469269697, 0.591354294756712, 0.6470669947876853, 0.5992991607180007, 0.7960556235589228, 0.8900459170531745, 0.9208182611947631, 0.7460974983454941, 0.9433293109296301, 0.9137401385463397, 0.51672588425317, 0.6108843491768661, 0.938590669229814, 0.5062922604442429, 0.9979074647141006, 0.7445964931827995, 0.6486125891994764, 0.9458553589203578, 0.6362973443860189, 0.550696422561825, 0.677478613167537, 0.7770316211587592, 0.5121534214133081, 0.5209739824821442, 0.8021146091983891, 0.9149970984341234, 0.900621548585335, 0.581359362457005, 0.6829404947328868, 0.6538135938967646, 0.6031098722170811, 0.5997375306113115, 0.7659769529842986, 0.7253929493668845, 0.7720305679776065, 0.5660331843801046, 0.9207277424036889, 0.5217060734655707, 0.9327566887731054, 0.7744635363223176, 0.8030699714017145, 0.5705856962071845, 0.9242749124292089, 0.7583590442944816, 0.902113197251282, 0.7348835333342476, 0.9333935326712411, 0.7667268289618221, 0.7560830480587016, 0.7058091809979673, 0.8152982252443272, 0.9929260438158979, 0.939069166399255, 0.8397961524188727, 0.8857540839785139, 
0.7617290243853512, 0.9036188146321261, 0.8140801657215706, 0.9035303391780497, 0.5467023636254116, 0.9827284504198194, 0.9019867528844698, 0.7851386858698965, 0.891608871059943, 0.6174531632188842, 0.8900252379026531, 0.861549894770399, 0.7768943405300803, 0.934528355613536, 0.6441735201545122, 0.8388281831649465, 0.9770759599730278, 0.781010404395656, 0.8671511065214479, 0.6969580503977257, 0.7135742293138223, 0.6293419320786222, 0.7918855875656434, 0.5607649307356393, 0.6175899417005879, 0.6094739708223893, 0.515101135854747, 0.5443531279216514, 0.5537161990710757, 0.9989882345402354, 0.8428042677641336, 0.8189260499995997, 0.85777781553411, 0.8290312512273856, 0.7460253275425582, 0.542399986198209, 0.510260344703211, 0.823531965249229, 0.8414759630516149, 0.7989750593387412, 0.9138964041779954, 0.9153969286541697, 0.9720058922426311, 0.9071481959427632, 0.6728902463793919, 0.730625471398446, 0.9005255159632273, 0.770034625745865, 0.9214041906779316, 0.8341368186084965, 0.846986104149309, 0.8391202149537814, 0.8005140216882831, 0.9095236492176553, 0.785325810916474, 0.5489304452867346, 0.792587351679225, 0.5293739123157402, 0.8270659452138094, 0.7514625588864067, 0.8202507187125352, 0.817561435301652, 0.7467330550147773, 0.9277406245496785, 0.752885451613533, 0.6484528604224584, 0.5542351877021723, 0.9954285111739991, 0.8116745128136178, 0.6533920953399586, 0.7281161122485529, 0.7324037817436879, 0.8838107854590571, 0.9118347124806095, 0.9196970680584746, 0.9611010623607499, 0.7739342010009924, 0.7441658962211481, 0.5651958978313366, 0.5873684319377303, 0.9284159627352556, 0.6346351906027625, 0.671606134138911, 0.6610032738031324, 0.5429595131303434, 0.7504688934505459, 0.7032971483216719, 0.6731852664133458, 0.8066455590689368, 0.8382349784506453, 0.5483299859843886, 0.5293054391149836, 0.8457193158083962, 0.8690082028655831, 0.705650003032489, 0.8086137506139984, 0.8273093899057027, 0.6907198371155754, 0.8545508898689513, 0.7073294982752711, 0.5572109504914221, 0.8754942214317016, 0.6715919119662541, 0.9903331411493443, 0.6204501247263183, 0.9473818186619448, 0.685159600255169, 0.9330679448884298, 0.9890905848649757, 0.5861603771812822, 0.7225951152606965, 0.5231181671218508, 0.6195905568078993, 0.8943872210741156, 0.8350570405630343, 0.7752029062354104, 0.9896247165165813, 0.7553432155180784, 0.768291212312699, 0.7521423346222214, 0.8637565757601864, 0.8082660014510369, 0.6078965201018187, 0.749710619165771, 0.5156610705791325, 0.5769700920105281, 0.807745415766798, 0.5932623989049783, 0.7196948628959172, 0.7864965525231593, 0.7489442150358967, 0.614675169026283, 0.8264502384613146, 0.5224662845843853, 0.9624405536426044, 0.5903287545609569, 0.6253542175541514, 0.5980550970355402, 0.9135905955780861, 0.9186593526992112, 0.9741657915359625, 0.9804192433752115, 0.605254972879977, 0.9075233293234832, 0.7723452615061284, 0.9603993162389632, 0.9493959944789986, 0.5971390898517117, 0.6684630958599818, 0.8253076311191903, 0.5061247385704244, 0.5181063613012312, 0.617573262151663, 0.9123165920730871, 0.892528260586356, 0.8796924221722413, 0.5604717861796283, 0.7120610872394912, 0.7471513994320598, 0.9729386907784328, 0.5994833256549406, 0.8954767169224921, 0.7864761705897556, 0.6477691102954963, 0.71664572458387, 0.6272676403149164, 0.5743462806317712, 0.9679693801471798, 0.5046917393992689, 0.7715937702297035, 0.6316605339232974, 0.5845468925124607, 0.9776573595186898, 0.6847486281023281, 0.8378915597059021, 0.9333256439809108, 0.8735461081647014, 0.7830294053644733, 0.5051969124367539, 
0.5417345550664883, 0.9668244465385268, 0.6399789463298933, 0.7788904521198569, 0.5605660406907831, 0.512295152419642, 0.9676386332591524, 0.6803892954850141, 0.887713485351433, 0.9477586222264694, 0.7822455691943133, 0.7676570623958365, 0.5549812163456553, 0.9049934574089542, 0.7005695789577748, 0.7801350696720641, 0.5554540825773526, 0.6277682552189735, 0.9416706129794157, 0.581110406746204, 0.9998795779380831, 0.5071531498280116, 0.6509534128833041, 0.6067852794486263, 0.7384474831602019, 0.8151115376246312, 0.9296540574134302, 0.510953035283791, 0.9706166906530969, 0.9333234214105983, 0.6435319363317172, 0.70530909691761, 0.7233121331592953, 0.9515165826723813, 0.7691306327763093, 0.5546574974070104, 0.725567082663143, 0.8590032689410457, 0.7896482674839831, 0.514549077463718, 0.9567610257809955, 0.7068365275677078, 0.753496519622916, 0.8777238064777778, 0.6610077151679061, 0.5089035225013274, 0.6626949464231799, 0.6114582437987941, 0.7819345887698279, 0.9089890050359167, 0.6062580040903423, 0.688161429412204, 0.6596286572056427, 0.7614275290108808, 0.5692654273605788, 0.874484136443246, 0.6309931318539849, 0.6649364549409689, 0.8643315637600362, 0.7978171182778633, 0.682262225740459, 0.8247772665775043, 0.5191093195713357, 0.8332145820313304, 0.7527318190273292, 0.7591958004315374, 0.8971370237457637, 0.6605427289661666, 0.706996608440069, 0.7476155106276041, 0.7961045588703075, 0.7251426775297496, 0.9237054566370994, 0.8636958484883313, 0.8531755443049407, 0.6766576759781902, 0.8906338143674815, 0.727860749212329, 0.9436405643444785, 0.7932765559480011, 0.8096944839988567, 0.8770889006395786, 0.9320152107742977, 0.5529168603160912, 0.7229712145846612, 0.9731433496675921, 0.9897250068618613, 0.7727447522659878, 0.6724110273846232, 0.7319352486760768, 0.9530533465031759, 0.6590286594396747, 0.9369588083347946, 0.832879954190183, 0.5927141407708627, 0.7447633910861983, 0.8950945541851583, 0.90578923744337, 0.6066790960173287, 0.9871076762463247, 0.7306218015326649, 0.6600472091752305, 0.8636243118331752, 0.5329980997587327, 0.5674797478322691, 0.8056050333728324, 0.975299755054505, 0.6799768655548581, 0.9487663115412681, 0.5836749986687946, 0.8884754182827872, 0.6787739338007341, 0.6951765689914934, 0.9073007185067242, 0.5134861220336069, 0.9810025897139225, 0.8308775611299204, 0.6335431782637555, 0.8714570561751791, 0.5488842286345786, 0.5768300370859633, 0.5204718756717979, 0.7953812422624443, 0.662900508897597, 0.8369158235480018, 0.9822863051645975, 0.7938374748267347, 0.6588597952663638, 0.6161893439852649, 0.5546863378652598, 0.9893054987702137, 0.9520460972034095, 0.5829792492698787, 0.5637006315632925, 0.7223580573625948, 0.724684393965551, 0.5693430208400079, 0.836961887286368, 0.6824498749397339, 0.6437860844684828, 0.7557532408520562, 0.6974266986586238, 0.9792910815407916, 0.8741350965891597, 0.5708852199310624, 0.8799291502610319, 0.8233852043619405, 0.8171978338258923, 0.978865215805058, 0.5529219712500684, 0.8803881276563228, 0.8581786307250284, 0.87200441007383, 0.7080231337489273, 0.7317361029353358, 0.6775152808067225, 0.9837716837573258, 0.9428325852019912, 0.7881054949340741, 0.6604412680923197, 0.6123692497857589, 0.8279384531631737, 0.760512428402689, 0.5972164710035721, 0.6479183484564794, 0.5762187023869051, 0.54758366562878, 0.9115206731689018, 0.8832833866600073, 0.7325905421051581, 0.8091193299835803, 0.839218824437634, 0.9787852124836454, 0.8053339152700596, 0.8356852532425556, 0.8240507869938187, 0.9740237752640535, 0.9931761245615565, 0.5950118822625011, 
0.5289375126691105, 0.8559781842765348, 0.6198372847777663, 0.5490619246624096, 0.6048304538025988, 0.8343607340914828, 0.9566811589713188, 0.7696567609185765, 0.7828540736789713, 0.6809236749253211, 0.9799225203251257, 0.9821625866090269, 0.6737628581156196, 0.9340461352902343, 0.742056025076328, 0.9507728944675005, 0.5587624479893024, 0.7457438426699534, 0.9470554148049211, 0.9933152408253991, 0.7069231524303603, 0.889057519778365, 0.5097479559011764, 0.6545222685367909, 0.6271453375362639, 0.5818624862242291, 0.5690300348310493, 0.5181433736063241, 0.5451720251797354, 0.8371573837955228, 0.6699579486628621, 0.9761444889402684, 0.7365151551551907, 0.8382031221102908, 0.7970965926296717, 0.7011588850450396, 0.5822560955915597, 0.7900619160887581, 0.7989130984535968, 0.8065475006843086, 0.6581855563522824, 0.5018047446237279, 0.7569519823323595, 0.5147426583559769, 0.9786745996656923, 0.8156139183305978, 0.7356477244192167, 0.7541804835470702, 0.734757208003097, 0.5045587299945418, 0.5303455895669095, 0.5713755070179968, 0.6372861417531028, 0.9627696140651487, 0.9973125164045592, 0.8869422560109821, 0.8065892369376447, 0.8126287976684519, 0.8669461386946946, 0.5256207364711027, 0.9120951454103885, 0.5263114869658219, 0.8866379730033018, 0.6833837778724448, 0.7994810952293212, 0.8623676753394862, 0.8066674767874644, 0.5092868411919669, 0.6689057761528561, 0.7649170552457144, 0.5051594478259663, 0.5598542495452838, 0.8634237398019453, 0.8154254228316611, 0.9695870547033658, 0.5342552018407609, 0.6869483631022941, 0.8797981981981702, 0.6230704386581206, 0.6166376633018309, 0.5186378750513103, 0.6265359989878394, 0.8546445377605627, 0.5947277275861709, 0.9194109160605044, 0.5521957790993656, 0.9481596704170852, 0.6346443784157285, 0.902720305805383, 0.8798004121069422, 0.5490523252301799, 0.9543544676678031, 0.9931011629862341, 0.777460798459517, 0.5655936415419727, 0.7069151465705608, 0.8640018719038445, 0.7375912152650734, 0.9630426941457727, 0.5800725176796135, 0.7067429130726242, 0.5146532146504491, 0.6435235663647425, 0.5471384627308264, 0.7105161356710972, 0.958679319050755, 0.8367101807249165, 0.7070243753339476, 0.7229045329606405, 0.7972691951544764, 0.6798233598993134, 0.7176235919063468, 0.91170447398034, 0.736465366477916, 0.9022858658245363, 0.5512810534115868, 0.7789875815921998, 0.783733420400949, 0.9759962432645646, 0.9464786383038353, 0.7169211327302041, 0.7936240875860956, 0.7901108840341563, 0.8903502875435869, 0.9144223752598559, 0.7941804934130069, 0.7864643503028335, 0.6562524526404936, 0.9525407875730805, 0.9178651810410354, 0.9844773720148667, 0.7428850534569553, 0.9259735016821641, 0.9310059241538635, 0.5745888946490565, 0.9680115523000683, 0.8783056637959048, 0.9748459952839612, 0.5048248469836063, 0.6900185306083388, 0.6809436344827771, 0.8603493764130179, 0.6040203265327851, 0.724434059439697, 0.8744526760103659, 0.8959109158829978, 0.8633346161056987, 0.5168716993171554, 0.5863811811012163, 0.7361499793485369, 0.8679139925211152, 0.6337635401464927, 0.9028995554897881, 0.8993560319581759, 0.8465668761919004, 0.9101853799598072, 0.6242008060943864, 0.9907173990737392, 0.8466624833129868, 0.7814457499248777, 0.8801828320798788, 0.5777127154023846, 0.8912307235422683, 0.5955918883919609, 0.5439038370871672, 0.8380609814805291, 0.689369915728844, 0.6116051484545539, 0.8768057782314285, 0.6730714909100632, 0.978129673361231, 0.8680020507823825, 0.8188962649611309, 0.6342455228119152, 0.9265904641638976, 0.5396357177335984, 0.7805362185041311, 0.7666336630606617, 
0.8477902985070589, 0.7685461610970374, 0.9244424645284983, 0.6564172428911061, 0.700061548600162, 0.9542737293493, 0.6587555416589623, 0.807877736582622, 0.5490998315671063, 0.6732663742778978, 0.9272557593484276, 0.8492664615678696, 0.6527384739289815, 0.5789324406344215, 0.8595752544462529, 0.7199049967515996, 0.9985635174173131, 0.9693653293979474, 0.6860893255624885, 0.9580263119461534, 0.5969360287138548, 0.7770003017609765, 0.8089293082810078, 0.5146350564179565, 0.7228140987150389, 0.5949030597265885, 0.8094936795387526, 0.7596294652669495, 0.7542210783324407, 0.5326594788760274, 0.7699999140622817, 0.5919471535024157, 0.8686730016877595, 0.6362015381767266, 0.538337631615097, 0.8343494614105527, 0.6756806600243378, 0.5756201682482686, 0.7491075021429778, 0.9815118077059359, 0.9395633546614336, 0.5956239069759983, 0.7425720690787192, 0.7154305443662787, 0.9968382502660822, 0.8981249912967668, 0.7631937763790451, 0.8237530318349429, 0.7704249714564485, 0.8120336479093986, 0.9509689306344252, 0.7322956776295946, 0.6941067520580373, 0.6665363339867199, 0.7711806098103131, 0.8457683611430785, 0.7583070012806803, 0.5069869038150167, 0.7180279413534858, 0.6030709990528049, 0.6911238352269893, 0.6732640762654942, 0.8742941663241869, 0.6499021865080621, 0.6936150371355578, 0.9732765132731132, 0.6987618545716321, 0.7874824292693328, 0.7954977566993784, 0.7322148904092525, 0.8857213600048387, 0.620660636875375, 0.734998084701467, 0.7469864488538172, 0.5922974245450295, 0.7567669374534955, 0.8217452846132344, 0.5171538297641295, 0.6274506760878458, 0.5352762722426246, 0.6441919252895842, 0.7643891481051122, 0.655923680364914, 0.886130697520665, 0.705279483649611, 0.9818570281155916, 0.6142002444406094, 0.605648154807536, 0.8269870485087334, 0.887489980653451, 0.9328236488725601, 0.7657944523881715, 0.9156619629829084, 0.7577306265038513, 0.5584383874842338, 0.8714261335992777, 0.7313762972170608, 0.8589050303191069, 0.9286434649706492, 0.5664020281358664, 0.9802593059768752, 0.6323747465045446, 0.9868530100953928, 0.6412094106059348, 0.9515638805601974, 0.5781493926003649, 0.8752831106883118, 0.9005257805109437, 0.7431371636778721, 0.5775509823933744, 0.981761118342831, 0.7273972304373442, 0.5856572199992669, 0.9549697612498349, 0.7338728086464137, 0.7460960000186192, 0.8738883292627051, 0.7184570670290089, 0.7894762901135266, 0.6484698284503627, 0.7021811743370342, 0.6671495327232173, 0.7109590691741912, 0.735793837626517, 0.9520369292813727, 0.9368124013178012, 0.7011508992779548, 0.9644674091159212, 0.9930851310438813, 0.9628599473343858, 0.9771180645076885, 0.8311646659613939, 0.9093548689193252, 0.9844447754975272, 0.5180229700419133, 0.5830273197114607, 0.5727743005966157, 0.8470941450330498, 0.6408640082631372, 0.9347396973669702, 0.7552507321465418, 0.5280512426857488, 0.8679893575086652, 0.7031834116245146, 0.9999728660875287, 0.8028559474112216, 0.8345880264591095, 0.5402199393068219, 0.5671105727456287, 0.796304015318111, 0.5407857408765387, 0.9884319762394158, 0.9043779451422389, 0.8022965108889071, 0.8236880988076585, 0.7507299674643768, 0.8152673871444787, 0.9270064191521026, 0.9990834099311146, 0.6656452983711956, 0.8635584436024839, 0.9383011381179119, 0.7912127667046729, 0.6257386764408103, 0.9812754327329374, 0.617003191643571, 0.9218170184367902, 0.8259013291318453, 0.681465407627226, 0.8376392239787434, 0.5882225432120576, 0.7290551371901529, 0.9041454390975521, 0.9861050830856393, 0.7914331636113184, 0.6636479437494521, 0.5778125779063306, 0.5186231066985278, 
0.9890878737411377, 0.6275106175980223, 0.5758846237890598, 0.5808122141634511, 0.8795260394759081, 0.6427466114882376, 0.6364578295744638, 0.5662334711144658, 0.887498948547137, 0.6367025310575114, 0.7374418014910966, 0.6558064521544521, 0.5098887775421721, 0.7041687933642886, 0.8315753606671847, 0.6176636425624564, 0.9209163927534108, 0.9128771114681451, 0.8245762516849193, 0.7065620625876674, 0.6288317225786085, 0.877175204648489, 0.8761680345614649, 0.554608048213807, 0.8381300165721286, 0.501558645329331, 0.748445132108857, 0.9298802995336695, 0.8724780864642443, 0.9288990667712753, 0.7156604187725404, 0.781087990251971, 0.9235982628380839, 0.8134755225456809, 0.9348047742259575, 0.916640026365166, 0.9164582484883936, 0.5567793533624232, 0.8098510299985918, 0.6331936574214978, 0.5476240241078518, 0.9993718673619383, 0.754133011811504, 0.6598927729760993, 0.948291858279573, 0.7690700415926701, 0.5477038994489942, 0.7260116056545639, 0.9549052178595907, 0.7231979598968336, 0.6208852995712937, 0.6841627497487581, 0.7625954119820191, 0.9896578092179137, 0.7298919955436527, 0.9245976754745124, 0.7917930645614574, 0.9331379420674093, 0.6347274351988547, 0.7638102505408855, 0.9149387642088896, 0.880339704359326, 0.6322539669911438, 0.8249827733167769, 0.6315705515873904, 0.9491516734204624, 0.6114884284480185, 0.5910510516556442, 0.520224421106239, 0.8497498605060272, 0.8095044790220168, 0.9217699632374752, 0.6235921911073896, 0.7947277118678993, 0.6054092256584153, 0.7144545948111161, 0.9198234226599118, 0.9377742726253322, 0.523967084634474, 0.7508308725479722, 0.6099129238255967, 0.9899167351070215, 0.5387228842776299, 0.8616417008046178, 0.6283461282469319, 0.7038784339452411, 0.7262584812630147, 0.8559436255019984, 0.9333244081005722, 0.8684241808601367, 0.9735236774547937, 0.7634734226747131, 0.7070722217570924, 0.7868115576795063, 0.6514313640781826, 0.5510085621127909, 0.5528774605750735, 0.9462516815341069, 0.713381775953112, 0.8576945003873562, 0.7184572334267079, 0.7123423856799495, 0.9229596618394008, 0.9960311686251071, 0.7275633309403311, 0.7997294678764446, 0.720502747646887, 0.9131331784300525, 0.8807644856929224, 0.9698916050350079, 0.6798356738593773, 0.6217116369418392, 0.6447914409371804, 0.5859399947768993, 0.7465943563795356, 0.6885755419362358, 0.640144162092532, 0.965534846121196, 0.8625278057086292, 0.556617006226183, 0.8524144897116295, 0.8182895104182293, 0.8680952952592913, 0.5951411185876139, 0.5698907301684174, 0.9965195547204829, 0.5845090760067999, 0.7973168732427041, 0.7159297239065208, 0.9587418087742497, 0.6298805441904416, 0.8422190710013395, 0.8863777734152868, 0.5715672485349452, 0.748368371407697, 0.8318585809964365, 0.5013859547358902, 0.8673809950442855, 0.7744223108616175, 0.7555840604549188, 0.5373812348395822, 0.5857326877046667, 0.5144902226155832, 0.8053972779516523, 0.9725703869895006, 0.7424875104264131, 0.5067951607846526, 0.6428089647315594, 0.695222581239705, 0.5522497964621969, 0.5783687677846919, 0.6520303801481416, 0.9127341849478945, 0.5065082530209337, 0.7989402845174212, 0.7724124603236244, 0.8970213171822643, 0.6861530521017105, 0.6617661553003059, 0.891922749393079, 0.733509908039073, 0.8936646334099936, 0.6938047893768016, 0.9676046669119243, 0.6053888226376785, 0.9244752338014608, 0.8863946224542801, 0.7681190163262692, 0.5166947749448361, 0.8823969910345482, 0.7383638469614456, 0.8856475430353085, 0.910778632984675, 0.8724518573926602, 0.6365800889850816, 0.7109834802253054, 0.9121069073505311, 0.5121735893319554, 
0.8207268630591772, 0.7069732023838176, 0.6805295629318702, 0.9444179059990174, 0.9990157375404339, 0.6019415284493639, 0.5055515817018845, 0.536144605456282, 0.8257571912478197, 0.7720660031765909, 0.5634141980704449, 0.7247427112535103, 0.9106268795079893, 0.8662500256401682, 0.5540867588109475, 0.6407855086930864, 0.655697770775693, 0.9748494648665471, 0.6757783127468109, 0.966283689337771, 0.6328087613147584, 0.6108118821958863, 0.934415943823405, 0.7525769685894936, 0.551788839128966, 0.8532773998929755, 0.9986651178314444, 0.532006708390215, 0.6575820481818266, 0.8451469746374114, 0.9053523752343307, 0.9467457868478049, 0.547255901486583, 0.6601299774805685, 0.9384494518703321, 0.5529869182720059, 0.8650132847003973, 0.5809219314126022, 0.6250242419417851, 0.6509530864830715, 0.8451843740667544, 0.5883170393737849, 0.6972266332587103, 0.7347631307366861, 0.6476330925662354, 0.687745763438558, 0.6644372633265089, 0.5108479762049543, 0.6974638128847298, 0.573480872087137, 0.6212509567621658, 0.8159635626271795, 0.7564560370959083, 0.8267236878082831, 0.8289178368696328, 0.8769370987234637, 0.6083276834302205, 0.5958331246320492, 0.5470784764086558, 0.6630917509729672, 0.5443216363292709, 0.7721469362791709, 0.9960646202870738, 0.8422677929623476, 0.7633137803128115, 0.9782915097954004, 0.5427112210990855, 0.9542208691935687, 0.6886269194743009, 0.8105798525699153, 0.6137464414142324, 0.6552488338929965, 0.5647010523353941, 0.641936031843235, 0.6946078755853831, 0.8832321884231562, 0.5878894492302429, 0.7507466215449854, 0.5389001185389588, 0.8326984893569314, 0.777693951596017, 0.6257982561432613, 0.7286296627464895, 0.5799615308375898, 0.9330435950536866, 0.9343156849920031, 0.8510445372813951, 0.824281477928634, 0.7474531460213685, 0.6460310921610075, 0.652562288958389, 0.7770021478623697, 0.9791559593393566, 0.7066570907433651, 0.7404642967511094, 0.915598832298995, 0.6549000882336721, 0.644576087636844, 0.6584895861371702, 0.7494330517996384, 0.6384452459070249, 0.971658386436084, 0.7784608753416428, 0.9087118100048681, 0.9659569028015902, 0.8223280752871056, 0.7990285585419121, 0.8610747028121333, 0.6720114557459784, 0.9117327998312855, 0.8170186171430238, 0.9110856060391874, 0.6064471536017452, 0.7318648284038936, 0.6064927975779224, 0.6974981862164347, 0.8304882132341257, 0.9887585375018222, 0.5582847346892855, 0.9434522189453984, 0.6100870505331326, 0.7895830861033359, 0.7403434727692486, 0.9348475147874735, 0.8390849887207219, 0.508307576282874, 0.827740446923393, 0.9899963669794161, 0.5268115418799011, 0.5495696025510243, 0.8174793874293795, 0.5239272526889847, 0.7124269781624796, 0.9400258342865182, 0.5410714952632729, 0.9792741636356486, 0.7680762763018079, 0.5803305528157401, 0.8585520216969782, 0.5352943254131031, 0.6943019587081634, 0.514877785345754, 0.6260799151252154, 0.6392316083054342, 0.8574710530745562, 0.5940103327229413, 0.8685870597275481, 0.6207446085227611, 0.9000712369129003, 0.9483270496899965, 0.7908493301894062, 0.6856660172438929, 0.9765804305811796, 0.9188213211500387, 0.6983703456471102, 0.6263355897574212, 0.535219857361493, 0.809866948201803, 0.8996749260850472, 0.6774785312280267, 0.5093973987875141, 0.6641533525729038, 0.5553036975634971, 0.7658066463113495, 0.6738018809679437, 0.8980896873691, 0.8212924576560029, 0.5687448258716743, 0.5899457443038727, 0.6396537843001906, 0.8916991223144561, 0.6176986551222454, 0.6724900677038548, 0.6697095217351017, 0.885010470102074, 0.8825754772666274, 0.9972816383247338, 0.9614515966791926, 0.5179428698013692, 
0.6458716426985314, 0.6992178586158968, 0.6212178268685749, 0.6653059060064983, 0.521874857685959, 0.6929554354654772, 0.6716975259471956, 0.8906210865306023, 0.674515423440637, 0.842225665911988, 0.5555304367008632, 0.6705745188721852, 0.7938126095000784, 0.9657930963034723, 0.6878603430895267, 0.6529771357421242, 0.5553179552443095, 0.6736146870011457, 0.5201879417895816, 0.8805175070723285, 0.7167347139117826, 0.6189476820718061, 0.5109611303914623, 0.5626061824271404, 0.5671178917684991, 0.5822523636546284, 0.7769161048414299, 0.9358702680709181, 0.5577732493075203, 0.6497998162451883, 0.7638871352984999, 0.6329629385979526, 0.6858768342422081, 0.9213109776817622, 0.9389299508293628, 0.7742933274111782, 0.6041845124241805, 0.799180299192398, 0.5420165263444549, 0.9715216748632493, 0.5311495597629001, 0.5025477077161122, 0.5569836544383839, 0.9686297452692627, 0.8763849116611057, 0.6887883432004733, 0.6516708429999932, 0.7854834293860016, 0.6421068057942882, 0.5587133904421753, 0.5173411464119198, 0.5593825587742427, 0.5501117817756719, 0.6423051188938742, 0.5934408022037668, 0.972904788461578, 0.8905737211692906, 0.8061552168579224, 0.7242356084435998, 0.8215895561970789, 0.7954729579510433, 0.6192574656272241, 0.9593568597716282, 0.9102017028341398, 0.6983819704313335, 0.9063768034474678, 0.740280817081831, 0.6172778944007216, 0.6808367966634448, 0.8975908794035696, 0.572147079286146, 0.619520884473197, 0.5271721894235619, 0.9384557646773504, 0.7541992581932402, 0.5379752023271718, 0.7272150426242809, 0.9596032809000303, 0.9780024947400857, 0.8308810030179176, 0.5229640526271784, 0.9785657756807455, 0.9862666240834506, 0.9297264049447922, 0.614750870509662, 0.9206426608195313, 0.9892126447973957, 0.5835563579995111, 0.8231009771168398, 0.5814460839297517, 0.654337494638498, 0.6130351588628368, 0.8645624983591264, 0.7715220542196328, 0.9843596912923173, 0.5838913897750408, 0.948424818012455, 0.6691096277164525, 0.7375075133186222, 0.5699809144264969, 0.8509199837212886, 0.8726708065959075, 0.6023393170796506, 0.9453937330776331, 0.9692719920336377, 0.8619385747637986, 0.8598620685145281, 0.7621018190148284, 0.5829649564299816, 0.6544936620780588, 0.9318163334603144, 0.7888903498409228, 0.919268724679065, 0.6466017196928089, 0.9517965555713161, 0.9066443383210316, 0.5823760864435448, 0.8253687875543247, 0.5309485317971765, 0.8813231022417395, 0.8684764650516927, 0.5305354586572975, 0.8010120230590927, 0.6515963757541372, 0.6939462880629641, 0.7779077507068506, 0.6519176084458396, 0.9973354734229636, 0.69558604652423, 0.7403033279716278, 0.6266660730956128, 0.7845683134145913, 0.8924369975243188, 0.7451005377277966, 0.6678729044858713, 0.9878756018421113, 0.9982380305385424, 0.8205530714859642, 0.9005060023080076, 0.5448344326685193, 0.7467928169124635, 0.5645954847799738, 0.6852258078651019, 0.8136399919667003, 0.6535867888445563, 0.6388520939202006, 0.9413234727487838, 0.8251148351271446, 0.8587997393910023, 0.9821604471167107, 0.6502002430488762, 0.6372546735528262, 0.6119269096585362, 0.714386370109676, 0.6729654340814765, 0.8853657302938767, 0.8620592846906303, 0.9531539744562754, 0.5690723447707369, 0.9915848383240959, 0.8610858535435809, 0.9279540015302579, 0.7606387614690593, 0.8742952704314373, 0.9021353314200078, 0.8424591314971492, 0.8324801243650101, 0.841959673974177, 0.812586526516916, 0.8860448194426664, 0.9447546847670494, 0.8905482347149242, 0.5854598983395367, 0.6595675887977281, 0.9595705630517264, 0.8205729162580688, 0.7332826656119501, 0.8277780416768445, 
0.853843206619471, 0.9697040199869242, 0.6431313316453253, 0.940966536616924, 0.5039207888323194, 0.8118809192251186, 0.9730143159765838, 0.69030063909415, 0.5633663960784416, 0.5625041430064642, 0.8695394521232119, 0.5677623715507125, 0.9253237037450515, 0.5827611555854666, 0.9746065481922803, 0.8740554336476879, 0.5937445475176004, 0.8004831997113873, 0.9853951231390151, 0.6303598266878185, 0.7134655731217103, 0.6712005244808303, 0.5047961229459063, 0.6782522988502386, 0.5580027423848344, 0.6516174661110543, 0.7609535162716914, 0.6255064277778388, 0.9995630858087493, 0.7777337675455454, 0.9028381210197616, 0.7809058442629301, 0.6955546837083117, 0.5971413324759302, 0.5193984700234073, 0.9264914213660417, 0.998878042183359, 0.8152123365338648, 0.645200567109361, 0.5913849043820043, 0.9281195087704646, 0.6183482148548842, 0.7928355485988103, 0.8680121824295421, 0.9114863315953611, 0.911829853697415, 0.9961262159577486, 0.6890721975318735, 0.550814326035635, 0.84040862878113, 0.9095906059579562, 0.8794522661348058, 0.5086950883882884, 0.73738305066707, 0.9934896617607336, 0.5066266052835346, 0.6402215166262903, 0.5634933346039623, 0.8357575275744097, 0.7471629845648465, 0.7299624133096554, 0.5849862221241143, 0.5209731055290912, 0.939702131983081, 0.6883736337071296, 0.6144965115436358, 0.6522437862620436, 0.8587349310147792, 0.7489200595859344, 0.7803984758338863, 0.7924887558033862, 0.7770623788861637, 0.7734271293910542, 0.5357752757576888, 0.6676981608404828, 0.6358526424424067, 0.711994247478485, 0.7417364091085609, 0.9385334603858646, 0.9523955644382778, 0.6781011377975206, 0.9387831576516374, 0.649493513566189, 0.7184956816150068, 0.9036961007365656, 0.8886483250802912, 0.6884995181000493, 0.7190578598827985, 0.8873462399457571, 0.8316872307288017, 0.6271125166458418, 0.9332436335987297, 0.8412943236284491, 0.5610915585409105, 0.693198728377997, 0.8545646526463988, 0.9928640098155328, 0.55431265881697, 0.7917879446070553, 0.7397863120674625, 0.9808948294563449, 0.7419201242974098, 0.7257797507430891, 0.5721795056946823, 0.5226225787379777, 0.7831831264924121, 0.6222563741397473, 0.7766273368807064, 0.8722073867581213, 0.7433245269330808, 0.8913910533279896, 0.6478375943186332, 0.7434445303410112, 0.6691654331177973, 0.6217763777961304, 0.8516899497994386, 0.9143731377592739, 0.5253663463722491, 0.8318676359991238, 0.9787516087158492, 0.7043413837019878, 0.7270903256589749, 0.5486240670067759, 0.7126419461088204, 0.6433841365541946, 0.6610627774958524, 0.5718330747742512, 0.6261614705006793, 0.643915249198313, 0.88748867237881, 0.5585392292949019, 0.6375532106136279, 0.6198353712487953, 0.6046431667361039, 0.6302500749138311, 0.9494072167849648, 0.643463107922541, 0.539465316561905, 0.9528051679697753, 0.506808946502495, 0.8818546143802247, 0.7027518384800191, 0.8132983497521293, 0.696685658250286, 0.6459113678970879, 0.8702312758853779, 0.7189615058998694, 0.6422523303444169, 0.8957794933365832, 0.811413862241635, 0.7737164260147078, 0.6497752393392638, 0.949237280834248, 0.9218137565876985, 0.9927123453771043, 0.6834136065285386, 0.863836896274043, 0.718911385277518, 0.598707502155756, 0.545678788802366, 0.7149112558166684, 0.7915979854834762, 0.9889895519816244, 0.6345932938051326, 0.5559852168300599, 0.8315255518419433, 0.8212844721435784, 0.8877230900315228, 0.7874484757994056, 0.9535522374581296, 0.7381312100951917, 0.7683138170765311, 0.7827418465364089, 0.6598400835485346, 0.8907769734205615, 0.5605026504138684, 0.9612194258152587, 0.8340129071536857, 0.7896083344726624, 
0.6258567815772469, 0.8552619394493164, 0.8896459374169016, 0.690712163024782, 0.7979027364651596, 0.6545876441604241, 0.7535592100755548, 0.6203593033408575, 0.7709617407100469, 0.8720633626275407, 0.5479727347643444, 0.7931270743702402, 0.8209373358010248, 0.567455533372637, 0.6295983247762753, 0.607280305830612, 0.9452167439478709, 0.9689547657586115, 0.5926939461233975, 0.9590242923684239, 0.5790650203377397, 0.5349212446720722, 0.5091261332246073, 0.9090029369972192, 0.9579979350485391, 0.672307624160464, 0.6453499328881792, 0.6752872043237532, 0.5348799636107799, 0.8738551246897349, 0.7160314395438616, 0.7056245392249119, 0.8034086430505435, 0.7730835435779225, 0.7527149397205569, 0.7099617673092162, 0.8316703187897805, 0.8559023894833847, 0.55620165136069, 0.7406839702345319, 0.9489835323257468, 0.9093067758524173, 0.9247783282230195, 0.6452162212309418, 0.5696045966356812, 0.7928415269672435, 0.8676674676721278, 0.6521659896865557, 0.9165253882736794, 0.5983450368442071, 0.77095780377614, 0.9291368970376137, 0.7532520431114305, 0.6789064078951697, 0.9585396475041217, 0.5937881234265951, 0.7945986797467082, 0.8815127166017247, 0.6026300418495191, 0.903247705484133, 0.8788680081992724, 0.9306219434986991, 0.8810212648859881, 0.617528652079323, 0.5393958187499712, 0.6126737347663227, 0.6327019713697709, 0.5948628670576597, 0.7606420939320937, 0.6703508535004945, 0.5951364184508888, 0.5418505181819386, 0.7791488789521541, 0.8220851687747881, 0.6308149540632034, 0.7077149402870209, 0.8459072426932202, 0.688008839677668, 0.9949243549052196, 0.7567844404528272, 0.6286413724651778, 0.957858061644219, 0.5609264416694368, 0.5488079769705481, 0.8061463251727707, 0.6377668171631314, 0.8867945725346138, 0.7869394284897333, 0.813201364835874, 0.7420702202156905, 0.9370397588634805, 0.6799184570547614, 0.6383520509170746, 0.7805647570998111, 0.6531414859237896, 0.6358850434480706, 0.8193336280209982, 0.5260035459772704, 0.9515176360284558, 0.7157444993779563, 0.7731324258168022, 0.7488448710270095, 0.5923295548592449, 0.9173438146923893, 0.5392018391463578, 0.6617031408059719, 0.602587809030132, 0.753604828344475, 0.9082223516752721, 0.55743202563428, 0.9254203820146125, 0.7799102599921897, 0.8405400690659512, 0.698241200546821, 0.7869651025522404, 0.6190496886915977, 0.7673797681995875, 0.7458212073108065, 0.7830935659745806, 0.9715059495734295, 0.6459235359749544, 0.8725287362243865, 0.5547278918634684, 0.7078702878103338, 0.7084763024275992, 0.624031111600611, 0.8452512097882166, 0.6775843596273977, 0.9843693178420654, 0.9265939372483094, 0.9867341457680598, 0.6240033735608352, 0.6521777037779948, 0.608276429520284, 0.9109713817247029, 0.7154254503173342, 0.8019158510486082, 0.5096962362538366, 0.8177089563007522, 0.7108628791242275, 0.7155861144562817, 0.9311217922799493, 0.6941686593453158, 0.5581812843833018, 0.5815568417487507, 0.6512028545484427, 0.8194260376062752, 0.5734598270964976, 0.8156177416217079, 0.8689998526756706, 0.7435404452433698, 0.5055232265511538, 0.6063597514743844, 0.9755761576841162, 0.5421758991578574, 0.8277280565809857, 0.5160524120908312, 0.6199390878129909, 0.7996430877393228, 0.7963537442002, 0.7569930537500249, 0.9577015062679295, 0.8201875104556684, 0.7672144651596682, 0.8554917749112714, 0.7368079073064119, 0.9726932414615075, 0.8500582087996447, 0.7917842600463328, 0.7771065969010073, 0.8841445641639687, 0.7084568956111985, 0.8791482213764787, 0.946078636023286, 0.5136610727575222, 0.6436875577965118, 0.9287817129407083, 0.9557826845721052, 
0.7064629504181233, 0.7732242648464418, 0.624534298564204, 0.5786064145931726, 0.9196767371657896, 0.9975977956827418, 0.71973310894555, 0.8955195100118065, 0.9512358947663333, 0.980114315449454, 0.8162875676729238, 0.6742574295375401, 0.6970840122387553, 0.9170377302538466, 0.7424754645668379, 0.8311038060414927, 0.7997472586262004, 0.5738482988796733, 0.5710208367548929, 0.5244760338945305, 0.6049067954838865, 0.8324884540830526, 0.587493097411732, 0.8242676379200653, 0.6813119976619324, 0.9916915258479801, 0.5856001231177388, 0.7873007575293056, 0.581550798582791, 0.9624389078003763, 0.847563918695218, 0.645331780426017, 0.5834630804432425, 0.6935235607134487, 0.7286623174171017, 0.8878313729275369, 0.6939120892604103, 0.9881775734389289, 0.9458884632299343, 0.8638753425885227, 0.5799015114763102, 0.7954435427638655, 0.9021326689483955, 0.7139117714052743, 0.6500157467358381, 0.8102646992299348, 0.7450189341136846, 0.6820087693332869, 0.5233681686869455, 0.5191990695290827, 0.7574246430887979, 0.9077053538869346, 0.6117007933725858, 0.5445485088494841, 0.6697045844783496, 0.7992498890570828, 0.9081407312887695, 0.8276896542604137, 0.5613381584216895, 0.7301444691568819, 0.9571224261693565, 0.7182064467495601, 0.583924638811383, 0.9552815459279431, 0.625785796139117, 0.9688982087596112, 0.8136252438113549, 0.9557468788134917, 0.5615872808084174, 0.5312945404510725, 0.7942063949804652, 0.912881126929669, 0.9664721245713612, 0.7627492298069771, 0.7762953326408714, 0.9466036997905837, 0.857157863551866, 0.6739699954341467, 0.6325637688355272, 0.8753099941637541, 0.5371341166036516, 0.5437715704621935, 0.8993780773979673, 0.7009792160710844, 0.8940641965748185, 0.9888080101055712, 0.5391517742872183, 0.515123413625564, 0.6764126909992434, 0.8263265512526189, 0.5843585251060119, 0.9382908119772059, 0.7980076598750045, 0.9597282754654074, 0.5054249769693624, 0.9943847460958914, 0.8502777016919079, 0.7434970537236489, 0.7247926239630486, 0.796656354005673, 0.971683482369424, 0.8090917685366033, 0.9612995765671701, 0.5571830640219726, 0.9788257786862173, 0.8003921349032442, 0.8544270898364673, 0.8014299647466352, 0.6393921558928591, 0.5312097701245597, 0.7554520090329353, 0.6629401596083269, 0.8722878715133413, 0.5317065054487198, 0.8326439465847859, 0.9243730072559788, 0.8170886835452208, 0.8072167144611968, 0.5724840282565273, 0.9258492091288794, 0.557820797068928, 0.5065154198608162, 0.822010813240383, 0.8941161705799804, 0.601239908447723, 0.8677696656361946, 0.7332052309217382, 0.7726990540125216, 0.8522218827766268, 0.841346976456111, 0.7673712107837727, 0.6572134298166652, 0.6692103430541971, 0.931179481372892, 0.6707227703059945, 0.9278623447203607, 0.7114052151854906, 0.5318774234651663, 0.7302896471658002, 0.7260709299887957, 0.8434558513390727, 0.9339343290592527, 0.7256228467661592, 0.7664974167771713, 0.9954358445789393, 0.8592696512227952, 0.7609630677847377, 0.7003489234798793, 0.6035522793345249, 0.5065413105460304, 0.6614354570224472, 0.5441316942210639, 0.5319726959814496, 0.5026186337305418, 0.9455465846661896, 0.7918942891724192, 0.8793036141120224, 0.9848488278127272, 0.6852087100870523, 0.9355796064038278, 0.7396980557578998, 0.5503400624640208, 0.8401619014365923, 0.7079790450996849, 0.9636805907366446, 0.6148770210183507, 0.5804328184882694, 0.9192041804909354, 0.6670378401901227, 0.8231344780973455, 0.8534275126256647, 0.5210637899335233, 0.6330795788815231, 0.9592082833426989, 0.8885168888483177, 0.8643588047018562, 0.79501489138546, 0.684716962614645, 
0.6811785520466094, 0.9026336812382265, 0.5615573811679293, 0.7328893633552775, 0.8166702747288044, 0.7369316715736981, 0.6985707007629697, 0.7856809411144526, 0.7484439887517627, 0.5329049725998627, 0.9863789117970325, 0.8717081657368656, 0.6780711632093408, 0.9935171185866303, 0.8820999204986406, 0.8186887870089998, 0.5090454511611939, 0.6784640026968956, 0.6546884441922959, 0.829655012533842, 0.5526039129588501, 0.5940918174079493, 0.6324667241414657, 0.5500611118302707, 0.7470319141312312, 0.6024338102400357, 0.7870668433978628, 0.8933278879358957, 0.5042887551819166, 0.6015473929561779, 0.9537591284792527, 0.528670935480388, 0.7691729965815337, 0.704770301574143, 0.9956583818416309, 0.816918525404061, 0.858726683379873, 0.5661414785048022, 0.6928789687265098, 0.503389707321987, 0.5547296235437986, 0.6118134360316878, 0.6188162653202343, 0.5669853507822556, 0.5582296610031758, 0.6372578866436718, 0.7656390181741654, 0.5822412857786028, 0.882864624114071, 0.5142782232434921, 0.6778854487595252, 0.5987112730293984, 0.8230199029339251, 0.6038758072719975, 0.9981408470938775, 0.6870075103332252, 0.8589727315151138, 0.6329422786410714, 0.9136139585600513, 0.7111296342194271, 0.8864856777928148, 0.9210128891394684, 0.7272568830162269, 0.8189284084467376, 0.7242804633348967, 0.5730336762497968, 0.6698101827436966, 0.5204515985292524, 0.9102551354442964, 0.5416205294732693, 0.7173904800770623, 0.8453685361868895, 0.6796536367183759, 0.5092169682430465, 0.7021842621488219, 0.7393334924186158, 0.9362154472037767, 0.9175006676525574, 0.7149858770791998, 0.682891006672204, 0.6450145072943625, 0.6687187130889057, 0.7628093097740092, 0.5011291743111654, 0.819676421411458, 0.6361421966364851, 0.6064141203009172, 0.9895785374816427, 0.5974159400688883, 0.8904718738395601, 0.7797455782748164, 0.592923915927807, 0.849310357579185, 0.5050958196390294, 0.8485137733079822, 0.9607709821579519, 0.8831982962816691, 0.6729625427540922, 0.9681282183483451, 0.993414662706744, 0.7897667987169767, 0.557891664716643, 0.5775619662848736, 0.6512402443325611, 0.5707089592284371, 0.9014033277583067, 0.7395855114739904, 0.8605974221409114, 0.836639768888213, 0.7565351349210164, 0.5982640002513511, 0.8124806121624889, 0.8993340527395236, 0.9916763122921861, 0.5468170836321744, 0.8357500286936452, 0.5620298678634142, 0.5197986930769143, 0.9864587652485126, 0.6754738942325857, 0.8566673859798264, 0.8163899571540126, 0.5656880252758867, 0.7680538908002641, 0.8449255642532012, 0.6670434764219642, 0.9035499118473491, 0.6793801331282379, 0.9677290352747815, 0.917372343470783, 0.6309613412790787, 0.7640653932166399, 0.7157744196066911, 0.8817713604446832, 0.9764088218173926, 0.6465847897676495, 0.958743110380376, 0.7611273601020474, 0.602525806955866, 0.9999148619848277, 0.942393023132452, 0.5665935441663009, 0.9575035698165404, 0.8469278897328135, 0.7009303322871345, 0.5563903608442924, 0.650134718936613, 0.6936309704648581, 0.5068212493500892, 0.5667841083086648, 0.8730977658347476, 0.8410574889930859, 0.9982359159757487, 0.6531874705281489, 0.8309254048718318, 0.9434633669084433, 0.7981172618962339, 0.5987566775338533, 0.6100897293316787, 0.9960298474300473, 0.7765133018671321, 0.5799956531707086, 0.8304121662388477, 0.9260123447888606, 0.8208219496649777, 0.8640084736614946, 0.6137671097639377, 0.5344935055655694, 0.7713061794337774, 0.6068911727030901, 0.6639795570176098, 0.7337364498220698, 0.6212133471522083, 0.581610615959643, 0.6534586375129603, 0.9164638627178077, 0.5574991746725858, 0.5147399877463121, 
0.9138046475611028, 0.5925357172355872, 0.5611840319700676, 0.5255692938336589, 0.6380433689124168, 0.8682354533362517, 0.544855459434477, 0.833235093585187, 0.7487825533698623, 0.9440666783564186, 0.6621870803877137, 0.7314147025288857, 0.7321861609052341, 0.9477216830592485, 0.8511050563201316, 0.7189576805497994, 0.7002019718412824, 0.7744575406020626, 0.8801281342684931, 0.7688208853330227, 0.932444367738331, 0.5186891682793808, 0.9589703702223153, 0.9265566489381707, 0.83615556150078, 0.5682256902268517, 0.6576899289276235, 0.5393028926783151, 0.6771221845844255, 0.6167968831770942, 0.7355570907820095, 0.9830399557858535, 0.6526847703456047, 0.7382091511683249, 0.6640644402140274, 0.6480315642245522, 0.6841829791619647, 0.9692215734243179, 0.9676905589954694, 0.741934062202609, 0.9144500135606437, 0.6608471587989674, 0.7374901458705366, 0.6976868411452279, 0.5957651810819602, 0.8315700946037611, 0.6295869588452225, 0.5428152320491866, 0.5712372955608487, 0.7495267710680142, 0.9038243545304325, 0.6570050291117601, 0.9234695501073802, 0.5303288783108595, 0.5844852847661716, 0.7584880924558421, 0.9653510497207571, 0.5263372564760783, 0.8323914841867697, 0.7683550789476248, 0.5359041663880585, 0.5048768443862367, 0.8848286911786718, 0.6467039420750584, 0.6734309105238592, 0.8434881285308087, 0.7407761212798936, 0.6794761541878416, 0.70593883228417, 0.6411812498667444, 0.7658940751198802, 0.7340786186812144, 0.8269624046615084, 0.761277079378587, 0.6715274923152821, 0.6800819851912574, 0.8698624849915333, 0.8449324677683518, 0.9988375817902537, 0.6807079530900053, 0.5772086360624057, 0.6339026709639966, 0.9366528735945245, 0.9216004363453842, 0.5386141553624999, 0.7949779355341388, 0.6733334759545241, 0.6337737318676749, 0.9503845490945169, 0.8947982229383484, 0.6891546452928713, 0.5829450307610553, 0.532441889879574, 0.6466424258510267, 0.5720859530537848, 0.6630369326671308, 0.5870181403923631, 0.9804860216956797, 0.6710634789301442, 0.9480770654184532, 0.8072949594754686, 0.882543026336918, 0.7282246981248792, 0.7429791247812471, 0.6590821813674898, 0.9572948356939357, 0.7989719219028398, 0.9188349847700179, 0.6656548012730772, 0.6094634841623002, 0.7646778224993602, 0.5833440266296736, 0.7418442682075158, 0.5435092810977257, 0.7389553567889235, 0.566080747253845, 0.515498505580895, 0.6694824977154976, 0.8859146998494019, 0.6474507392860643, 0.7583181117156567, 0.8395132749873349, 0.8421563210798101, 0.7615442386378874, 0.6867596253410417, 0.6662154658553658, 0.9408779554856246, 0.9984995509467864, 0.6769774429766968, 0.9585123428568754, 0.9969714923976143, 0.9734783882890401, 0.5666401778505616, 0.8123083414841522, 0.8885633699526383, 0.9572025569239236, 0.6169785371759298, 0.5106157697341235, 0.657184633619085, 0.8371219306232669, 0.5003396811423793, 0.6947679817632985, 0.607158823167471, 0.8068508717768978, 0.6307154224687177, 0.62038691848973, 0.7771629470776246, 0.9891597513328989, 0.8665260714909688, 0.98637174108884, 0.8195062694215338, 0.5694686706112337, 0.5991837199562903, 0.7506516419783535, 0.698259439504132, 0.5125753137452125, 0.8957238629819819, 0.9104936206423208, 0.7853570450271141, 0.8905140859888988, 0.5701409204224014, 0.5657158556408055, 0.518557072388959, 0.8740406468725668, 0.6645938638399376, 0.7155173246535531, 0.5432298593995508, 0.5461103481072734, 0.6265613490772608, 0.8680121880628084, 0.6021134560718792, 0.7751054228711158, 0.9562917486489062, 0.7794080472234545, 0.6303766895583742, 0.7750811446737986, 0.8692828727530821, 0.873291943675436, 
0.8724663325553061, 0.9194837616333418, 0.5106560618272171, 0.5147012046277386, 0.6386692224765844, 0.6116947170543141, 0.9322375423002343, 0.9510138482142512, 0.5694758480847406, 0.9642979704252896, 0.518420453375281, 0.806122240269282, 0.7003074832644395, 0.5778291782003334, 0.9832326905623061, 0.5740839970304883, 0.5752743525869655, 0.9324897523018479, 0.927453476816596, 0.7132332763119531, 0.8141695232790711, 0.7693713829058235, 0.5490531407479449, 0.5293322780642298, 0.6663198962426441, 0.9002553128003958, 0.5314106689414464, 0.8800186299780169, 0.5834557905221731, 0.5209828024852274, 0.5869724804638142, 0.8540492965086786, 0.964600042060846, 0.6678994317646645, 0.9403755282970285, 0.640366982027853, 0.8648396807142804, 0.8852168611884617, 0.7932609305904013, 0.5000058069356004, 0.8783663361389134, 0.545478207748705, 0.8689498361910496, 0.6357183997080752, 0.7237000469036434, 0.571802601621621, 0.643716584580313, 0.5754999320033336, 0.8125283567133252, 0.5196753238336698, 0.8257083816023767, 0.8301249004193336, 0.6338430614681396, 0.8446058107961882, 0.6976421935721422, 0.9124127120758313, 0.6190771364891969, 0.7200668119283998, 0.6542945244115069, 0.7633893204444755, 0.8641388595100432, 0.5520997379499468, 0.9656237118875759, 0.955836978877487, 0.8809713199855504, 0.5718749062350967, 0.8183727540886943, 0.5642095624875023, 0.8383393418827026, 0.5622619592808484, 0.778297259794763, 0.6808486752091714, 0.6011553493069326, 0.7383487705277274, 0.5480376698059135, 0.7045802052144723, 0.9596182272053837, 0.8957051660430186, 0.7964367114365456, 0.5722751784807805, 0.5242659456377172, 0.8896044067090096, 0.7603798642710892, 0.5957407593796249, 0.8102849392348339, 0.8337914193100725, 0.7272561066318319, 0.6638280292496291, 0.6966953715151525, 0.6999515792767945, 0.532415997669985, 0.7388852024445376, 0.6554105080243475, 0.7522097752952477, 0.7551907052631905, 0.5570149811600222, 0.6312111549128626, 0.509097648574182, 0.9733942554993267, 0.92649909836852, 0.7822538439036006, 0.77570936925348, 0.8347176616667091, 0.7302361304543254, 0.9989645300124596, 0.7683345820321004, 0.7197023319335154, 0.5967578521351464, 0.6777813309933147, 0.80333063945048, 0.6299424052014534, 0.70109985952833, 0.6960206153714118, 0.9337081333999288, 0.5043253122078875, 0.5201698613830248, 0.65022107731901, 0.5161877970020062, 0.92402568020914, 0.9560461413471488, 0.9410793066542592, 0.8200658762856166, 0.9644581009889854, 0.7642600884738648, 0.8007652908985127, 0.5603761787667784, 0.8031438739761867, 0.6169326806798776, 0.8708480102327775, 0.9425768876920138, 0.7265730665647927, 0.8521036120403186, 0.7465197721761688, 0.7961671585680661, 0.5287108283521518, 0.5387567567653542, 0.6603964423526814, 0.8241074424044703, 0.584820556479368, 0.7944129155125247, 0.6897766853291547, 0.8653979635116928, 0.6414926524572317, 0.543228757464977, 0.5441807058171211, 0.7940746364553134, 0.7892549650625567, 0.8088430103999313, 0.8738846420815032, 0.9069153582911532, 0.9131356942596419, 0.7214931503764366, 0.9180839242237961, 0.5898538794111412, 0.6859770756514207, 0.7926624558334525, 0.7077110878138801, 0.8087125442189511, 0.6841352457134595, 0.8426976029416047, 0.5760505051735223, 0.5876755546636621, 0.9095127957770642, 0.5730854618942245, 0.6500782858712222, 0.6184155454466136, 0.7033277077546618, 0.885900898482743, 0.956010121041853, 0.5430346490930387, 0.7021981794720178, 0.8349998060936681, 0.9518186752138404, 0.689314079636953, 0.7411112688831284, 0.5699656654850984, 0.703580627080387, 0.672568851821226, 0.9870216815011864, 
0.7493836752752427, 0.7349627803465353, 0.8347264541141706, 0.62494619271138, 0.598609868870011, 0.9314622578150027, 0.5299525734767938, 0.6974818449883845, 0.9788091527058718, 0.6326204847461595, 0.9226294482445663, 0.881983700176592, 0.9190340423687833, 0.9746802267847279, 0.9033679878963403, 0.8180012983119787, 0.6390339059186692, 0.665285647033, 0.534596447642389, 0.5237997864871153, 0.8315919171944963, 0.7080958110328355, 0.8253458028314571, 0.9026934946468455, 0.9341155137080166, 0.7762046457469692, 0.7136663216095916, 0.7645020542569076, 0.5499554213610942, 0.7742772421172878, 0.9109674120056599, 0.5677356334791448, 0.8619718111544717, 0.8837447117186533, 0.798152334434139, 0.9510852244614009, 0.9391624761184056, 0.7252255581514284, 0.7557801382530468, 0.7714410815540117, 0.7214649062949225, 0.6132770113673156, 0.9020515094994529, 0.984273593979863, 0.6665769057972586, 0.5130073001211634, 0.8587676725299223, 0.7574468432979367, 0.5008961562901291, 0.9437592937005722, 0.9535714323186884, 0.8656115296404863, 0.8361605025048535, 0.6759785569273185, 0.7744065450638811, 0.6918840474771344, 0.9116137631868243, 0.5109371181991671, 0.5074166027008746, 0.7994755338219182, 0.5914561417574447, 0.7708520256734908, 0.7219357585480064, 0.5696735590930131, 0.8348107269909326, 0.7333429227824955, 0.5412966668367738, 0.6220701766480571, 0.5274620212911437, 0.7570654491251468, 0.6693175013604735, 0.8969885681405452, 0.7969923493870732, 0.6314384936037383, 0.778292626163283, 0.8465408693631262, 0.5123935968048314, 0.5139250246727514, 0.960475625572737, 0.8957948678468645, 0.7502827313056744, 0.8948610030796551, 0.5685483362507204, 0.8705636429672062, 0.8810424124786556, 0.5089006289387963, 0.9407446181148191, 0.8325077592632077, 0.5338392196505475, 0.6944405143944403, 0.8783123920142356, 0.5054053971309349, 0.6411647795655461, 0.7171404016567708, 0.5011877887394671, 0.9156493234738379, 0.8920951578475997, 0.7453501062219658, 0.7328310259609825, 0.622928435209141, 0.9113913248980074, 0.9526614958653113, 0.7613701846650887, 0.882810808032283, 0.9286746433774523, 0.5843805269469209, 0.7822578096688348, 0.5798086348346037, 0.7627948460516543, 0.9759483585316773, 0.5734805271903853, 0.8960247781430969, 0.612157057236264, 0.7067966734137503, 0.61838480287867, 0.6311439482853578, 0.6653151767520196, 0.8951488738400091, 0.5131020759123713, 0.5945511529550429, 0.6842835460840468, 0.7595356749533781, 0.7811218909728554, 0.7850971985458973, 0.9149877130323446, 0.5613645541716035, 0.8842588367022449, 0.8552301779168274, 0.9258518524561191, 0.6759451410685073, 0.7418227103311832, 0.5151297221604283, 0.6831006508827469, 0.7473236453440704, 0.675331635917438, 0.8136679331248764, 0.6312998911217114, 0.8528034870466061, 0.913759381467317, 0.5742871637391994, 0.8738168653308233, 0.6356336846931386, 0.7303866233989145, 0.7677606975417203, 0.7810147388569528, 0.7072083886215599, 0.7642070465253954, 0.9988711827833896, 0.5753152382281933, 0.840586134517336, 0.7815841885596546, 0.6583625369758598, 0.9438413061732989, 0.8347018301717503, 0.8253438178174447, 0.9500157088982555, 0.5188278625099285, 0.544153688237175, 0.672368953614815, 0.5718442386125245, 0.671552339611757, 0.7633951521651976, 0.6097840772831067, 0.981082764456042, 0.820469720972372, 0.8618552064667606, 0.935799508562552, 0.7115836422706601, 0.5864412079775658, 0.6581350405651933, 0.7485555694336558, 0.7004639536401864, 0.8321156507314456, 0.7761848688849782, 0.8241348823975753, 0.8845036224023013, 0.8343446771566043, 0.6882072078975485, 0.5509421057567443, 
0.9678211976701672, 0.8604249945711597, 0.5381600407866463, 0.8614049592019696, 0.6578246141185837, 0.6543865382033379, 0.71995128751772, 0.7320551164401925, 0.6658306534473848, 0.8233534074245743, 0.7184747685497447, 0.5374982713590863, 0.8066207803228286, 0.8964833331386177, 0.7349827330309026, 0.7404439943259014, 0.6095780823833523, 0.7084744742411935, 0.9770420965796427, 0.9333551639598039, 0.6166561998298261, 0.8117160228358999, 0.9687900960393783, 0.7778041777698022, 0.9104507084245063, 0.7487645882521281, 0.5463598994245631, 0.6239028681498036, 0.8752892843045276, 0.7452589309198996, 0.9402298904389718, 0.5584559236811011, 0.8211481824631822, 0.6812482338093159, 0.522724292012774, 0.5934190429482458, 0.9699262112881231, 0.9985618341479784, 0.5272351665772099, 0.9032032426122593, 0.6865701436704157, 0.6154559552562626, 0.8579656515160645, 0.8918778054157227, 0.8286773031985666, 0.6574417542668115, 0.8864959886334347, 0.7476183342851597, 0.7000070252952126, 0.7293188721289562, 0.9793583039125586, 0.6836022256167696, 0.7421064255074348, 0.656335242508051, 0.5949851945181044, 0.9614559720743296, 0.6788196372122397, 0.7207783868143132, 0.754997909968957, 0.7355637782167364, 0.9760181429824475, 0.79481673236159, 0.6543270099624543, 0.9242180941032984, 0.9554303591665239, 0.754930588808185, 0.6830383461447994, 0.9252279937165782, 0.540540130517633, 0.6755624254092565, 0.9974515672872952, 0.9524148408561037, 0.9238675477401828, 0.9131358024753227, 0.6103431463262127, 0.6162802907770288, 0.9040008437471867, 0.8677952861468807, 0.8176068920227844, 0.9821973985704648, 0.6659827805611458, 0.8288310479610299, 0.8168969045309622, 0.5916271266110253, 0.9532172259751979, 0.9294455454787773, 0.8685091185646399, 0.7164776068397839, 0.5341177400883543, 0.6687242564064899, 0.9121561373151799, 0.5191306573559509, 0.7100067399365613, 0.6660841148791972, 0.829534945470392, 0.7025144613715623, 0.8730002388480345, 0.5959035226233262, 0.564412112813724, 0.6539121769975078, 0.5726483637521085, 0.5939007742425575, 0.5527643267279283, 0.6427082469538203, 0.9186513489858849, 0.8399292729038601, 0.9465947657958218, 0.8604909214998393, 0.8438457408835476, 0.8072989723430226, 0.608264834108954, 0.6739189233236519, 0.5260977643244609, 0.8684168795909704, 0.723430103135951, 0.9885231250904746, 0.5393633299736686, 0.9965072564339282, 0.6055891946156189, 0.6821919364415744, 0.9293812316412557, 0.9842416376946038, 0.7665017202671227, 0.9857777041401417, 0.8880020494812835, 0.5104274601296117, 0.6636807138344175, 0.8977369595641145, 0.670711732951939, 0.7101194809973523, 0.5935002250023373, 0.7085682591681464, 0.6260697658896572, 0.5115719594782069, 0.978597404025569, 0.9711412780388528, 0.5323463186836713, 0.7443673718638817, 0.5534759230084786, 0.5320585519363925, 0.5549329635831545, 0.5457584491030174, 0.8856809557080141, 0.6971766576615703, 0.9534369858722478, 0.7075979683769842, 0.5186234517986679, 0.7460625014903279, 0.7121570291575405, 0.568867023785018, 0.9219906914836531, 0.7483642948521463, 0.8566159348860984, 0.6610289330697701, 0.6072952278732857, 0.6893717118806366, 0.9889030215181692, 0.5676680358886557, 0.9035170099538545, 0.9973123248652106, 0.9103414506871877, 0.5692582194538067, 0.7477913163089939, 0.9346456437228037, 0.9803655898875825, 0.7036695855409989, 0.5867458900847056, 0.887575696459987, 0.7522539684843835, 0.736946681704536, 0.7933727235827319, 0.8136852391666874, 0.9942498362471706, 0.8665527033984071, 0.9501379255441257, 0.7156746655851214, 0.7841013142769957, 0.7390315864725435, 
0.5156428464282075, 0.7386079562567796, 0.8775200896386206, 0.8957082599358825, 0.5059902142367407, 0.502550910021164, 0.8642224567707346, 0.5776575090699293, 0.6326819176327442, 0.6607039093099374, 0.9064906483272657, 0.5480572572671709, 0.5398923822057775, 0.7674651816129506, 0.8310322111801978, 0.9576449964580247, 0.7214845984629337, 0.5055966576048021, 0.8364429014559547, 0.8982441753996935, 0.8561866938769425, 0.9121594337316888, 0.711217267601604, 0.8268529022838278, 0.5996116314617441, 0.675959430812749, 0.7778624616582475, 0.7427233797355075, 0.6256184058009254, 0.5080799852863116, 0.8996295681706485, 0.8114081929537738, 0.5858332593202784, 0.9162351559479616, 0.5639018365498003, 0.6500645176929221, 0.5826681288791782, 0.7954344319006095, 0.7234519772609602, 0.7003029762242992, 0.9020343511632415, 0.6357025324314911, 0.6713717746942747, 0.8647648079365163, 0.7845220086867348, 0.9062427823730064, 0.7235819152763311, 0.9489553995135045, 0.9282358177913306, 0.7016315753945147, 0.6949452691735609, 0.8265645162110082, 0.8548632173635953, 0.6688230863141941, 0.8398355983340178, 0.9784915536951264, 0.9536922186163591, 0.6208845502814971, 0.7238466222316613, 0.9821530338779665, 0.5041573231981352, 0.6555274657852574, 0.7927521027951518, 0.7036047285111762, 0.8356879624819646, 0.7397411983648284, 0.9139157810000794, 0.8604452232977858, 0.665413060210704, 0.6363748612142833, 0.6443454225155846, 0.6105836412009449, 0.5758189765092213, 0.5255362938128991, 0.9304495638475007, 0.6366591749889869, 0.7130991531182121, 0.742345107255981, 0.8396368474515439, 0.9917681604211397, 0.7516860532631663, 0.7244921343101527, 0.6686355090064794, 0.6928632004123215, 0.6976304606241784, 0.6612489073738339, 0.6809033094866476, 0.7619591933832485, 0.698833013729103, 0.9585557580537929, 0.9797920054979969, 0.9855547506887493, 0.9214248060039312, 0.9186914935631555, 0.5843561858425479, 0.8010985488255218, 0.8440664648744154, 0.6018087291883145, 0.9012563199501973, 0.8374199648737702, 0.795661406330584, 0.7960622714437549, 0.8552094691470538, 0.8544717574694283, 0.5629658498768744, 0.6071386655153468, 0.6151117073751593, 0.8180557640914754, 0.9478717872263751, 0.5252096677993874, 0.6838835358442397, 0.7366854530263358, 0.6144063742344184, 0.8778855722955683, 0.8363282637810704, 0.9287257745608704, 0.7587985867748129, 0.656073183992102, 0.5785837877872285, 0.883226159806636, 0.6491367515159512, 0.7753889172822628, 0.9916210892190954, 0.7650454984572947, 0.8420904265694902, 0.9582282142838426, 0.9961385020148816, 0.6456006659432508, 0.6389447523509337, 0.5144791725545079, 0.6479251122879925, 0.515886404666275, 0.7854346530952321, 0.5192985116967572, 0.5590782518856463, 0.9094716486672645, 0.64157757165627, 0.9262581850399487, 0.9964114398883375, 0.8206019931991638, 0.9662159804212183, 0.5061272723218497, 0.7618026120943746, 0.9410795795971356, 0.9617134576099076, 0.5846478678979898, 0.767864490716794, 0.8021694925285706, 0.8333555431790537, 0.9385973913809338, 0.9793107255439716, 0.648973724454168, 0.9396080906128588, 0.9168326135275411, 0.9042202101780509, 0.5887014247155781, 0.6817711573565666, 0.779952811679819, 0.6783648996566026, 0.9481520209337795, 0.6919404085348397, 0.7181547543053408, 0.6089496088979642, 0.5479043501981455, 0.7651984015570508, 0.6600288330354326, 0.6039525894026194, 0.6375343301229046, 0.5894704933255116, 0.9097531733684207, 0.7026355727232999, 0.9047534417321893, 0.505556923461971, 0.6620824949546744, 0.6904099701483333, 0.8000567564957775, 0.7387391480576346, 0.6524935719082019, 
0.8134089427346523, 0.5263174996821618, 0.8896399740307409, 0.9763187107667436, 0.9892097833255319, 0.9700506210998167, 0.5300987058787058, 0.8296996776171313, 0.634872420232828, 0.5402091529883839, 0.6372476846286199, 0.6936789909507765, 0.8776598822428939, 0.5556111393930533, 0.835237501367988, 0.6941716627132704, 0.934483719630499, 0.958326007083752, 0.6018999510742888, 0.9129302137945171, 0.7838810133795702, 0.9773790523783594, 0.7592129588466809, 0.9494533522086162, 0.8621195420557352, 0.9150536508721089, 0.5001862319308534, 0.893442665855245, 0.8382127640403796, 0.5328512515456705, 0.6385744576633918, 0.7539374253912282, 0.9873618722171875, 0.9894300185496903, 0.6516634086161293, 0.6760481679596156, 0.9860429921028746, 0.6720491468766756, 0.676194732755744, 0.6266010386467017, 0.9656720958783187, 0.6653606751278193, 0.5530121266326611, 0.7870716378546273, 0.5338784067571696, 0.865088549304875, 0.673577776892917, 0.9827081665507307, 0.6524150580745054, 0.632399063924097, 0.7300131898867317, 0.8327004460796393, 0.5434621965675879, 0.6748887713413922, 0.7205421895221704, 0.8395616837833538, 0.7840112694878065, 0.7495487791854549, 0.8455326681130642, 0.5324256040620687, 0.9144297801909465, 0.5282219804014493, 0.5549625970599592, 0.9662310323354126, 0.6447346727049663, 0.8333133813525198, 0.7247826415871037, 0.8065072993815536, 0.506163120588488, 0.9963226682286757, 0.7156742886447665, 0.9107778017087054, 0.5502902420264376, 0.7854479898697108, 0.9969025319096945, 0.5519972480362527, 0.8536891303278593, 0.6375372092282958, 0.674027719904182, 0.6303209325286557, 0.6433571881970991, 0.6988802127388056, 0.8860296406889692, 0.7922703680927962, 0.7572808820757377, 0.9922440807436588, 0.5573709158904556, 0.9789396585596872, 0.6689039772877672, 0.7686341997049408, 0.9677306607413034, 0.745070731027007, 0.7413647263271838, 0.8312641881796591, 0.6410111513009584, 0.6704960873691872, 0.8622257573471338, 0.75381910677669, 0.5700469587577306, 0.9557219977769258, 0.6954874873345098, 0.5957814470636679, 0.5423522226396933, 0.9353542225557778, 0.7599610296502375, 0.5128871666278307, 0.5651774142610106, 0.5537307166324317, 0.8739058446977492, 0.9421079314958218, 0.6912739065487253, 0.9967267939674567, 0.5518928551394332, 0.9267284792652294, 0.5789747830722392, 0.6626976349908329, 0.6163203781733855, 0.9363244203226314, 0.8948714975018945, 0.5214783985245284, 0.9168860539894772, 0.9052932006167165, 0.6774335970225241, 0.6630973780482265, 0.9930775094404001, 0.8129198214276246, 0.8455114461051217, 0.5164534605937872, 0.6050806044849881, 0.6682224791968006, 0.8647628565774225, 0.5810229912673434, 0.6447832253172068, 0.8545954073584852, 0.5736692407153106, 0.7653818935849277, 0.538308789142607, 0.5572240262266513, 0.5097945065493701, 0.948398610357437, 0.5171745868454434, 0.7881033007845146, 0.7309215400073392, 0.8286242438608158, 0.6044326315616171, 0.9719792998742722, 0.5492842371603607, 0.6258540198349027, 0.9128996039921438, 0.5213644970355132, 0.772455381964787, 0.8908564273300332, 0.618066068039179, 0.7850026566593354, 0.8984844055972834, 0.8894705887592418, 0.8863700782951249, 0.6542095841620074, 0.6455303469473022, 0.7823715739634269, 0.7979141116388988, 0.7644610906192374, 0.8025277264462468, 0.80745199109308, 0.8490458642581238, 0.8683469717840697, 0.5541381919932256, 0.666574513804582, 0.975011405157963, 0.875042314111878, 0.6989097969007545, 0.6482755714852344, 0.720438934925844, 0.5045830321043804, 0.5996077821537482, 0.8942552110584305, 0.8079362934551291, 0.8708580847725722, 
0.9655677030106309, 0.7205319790130653, 0.9373132937076454, 0.888371479394419, 0.7936034659112817, 0.8111978509682978, 0.8465209033172204, 0.9725726717871929, 0.8110614396192282, 0.5967338661243291, 0.6207388035043866, 0.987583557643821, 0.5272236431695583, 0.6222067978763364, 0.8460710313413496, 0.7910523449333242, 0.6862238871442647, 0.5056749752705407, 0.6046016304515754, 0.9812902187925922, 0.988550399579386, 0.805386537678998, 0.6972713064253814, 0.915147884907915, 0.8837750426480389, 0.5612741338425564, 0.895080356587941, 0.6377695485394768, 0.5446805925421283, 0.5007266461144655, 0.8684685076628353, 0.5653054916759821, 0.7009384748068157, 0.8732893815031064, 0.7523736302870523, 0.9242940860478683, 0.8169784583178696, 0.6086746475674403, 0.8927098083323383, 0.7604628436861527, 0.5726414466839644, 0.6998925619700167, 0.7605235794676684, 0.708191068677783, 0.5178391866521537, 0.7775334355596157, 0.5685030827343135, 0.8432905815397527, 0.7554271294148058, 0.9419232221710917, 0.9046279933351717, 0.7112322642628832, 0.8731272181391675, 0.7091951687682443, 0.590523463136732, 0.6722722352928301, 0.5157093554285511, 0.8115541634662325, 0.8774551064111218, 0.882748650381509, 0.9792856914216577, 0.6448967411770633, 0.7996157247328766, 0.5718594677083753, 0.9798719810024641, 0.6666267885514123, 0.5240318363238934, 0.7624552202938945, 0.8678053858676439, 0.8297798063041102, 0.7993135680821599, 0.8670615655199345, 0.5617580223731742, 0.5103460006762935, 0.71569208559022, 0.9364169294563183, 0.6449850207789598, 0.590563994001492, 0.6242535496081378, 0.6179751360390937, 0.8345621896286477, 0.6088965013339616, 0.612616990499441, 0.6497769389521777, 0.6151845809732843, 0.6217169080809714, 0.5758173482437918, 0.741999583255877, 0.8876529066642673, 0.9684639314776782, 0.9440127958628906, 0.5798993706280596, 0.7539656857359934, 0.6059253523625864, 0.70372180060122, 0.7480830986138708, 0.8055245271609341, 0.6499593975067659, 0.5564261169013638, 0.9784838364467618, 0.8857767922074817, 0.5536956320676218, 0.8869616638030391, 0.8580835079603728, 0.7396156176861628, 0.9610523077671722, 0.7251860103706547, 0.9864032004899759, 0.884241564870379, 0.900769591042476, 0.6948585987495902, 0.9790568458645679, 0.8199310403177209, 0.9434198099989655, 0.8617708400583362, 0.7569618756740477, 0.7477771874030309, 0.915100218415511, 0.9200585341128239, 0.6694981859778837, 0.8674568394086739, 0.9701611613689733, 0.759791175405224, 0.6391564425593448, 0.585762618410348, 0.8530707111359189, 0.8619129677351934, 0.8922530419752425, 0.8337760254731911, 0.8052787534060222, 0.9727653235114824, 0.994656636180288, 0.9303010429780785, 0.7408786104035472, 0.8985205605395863, 0.7519587184721476, 0.7258881510666814, 0.5081311499569656, 0.814529170383263, 0.5234585526552524, 0.9752012045352194, 0.9093942118088721, 0.5145055499209501, 0.7498530481485208, 0.8326149591173078, 0.9438811465166068, 0.799767630101196, 0.5106275157910719, 0.5129107162369075, 0.5812060167687076, 0.977412361852132, 0.948798284493662, 0.626706102722419, 0.8553085808036411, 0.6590759512801778, 0.9212824474725669, 0.8167985004798506, 0.5167588369156132, 0.7562820756987764, 0.5237127583781183, 0.5327005398826717, 0.5114894429111333, 0.5452077556984849, 0.989701362751594, 0.8963502917148533, 0.6206919943103992, 0.8828758411664133, 0.6040659062537469, 0.5606449099596892, 0.9304726708451986, 0.6667980159209577, 0.5284368909809218, 0.8586424193346593, 0.8406884810498844, 0.767338019870124, 0.8139338077618663, 0.9485968122205686, 0.5108413344359136, 0.8225778592113587, 
0.8925925588191661, 0.7239061164788698, 0.9409420679563103, 0.6166441822046764, 0.8333851650014761, 0.8725780310241862, 0.8724609955315272, 0.6412746475947531, 0.7507160870084728, 0.6855605908982398, 0.6762086439172391, 0.9946360546939546, 0.8895557214538586, 0.7968787314538355, 0.6705484581317189, 0.750963780969884, 0.5692483094684155, 0.7559875259936252, 0.818728674429207, 0.8295800037176841, 0.7444454271304493, 0.9524202813356795, 0.8588896597167563, 0.8342995884543927, 0.9161102117415443, 0.998331905932066, 0.9066585252269295, 0.9298018970053079, 0.6764629725418927, 0.7688219165422201, 0.8876403101439907, 0.5714713064039865, 0.8115471474842331, 0.8397706970997907, 0.9651048141567546, 0.5304659066947781, 0.5078995689397696, 0.8177137654654869, 0.7032081737369679, 0.7759603524681997, 0.6360366381639903, 0.9084883373570118, 0.6280887362601106, 0.9742906929331399, 0.7820801138138798, 0.6552859682134242, 0.9956836467562704, 0.8206115612687664, 0.831244613900549, 0.749181609555085, 0.6428790910240353, 0.9703870509388732, 0.8003280349250845, 0.9346010015960775, 0.7321561760797317, 0.850643556433561, 0.8790562583161815, 0.937094040180904, 0.5778541633949055, 0.9320716206607246, 0.5755066534989784, 0.6981796578212742, 0.86804327865494, 0.9313109389226089, 0.5860421136726794, 0.6979123572334596, 0.5562115157312126, 0.6544038601673974, 0.9545930379067129, 0.7901642320455392, 0.9911908326117609, 0.9093283017501468, 0.6912548281890023, 0.8934881587055545, 0.6673018399168282, 0.9481531991056924, 0.8116404538812667, 0.5770754248899665, 0.5196117212832351, 0.6979862196154123, 0.9245485013813307, 0.9917188220948336, 0.610076792018597, 0.5499141185003689, 0.955130421009397, 0.6246213241222536, 0.731071981838161, 0.9719961374373023, 0.6560272229543945, 0.6341377238834913, 0.7540249462609622, 0.6786389195926681, 0.6745884051607864, 0.6294019630179234, 0.6060127236375559, 0.6663154563897371, 0.58079534741119, 0.7620100270432207, 0.7513447071612716, 0.9585109744380339, 0.6009890744655164, 0.9121535759308554, 0.9566285666553154, 0.6824405287556708, 0.8639903982818585, 0.8529525637219864, 0.6681915170856614, 0.8723931114241338, 0.5422347987520577, 0.9348408509193347, 0.5895058141497216, 0.6533248819793822, 0.8591311585183602, 0.7533034441939205, 0.7397525129753619, 0.8449103857413255, 0.9504865702120677, 0.5871515120192112, 0.5125716253608092, 0.5321440807463691, 0.6318359214082465, 0.7738158985772965, 0.6208270749831529, 0.5723376132508678, 0.6295520426619294, 0.6079321789461327, 0.6307962236594624, 0.8720668013612612, 0.8573411159038253, 0.7994830479560654, 0.5291129366698315, 0.730299898341132, 0.9992615214712324, 0.6489138667889941, 0.5570532966529903, 0.7987420251300448, 0.5148767588015648, 0.5527355044801634, 0.924065301217761, 0.8314834555166328, 0.8765994018850569, 0.9177234086707626, 0.57744477014974, 0.9328207268559405, 0.6129153592088881, 0.5706522486999459, 0.8143160171643082, 0.7820681460886361, 0.9065851481515177, 0.6747397129193216, 0.662307905265836, 0.5394189610744446, 0.7157743243330196, 0.9039485508480232, 0.9667686813609421, 0.6656391845140814, 0.5484272229592088, 0.9623074549208026, 0.7812868313896815, 0.8905651914898415, 0.8343217898773115, 0.7224740160666783, 0.6575307055122546, 0.9799199071174773, 0.8594535075791576, 0.6683701936870281, 0.9313869844106115, 0.7656408799377834, 0.883508213446771, 0.6456353267342998, 0.8409905388214477, 0.6871639260185411, 0.6587994136517332, 0.6425464747042164, 0.6553159285025008, 0.747979223736001, 0.7692181538101066, 0.743110228910683, 
0.7214353977774258, 0.6200631941956483, 0.6875547424660127, 0.7438073924911068, 0.9899620830289777, 0.831224918590273, 0.7242530387656467, 0.6378274276205242, 0.530447116746884, 0.8124503711694029, 0.9982278806039738, 0.5212795472509764, 0.9438474297895023, 0.7861639295705878, 0.8329548336005874, 0.6712733462473335, 0.5964346733084825, 0.8489478062827802, 0.787153868650061, 0.952430226281932, 0.8299829707311365, 0.9011520767420665, 0.6810330481933665, 0.7959215924779854, 0.9722465038605195, 0.7826533281930301, 0.9366391191315367, 0.7479530558240454, 0.8400515875519181, 0.5946967229333473, 0.9460118570216752, 0.8453284970022399, 0.8251354974527325, 0.6168331129517055, 0.6346175932775842, 0.5674624961393282, 0.7279324391880804, 0.9427375806116307, 0.9453012988849482, 0.566598313039319, 0.5470043532956596, 0.5506119789704131, 0.5552027739065204, 0.8798668426857461, 0.7814265117057642, 0.9105978810568618, 0.7551641220745543, 0.7437903608140974, 0.8332810931866148, 0.5476016993855162, 0.9006731853398722, 0.5130279235575103, 0.5301623726086515, 0.6872910075239711, 0.7465196035742174, 0.5236905595675623, 0.579334991486078, 0.9101331620921582, 0.5025037859152981, 0.7623963808924552, 0.8587356036158069, 0.8836943878390509, 0.624908748798299, 0.8689501034086986, 0.6383179754719102, 0.6425498652060226, 0.6865549932687112, 0.7412463734676329, 0.6280305951979734, 0.6437810921796641, 0.664301809566634, 0.9982913378202819, 0.6835782479013142, 0.9298308701742145, 0.7381133754037414, 0.602769739861282, 0.6755225753091366, 0.5413389702294492, 0.8481471109755765, 0.7439782030925761, 0.8138407649855817, 0.5598508062584108, 0.6050458116401012, 0.516861323675953, 0.8895397779871683, 0.6518058261117071, 0.5725061949757606, 0.623547182902894, 0.9919124403847397, 0.659000456026009, 0.8083253554612775, 0.6451740727197146, 0.6282817066348705, 0.5639453402867562, 0.5946570805200544, 0.5080975294363962, 0.5524821751962263, 0.9259315526655564, 0.6973665614471751, 0.617131134865859, 0.5194055776007778, 0.7390835637360076, 0.6479371583420295, 0.901291366850015, 0.9416712931538062, 0.8702742810410187, 0.5234046261809551, 0.779562077366474, 0.5046784748198361, 0.8325822926406503, 0.9648522581365508, 0.919124956626611, 0.5788926264152097, 0.9697145713945676, 0.5926024252350969, 0.7196987400088733, 0.5650184378653482, 0.8986896930685337, 0.5887093116647508, 0.8317607004520826, 0.9000910444385397, 0.9704087779681649, 0.9822206676047478, 0.9390015404380947, 0.7350260278778347, 0.9777580044780979, 0.8209574602624066, 0.5456521126082413, 0.6690582116818913, 0.7106763566968786, 0.5358275300663002, 0.5175184644545454, 0.9671598442824778, 0.8434477370108506, 0.5336708151203241, 0.5491230678879331, 0.7052364681589574, 0.8980568381314566, 0.8490511202073877, 0.9963586262301354, 0.7150740891105958, 0.9154839500867846, 0.5416947623775938, 0.8258943743094442, 0.8831062675985984, 0.9996308901716864, 0.8606535904520898, 0.7485940629171184, 0.8348248664365192, 0.6568738720798364, 0.7059718916191766, 0.8191499316839674, 0.5355428450253126, 0.994181873315425, 0.7192744543246001, 0.911784555144449, 0.7361051525610574, 0.9461471624844567, 0.9919364509529531, 0.7647576243968814, 0.9939217810877989, 0.7923205022211934, 0.5917206228464386, 0.7540679084528483, 0.6380451416340747, 0.90876941200116, 0.9109647990255668, 0.8762237801742128, 0.8718376708948153, 0.6089466601474736, 0.9422088278828535, 0.6992615667696045, 0.7428203481317598, 0.5037763202746968, 0.7581342581140671, 0.7428133483437884, 0.9507251888770272, 0.505767854923442, 
0.7636171563857431, 0.7186992225390344, 0.5345903069888588, 0.8721687992114349, 0.6745420846651248, 0.9626044992106007, 0.7701081217624797, 0.9169916420117684, 0.6294399343689318, 0.8010339290208828, 0.8325697982122198, 0.6690063737377006, 0.5758870749139879, 0.9308131521451991, 0.9741957498204521, 0.8426989858673751, 0.6721335472374792, 0.9840082004175503, 0.6398082832468853, 0.7146739203313057, 0.6388539708213767, 0.5518810251899979, 0.6086908267141273, 0.9853312240410674, 0.7695695937780771, 0.5236407077102752, 0.8050335400548572, 0.9590852344348106, 0.6047981891703997, 0.7082475900864009, 0.8126924379507021, 0.6555187245669541, 0.5477763460078676, 0.6767294576614296, 0.7567072811039409, 0.929177106706844, 0.8297560141639826, 0.9084981060472459, 0.7295145406424864, 0.9576025106244299, 0.6031987365854332, 0.7400211830018926, 0.9131839791103595, 0.7564706896145135, 0.9069179695939111, 0.745852089723429, 0.5693640220477191, 0.530967997180285, 0.5878471579495275, 0.5272256477221992, 0.5541266028270933, 0.9054360457380093, 0.8451492738151849, 0.6276794665682606, 0.6393005769479679, 0.9156127329190741, 0.5732111073062887, 0.6820696140948646, 0.5487604696872912, 0.6638210307197243, 0.5357885664849409, 0.5574079664671834, 0.7520949373170231, 0.9310433927684811, 0.9611895583051681, 0.5508748444696392, 0.571856751833049, 0.8083324369894647, 0.6401666279076357, 0.5149783616224346, 0.712951402389226, 0.9697771418618912, 0.9598937218479184, 0.9315437048818521, 0.8780095209303238, 0.8307292114659354, 0.8632091632818641, 0.6631836697892819, 0.9032255101652717, 0.6208718076132246, 0.7981130510134825, 0.7619133161493655, 0.8393621474624626, 0.5142109244042133, 0.6853662583631417, 0.9174443774617522, 0.6264996016755534, 0.8142093949560223, 0.6107622286363483, 0.7631103323289437, 0.9595058972873932, 0.7480961120622356, 0.7870747650761089, 0.6563860545810211, 0.974933332862683, 0.8790500055690837, 0.5173268655970613, 0.9077615245351769, 0.5649220280946787, 0.9250425181197677, 0.515768911176496, 0.9861365498919419, 0.6204874195655736, 0.8856072237824193, 0.9696742394850757, 0.5601860727169075, 0.5566053171793514, 0.8148928789997116, 0.9276377366796138, 0.7620261840941702, 0.583685460339791, 0.5012766658992078, 0.658912385309783, 0.997645539852714, 0.8691218141782133, 0.740275226906329, 0.8604454937652504, 0.9714989365548432, 0.8680033424336302, 0.8591432115153267, 0.8818996997172206, 0.851022579128784, 0.8889644380070153, 0.5579733654774679, 0.7015377441221222, 0.5765878562999958, 0.848423838051402, 0.6794217040789039, 0.620849406482332, 0.9365532854758876, 0.9804626419221518, 0.933896126925242, 0.9638704780744998, 0.6932434659020816, 0.624355925829668, 0.9914349538704588, 0.5973919147172617, 0.9904960431312819, 0.6871952043194742, 0.5919116200084016, 0.5975622089389194, 0.7210960692113648, 0.8489350246926728, 0.5913121600241779, 0.827416809809387, 0.808243543542049, 0.916582822950619, 0.7827504972902646, 0.8035431848607406, 0.7420322889687868, 0.8738482420566027, 0.6719095823717143, 0.9413923383732696, 0.8894892870199551, 0.9286226397585142, 0.8480164759616844, 0.5499974689635071, 0.682134780883203, 0.9106390015767272, 0.5415436187482721, 0.6253802185391212, 0.9071158302684275, 0.6646717036149808, 0.9701109219243478, 0.6426738804352837, 0.6713109598327993, 0.6822901646962827, 0.6609658523183088, 0.5259440187730062, 0.9996007269133378, 0.6542960807197811, 0.9604380888060101, 0.7939003493320789, 0.6710603868510411, 0.9143222686394259, 0.7011769611265453, 0.6984193396617862, 0.7835829108338777, 
0.9029053761138512, 0.5185301212420292, 0.9768748983913915, 0.9415011457014427, 0.605667165162392, 0.5807487221579383, 0.5229972519469499, 0.8996238300777222, 0.5333523175178193, 0.6338312707718852, 0.7906595502622555, 0.7394776958855113, 0.8822647031012276, 0.5792456726207447, 0.6060289752084239, 0.59738722505868, 0.8036987054884588, 0.6318838918780572, 0.7542769303287236, 0.7371766288707337, 0.5317398004401181, 0.9859062415675864, 0.5090268555795172, 0.5953014524822569, 0.6845006038992987, 0.618116678501057, 0.8397584495301553, 0.8911417998315636, 0.7574370040360896, 0.8022049029347608, 0.7412617112073377, 0.8521415328975266, 0.7847448973135493, 0.9361824326521333, 0.6141933606996414, 0.5579393041666796, 0.743446264721986, 0.8218650490572034, 0.7638250078143927, 0.6037231985775295, 0.5646252241428036, 0.8413183953870893, 0.8763059540536184, 0.7659744179680156, 0.5764257574449114, 0.9540026281628655, 0.9459591208912934, 0.6740129097806586, 0.8174383407713175, 0.929506104553306, 0.8332831904528146, 0.663263566225297, 0.5013466267031965, 0.8810081628878481, 0.961015739295236, 0.7202279995184866, 0.8956031104786478, 0.6629094231665342, 0.6499454852891273, 0.5635704829501369, 0.949100252346629, 0.801409983664389, 0.8499217548992319, 0.7102412441342927, 0.5659466859462172, 0.8310819671728318, 0.64919690062132, 0.9745646407105648, 0.5353443784099708, 0.7807915025186756, 0.8944976796764404, 0.7322101169379684, 0.8929598254934342, 0.8225385148406966, 0.9877194806062892, 0.8519158779828959, 0.7490921416150285, 0.9910482718179279, 0.8629317263921277, 0.9324597259320305, 0.7523593035203828, 0.5337732995406582, 0.9381318318201499, 0.9534112778188844, 0.9901212873349984, 0.8453933032595844, 0.5665860762199117, 0.848745001468165, 0.7302542818847466, 0.5806048673755082, 0.8892291005016375, 0.5600656515812532, 0.6621505183869394, 0.9882551078284239, 0.6184017365398122, 0.7034795141286356, 0.5713806747775991, 0.9793210788772707, 0.518739591676302, 0.9770530336462055, 0.6238009551441674, 0.769552498182572, 0.9021421508179768, 0.5864536992558724, 0.6650409805719825, 0.7029192050307664, 0.9247051787419717, 0.630844010055782, 0.8251133014313583, 0.7433441661396432, 0.5007663927200829, 0.5657765378652244, 0.8560847084618797, 0.532407529824069, 0.6203428995684561, 0.8657224988368766, 0.9414847678534359, 0.8128096812842516, 0.9415149845042652, 0.9968675477337037, 0.6910928344764198, 0.862197167781477, 0.7900355818901039, 0.8144255043885911, 0.5715349855162275, 0.6451606351791037, 0.906487817882614, 0.7955509442071145, 0.6138461827193851, 0.8672158586412106, 0.9821305854356762, 0.6490146156074252, 0.7497819861332757, 0.6233209219296154, 0.9961367778355639, 0.8268389494579773, 0.5911445306477774, 0.8692570468595011, 0.606225822878868, 0.9071164031009722, 0.9763444065528841, 0.8330365267963592, 0.5816252915967688, 0.8034701004585916, 0.9815171685338404, 0.6407791119865044, 0.6705284390014794, 0.9895017351388478, 0.8043267796899647, 0.9053671578191433, 0.8501588407732036, 0.9299602133962361, 0.7797121335804715, 0.7580661891275962, 0.757995553462457, 0.9207085675003752, 0.6603775693497382, 0.9203211401929199, 0.8063039460173962, 0.5072220115588004, 0.5834564258945258, 0.926672586643553, 0.5733840626417026, 0.6725181326603895, 0.7627139978493577, 0.9188483425695912, 0.6588471441608188, 0.5489400419829952, 0.5530431436732157, 0.6926795995203132, 0.8336761107772268, 0.5353857019029865, 0.9135668443449176, 0.9404195846644339, 0.6045938732687836, 0.832102712481127, 0.7246067899538654, 0.5797261865561318, 
0.9086097530184468, 0.6794967576079536, 0.7279788100183431, 0.58352781439535, 0.9132851253243481, 0.7916021707578611, 0.9197166061178488, 0.6628595991537716, 0.8751491715264126, 0.6358378980923691, 0.6961644191741689, 0.8791648323764291, 0.7255280967969631, 0.8463327300462983, 0.6462634564166616, 0.654412280900885, 0.5028359673078285, 0.7320363872494591, 0.6738780657907539, 0.553314417855775, 0.8056666401066834, 0.5144258586110524, 0.620400666159552, 0.9618885420852021, 0.7212572393134558, 0.5921208623210962, 0.7307119192881724, 0.5658365851952072, 0.8744866290286397, 0.9094057340288264, 0.8727496131607384, 0.7963817783094735, 0.9445730122412959, 0.545107747796955, 0.7785372942342575, 0.5510461234613888, 0.6704647554854897, 0.6297175253271474, 0.9476322025069388, 0.6382585177372411, 0.6858951473332586, 0.506924472348355, 0.9711299642876424, 0.8416808932653932, 0.5171392145582858, 0.6070454066199347, 0.8181799947568047, 0.5949269235194905, 0.7011254287996236, 0.8851795644865501, 0.9336508387720961, 0.8114802756022452, 0.5335668795271751, 0.8477596579754565, 0.7839979889210541, 0.96357153760001, 0.7491378700375544, 0.8409461201072477, 0.5182573778086701, 0.983121330872997, 0.5727923494724874, 0.8733073479444509, 0.873732730809655, 0.5476747736734127, 0.8775909512349692, 0.6955266788524741, 0.6173700379279927, 0.5658567754437628, 0.6583144046193574, 0.5692106979715361, 0.8098959753159554, 0.9453743233580805, 0.5651749684071069, 0.940926259648261, 0.685107147700752, 0.9944575322647939, 0.8289494905652102, 0.809748986515362, 0.7199759114344715, 0.6477977122734841, 0.5008459124374651, 0.5458201066945627, 0.6673267427553381, 0.8082324602413753, 0.9251185181115764, 0.6593063601022977, 0.8616616372119741, 0.6480564029317369, 0.9928492532923685, 0.5858379800429006, 0.6648109554655944, 0.5843321664540182, 0.6036045936172416, 0.6213941508282957, 0.770544857122542, 0.6881063153758744, 0.9727727046133338, 0.5755144719462586, 0.7771927164667769, 0.547975611687048, 0.9100790258106277, 0.8693354264924416, 0.6873658692536446, 0.9375406398921834, 0.7125903578414812, 0.9670200877604898, 0.8996804982842057, 0.8270947704818606, 0.5220927459316825, 0.8331401696859071, 0.9262220868971276, 0.7662790279540495, 0.7277118081156889, 0.5429633465109894, 0.6499297467997416, 0.7774402106926288, 0.8690704463178767, 0.8746366409400838, 0.5732673225574458, 0.9005027649739645, 0.5942022846172836, 0.5016184431267685, 0.5724667566027446, 0.5679170853475457, 0.5571346161915087, 0.5359664606916478, 0.9042250327422112, 0.6119169740813302, 0.9537720500114317, 0.6052013758886547, 0.5649458789910851, 0.6698502872027516, 0.5943665694122892, 0.8420214281960239, 0.8922024667608199, 0.8834439172075004, 0.8585943863956393, 0.6545870971585632, 0.511864462734207, 0.7104262024285828, 0.8471447300162056, 0.5798181072903852, 0.9768848182634415, 0.7655062438199809, 0.9043508886239935, 0.5525912837584785, 0.8742135676270333, 0.5924430532702758, 0.7835322160675412, 0.9694952627970288, 0.6790714392487864, 0.7404036568514782, 0.6136874138894772, 0.7372237934320325, 0.5994036089057619, 0.9782860121655232, 0.6905422385653593, 0.7586769845549562, 0.5644560016279353, 0.5531692185323454, 0.9685674417118189, 0.7008834836166964, 0.8997264544497989, 0.8718475745698481, 0.7226049685942544, 0.9042912484634908, 0.6513439857000194, 0.5700677759815922, 0.5497150381648168, 0.6227497770911719, 0.7503174533864772, 0.9106822756873143, 0.9840962079114687, 0.7300989295293154, 0.6965864075687607, 0.7605939906386932, 0.6274927950607967, 0.7280673714160244, 
0.5789315699577973, 0.7844095538149041, 0.6364644021146414, 0.7439610644815688, 0.862522773149345, 0.8124127148999893, 0.7329070540894043, 0.5881468559225811, 0.5711736600977816, 0.6516721351146957, 0.9034669670756992, 0.9756058714140338, 0.5105552274536298, 0.7901895171755392, 0.6576826102206815, 0.5153097142342173, 0.990789303225108, 0.5088438668768593, 0.7995434358221443, 0.6931214716326113, 0.7634814188405696, 0.73911987430203, 0.5298346603877444, 0.671802121252893, 0.5833694748626387, 0.7760111993931386, 0.5629325953234372, 0.5625252355339437, 0.7731941368835478, 0.9959076126478873, 0.508411347241783, 0.8637280487632221, 0.849528047564218, 0.6153752783385149, 0.737419052442398, 0.9961788216051635, 0.9308677218503676, 0.8703340877994614, 0.962764480661111, 0.7766341278383857, 0.969699634462984, 0.9249277200849835, 0.8428072199301783, 0.6542399343112852, 0.7788041204479893, 0.5149808868552206, 0.7703701196956748, 0.6656170615685806, 0.5014842724418124, 0.7339314124414604, 0.7084390098495446, 0.8612823595873873, 0.5534892031891943, 0.5983864941820476, 0.5052883209214856, 0.850654267767163, 0.6037475957376495, 0.7281082593814012, 0.9544564991410622, 0.9605432076438106, 0.8343915022026079, 0.9921839173575588, 0.8659412169494761, 0.7741720891365971, 0.7472776350435302, 0.9417336132983741, 0.7625703370421142, 0.9702957075734349, 0.6520372094621814, 0.5853461211600434, 0.8312698947732468, 0.9517634394178764, 0.793792661047068, 0.9203309022242423, 0.7526004833442819, 0.8126399077384473, 0.5468337059865118, 0.6910297688184622, 0.7602136431901315, 0.8941452215693861, 0.6971265675618927, 0.9405182724430502, 0.6533103886769077, 0.9814364566749274, 0.8507623924593727, 0.7565945531783052, 0.6281461594141244, 0.5528693137681613, 0.9288626428132976, 0.9603078392148165, 0.6981603402405614, 0.7032064823913237, 0.7208791863083243, 0.7321398921344049, 0.982615419205818, 0.8889257198841766, 0.7967556972668262, 0.5800975864571819, 0.5675293292341701, 0.6014051578786626, 0.9793672301564219, 0.5889080862383254, 0.9485731740737803, 0.8619029917296794, 0.953946219999102, 0.9184200168732022, 0.8599682805617193, 0.8671002933337709, 0.9461143436862698, 0.7598611511349622, 0.922132352683315, 0.6421955777614081, 0.8356584099843797, 0.632279882794637, 0.831633128237651, 0.6855347294184113, 0.7310709300373774, 0.7526302259840572, 0.947448154782603, 0.8772582935443404, 0.9482422778697546, 0.7940726000511904, 0.7450961864831841, 0.7727237420772337, 0.6558486134198822, 0.7896300882656345, 0.8169735607765589, 0.9255731208067093, 0.7515772237941648, 0.5285892025573582, 0.9325548935754131, 0.6467259116353458, 0.5527276411995121, 0.6938334564574848, 0.9020166972932648, 0.9511910759048087, 0.850539562392973, 0.9830790204678698, 0.6114751978110489, 0.5696758379184861, 0.6073976614326303, 0.9835859482457012, 0.6815113601183531, 0.7149422699609673, 0.668469500576615, 0.8134721582485482, 0.9523961505524883, 0.7036764527741294, 0.940936459619827, 0.9203568050849684, 0.6047497360139672, 0.8909711892624385, 0.9276841192761078, 0.8159069790078146, 0.6510312797058044, 0.96457022368362, 0.8197751949050621, 0.6492127877581992, 0.8174404523827372, 0.6456050955745896, 0.5537637854404754, 0.803423481452022, 0.5935711113967153, 0.5656568526537833, 0.6946488551082611, 0.9208290721609491, 0.8285723699677446, 0.5039348227860196, 0.83807770884522, 0.94941060981521, 0.9255519929388483, 0.83151446768875, 0.704261165107587, 0.6705925131448701, 0.7954414904634906, 0.9102417497439719, 0.8973814848083963, 0.7027823156685774, 0.6809743161338941, 
0.8400930961410447, 0.6544825206912691, 0.5936191121475589, 0.6098432715716356, 0.6187910356982614, 0.5468658771089432, 0.6264343715214674, 0.5041356670127476, 0.9674962226078226, 0.9050277737885746, 0.7936593582349388, 0.6096407380274126, 0.613611000053517, 0.6238373968969477, 0.7029527730992118, 0.9370604268068581, 0.8806417952241865, 0.5360856084300725, 0.8656844068638787, 0.6668690507771546, 0.6417667250502159, 0.7738938003977714, 0.9426986965128328, 0.6056344566130712, 0.5230112096206755, 0.9159236840030894, 0.9937432348751171, 0.9345038135971047, 0.7683750727524779, 0.5775777960988799, 0.988611692752493, 0.9887296420955052, 0.536327618330819, 0.756566974451988, 0.8366075335427013, 0.9107928771245182, 0.5161937138360806, 0.6413721752920131, 0.8178308551283758, 0.5253290804143698, 0.8248561064031583, 0.5192251763088735, 0.7881903435787168, 0.8067590265203075, 0.8367853891313793, 0.900248399087032, 0.9629618712131891, 0.8197196871984488, 0.9571480519343173, 0.8696243147053886, 0.6024004004835963, 0.9940597346652376, 0.9152422533551574, 0.7880486411629548, 0.9414990986979996, 0.8970066882784551, 0.5453874358818824, 0.6969838763223108, 0.862193537591254, 0.7781637822417875, 0.5200707261894677, 0.9611839472602827, 0.8880188053232222, 0.8108581395730916, 0.9283254825625256, 0.8449422127229824, 0.6762651829236576, 0.8498937136682188, 0.8167577210771826, 0.979465973456586, 0.7685849046161162, 0.8311605190210216, 0.754767555670858, 0.6669684184801851, 0.9205544972427047, 0.6737882991367233, 0.6472369512866004, 0.8404396384940804, 0.7556380512059366, 0.943057687436327, 0.9452457419539588, 0.7540793253934455, 0.8134577598617327, 0.6870691929818225, 0.951508655185747, 0.6844400623811586, 0.5861785558268452, 0.5406831513303881, 0.7746811900699899, 0.5068925077969787, 0.5977354603726354, 0.9377364336913849, 0.7409974300096525, 0.6037342773254262, 0.8832145645567583, 0.5175076757744124, 0.7662699207204287, 0.9616248496438993, 0.9407397480028867, 0.8995356409653562, 0.6819297687111362, 0.6827526137152915, 0.5003349963411161, 0.7609182947481975, 0.788044318677101, 0.9109547547272163, 0.7778506670147598, 0.9214191628760735, 0.6461387854501006, 0.7102858055994863, 0.954014839734733, 0.6196056386352573, 0.5681670402891323, 0.5133303087650926, 0.5426087875466159, 0.5895786915594983, 0.7746487471172482, 0.8774653772073235, 0.5702753385572272, 0.7142300140535749, 0.6845494399315013, 0.7802740964462768, 0.5895505683992917, 0.7630675455299742, 0.9128899976811513, 0.9836318671591682, 0.685072030278494, 0.9928829912429464, 0.9107919622238143, 0.6295716059777767, 0.761727332751532, 0.7262962855497962, 0.7283901717213126, 0.766932388131865, 0.6357798368712051, 0.604507285867328, 0.6763687771279823, 0.6721865311114175, 0.5186769465341295, 0.8066526417101189, 0.737600245117265, 0.7574946253389045, 0.9822925363868376, 0.6400476720230552, 0.6269319627821257, 0.6952627196645965, 0.9166843147863557, 0.8773734656332947, 0.9900767689051748, 0.6135923163376094, 0.660832847432361, 0.8564925625261977, 0.968731827898049, 0.9807762483112663, 0.9727796581255344, 0.7561000628256942, 0.8481712049796453, 0.5565347548037309, 0.7459688846947418, 0.82472607104824, 0.8199137975632552, 0.9516790158451226, 0.9402416471917567, 0.9840108406153008, 0.8947776522944455, 0.774628391694426, 0.8986312324925527, 0.5314981070528024, 0.9625744961090141, 0.8911141130487985, 0.7132441127140182, 0.512990257284528, 0.579316618736835, 0.8620983300791822, 0.9833976751025184, 0.9527207830301339, 0.6298273625868904, 0.815495241343476, 0.5881015291527121, 
0.7913188640624692, 0.7099210530947955, 0.7522925591640094, 0.5944220340185769, 0.6988632483908601, 0.637266770148003, 0.9576553890178743, 0.9007946341426135, 0.7417935527913493, 0.591436046108186, 0.9998238071596031, 0.5506011167893692, 0.88525712334556, 0.80754877405888, 0.7699961638591879, 0.803929094757599, 0.5663042473531694, 0.8214355169222599, 0.5391587489341252, 0.7605221834560651, 0.5304718300472717, 0.717559255198535, 0.9443316979473941, 0.7943145054172224, 0.9050678036083267, 0.6151612639198532, 0.5602507667394581, 0.8042916469699162, 0.5655426798382674, 0.652548419767767, 0.7713290554797467, 0.7899289360152547, 0.637770175257261, 0.7233317545761551, 0.9089221483811096, 0.5117995326940902, 0.8324344031561803, 0.6153085617230847, 0.9083125477844938, 0.8324152383943755, 0.8090296580483545, 0.5001745848730008, 0.9962346890679281, 0.7373417662116579, 0.9951703386629501, 0.7395931315663136, 0.5696377912019076, 0.5917488874097298, 0.6748573579327317, 0.8769432891857535, 0.5691026729976516, 0.6761289211131405, 0.8314405814992777, 0.9134457819583013, 0.6225968020866421, 0.6135759445656412, 0.8311070898123274, 0.7283674419712469, 0.9486284002644159, 0.6844657630718356, 0.5707254250317484, 0.6909127166143363, 0.6445000342932923, 0.7456531283124668, 0.7859649317468724, 0.6010633225460422, 0.7406827147584976, 0.8724638258710198, 0.5153928851581455, 0.9988760099479499, 0.5669304755233022, 0.8095205407625105, 0.792622485041846, 0.7913910846377656, 0.5535494736315726, 0.6919053993657291, 0.6536061016632948, 0.7464073118075729, 0.6396588396613225, 0.5794470834528156, 0.6490326893473868, 0.690088561595623, 0.874372007474997, 0.8838552498683668, 0.9743723992673781, 0.7362385941938411, 0.852415921151637, 0.804573500790353, 0.7144546658608908, 0.9113633838322783, 0.8485944806382635, 0.9714298649279068, 0.909170510926882, 0.7429611394694472, 0.5803296224511241, 0.5976068794202294, 0.8940634121658386, 0.7269907695543304, 0.6643535658670378, 0.7882089909695664, 0.8682977316935927, 0.8771195191775105, 0.6633989309201578, 0.740784654049776, 0.7534958910455833, 0.5707755124321667, 0.7274099527553044, 0.5249795181708543, 0.8138896329575629, 0.6967900973580026, 0.5932684545176272, 0.7327506945953604, 0.5553576267162681, 0.77581738471256, 0.6526446923467705, 0.672476438486218, 0.8794851882331634, 0.5546299276591804, 0.6728883641211967, 0.6187927615823623, 0.9692284082564657, 0.9675316647841373, 0.6465906066702043, 0.8603072997320135, 0.6933337633834702, 0.9473599492476242, 0.7345802044687432, 0.8675181044301365, 0.8694198932878385, 0.7770807119429932, 0.9952964264644264, 0.775219941157125, 0.862614356753628, 0.604606272156893, 0.8709402513683127, 0.9857742099084161, 0.8360069393803965, 0.6566118588365555, 0.6170684800499598, 0.5363769203703678, 0.900759320763458, 0.6005496919475729, 0.5691091550441016, 0.7656958835970669, 0.8801159654922822, 0.9441994008703869, 0.798699052743302, 0.9364585577258434, 0.9681879163365615, 0.6611034011875769, 0.5679327352358461, 0.6501808973341667, 0.521372901249659, 0.5458059553407324, 0.6497833035818329, 0.7280676266542376, 0.7208274603999099, 0.5629405338872506, 0.8236189303830068, 0.8832237715362599, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[] = {
1, 3, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 493, 495, 497, 499, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 559, 561, 563, 565, 567, 569, 572, 574, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 699, 701, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 845, 847, 849, 851, 853, 855, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1062, 1064, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1132, 1134, 1136, 1138, 1141, 1143, 1145, 1147, 1150, 1152, 1156, 1158, 1161, 1163, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1184, 1186, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1210, 1212, 1215, 1217, 1220, 1222, 1225, 1227, 1230, 1232, 1238, 1240, 1243, 1245, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1357, 1359, 1361, 1363, 1367, 1369, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1393, 1395, 1399, 1401, 1404, 1406, 1409, 1411, 1414, 1416, 1419, 1421, 1424, 1426, 1429, 
1431, 1434, 1436, 1439, 1441, 1443, 1445, 1447, 1449, 1452, 1454, 1458, 1460, 1462, 1464, 1469, 1471, 1473, 1475, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1718, 1720, 1722, 1724, 1726, 1728, 1732, 1734, 1740, 1742, 1744, 1746, 1748, 1750, 1753, 1755, 1758, 1760, 1762, 1764, 1766, 1768, 1771, 1773, 1776, 1778, 1781, 1783, 1786, 1788, 1791, 1793, 1796, 1798, 1800, 1802, 1804, 1806, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1855, 1857, 1859, 1861, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1864, 1877, 1864, 1877, 1864, 1921, 1923, 1925, 1927, 1929, 1931, 1731, 1580, 1580, 1237, 1235, 1468, 1468, 1739, 1418, 1423, 1234, 1209, 1237, 1235, 1790, 1237, 1235, 1739, 1737, 1739, 1737, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 1555, 1237, 1235, 1641, 1555, 1237, 1235, 1237, 1235, 1752, 1694, 1731, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1757, 1752, 1643, 1877, 1209, 1237, 1235, 1234, 1237, 1235, 1234, 1209, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1061, 1060, 1808, 1643, 1641, 1643, 1790, 1736, 1808, 1641, 1237, 1235, 1757, 1694, 2285, 2287, 2289, 2291, 2294, 2296, 2298, 2300, 2303, 2305, 2307, 2309, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2349, 2351, 2353, 2355, 2357, 2359, 1456, 1451, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1237, 1235, 1061, 1060, 1237, 1235, 1418, 1423, 1451, 1451, 1717, 1775, 1752, 1757, 1757, 1752, 1785, 1785, 1757, 1752, 1757, 1752, 1775, 1757, 1752, 1737, 1737, 1757, 1752, 1717, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2644, 2646, 2649, 2651, 2653, 2655, 1061, 1060, 1214, 1219, 1229, 1224, 1237, 1235, 1214, 1219, 1229, 1224, 1149, 1229, 1224, 1237, 1235, 1149, 1155, 1237, 1235, 1237, 1235, 1456, 1451, 1438, 1456, 1451, 1467, 1423, 1418, 1423, 1433, 1418, 1433, 1438, 1456, 1451, 1457, 1398, 1398, 1457, 1467, 1877, 1643, 1641, 1770, 1770, 1739, 1737, 1739, 1737, 1877, 1864, 1877, 1864, 1877, 1864, 1877, 1864, 1864, 1864, 2979, 2981, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3065, 3067, 3070, 3072, 3074, 3076, 3078, 3080, 3083, 3085, 3089, 3091, 3094, 3096, 3100, 3102, 3104, 3106, 3108, 3110, 3113, 3115, 3119, 3121, 3124, 3126, 3130, 3132, 3134, 3136, 3139, 3141, 3098, 3093, 3146, 3144, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 2983, 2983, 3098, 3093, 3064, 3146, 3144, 3098, 3093, 3143, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 
3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3151, 3680, 3682, 3688, 3690, 3149, 3147, 3149, 3147, 3149, 3147, 2658, 3707, 3709, 3098, 3093, 3064, 3098, 3093, 3098, 3093, 3143, 2658, 2972, 2972, 4018, 4020, 3146, 3144, 4053, 4055, 4057, 4059, 4062, 4064, 3146, 3144, 3146, 3144, 3149, 3147, 3082, 3088, 3112, 3118, 3144, 3146, 3146, 3144, 3149, 3147, 3151, 4140, 4142, 4145, 4147, 4152, 4154, 4157, 4159, 4162, 4164, 4166, 4168, 4171, 4173, 4175, 4177, 4156, 4061, 4161, 4156, 4181, 4179, 4161, 4156, 4181, 4179, 4181, 4179, 4151, 4161, 4061, 4181, 4179, 4151, 4179, 4181, 4181, 4179, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6656, 6658, 6660, 6662, 6664, 6666, 6668, 6670, 6672, 6674, 6676, 6678, 6680, 6682, 6684, 6686, 6688, 6690, 6692, 6694, 6696, 6698, 6700, 6702, 6704, 6706, 6708, 6710, 6712, 6714, 6716, 6718, 6720, 6722, 6724, 6726, 6728, 6730, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6764, 6766, 6768, 6770, 6772, 6774, 6776, 6778, 6780, 6782, 6784, 6786, 6788, 6790, 6792, 6794, 6796, 6798, 6800, 6802, 6804, 6806, 6808, 6810, 6812, 6814, 6816, 6818, 6820, 6822, 6824, 6826, 6828, 6830, 6832, 6834, 6836, 6838, 6840, 6842, 6844, 6846, 6848, 6850, 6852, 6854, 6856, 6858, 6860, 6862, 6864, 6866, 6868, 6870, 6872, 6874, 6876, 6878, 6880, 6882, 6884, 6886, 6888, 6890, 6892, 6894, 6896, 6898, 6900, 6902, 6904, 6906, 6908, 6910, 6912, 6914, 6916, 6918, 6920, 6922, 6924, 6926, 6928, 6930, 6932, 6934, 6936, 6938, 6940, 6942, 6944, 6946, 6948, 6950, 6952, 6954, 6956, 6958, 6960, 6962, 6964, 6966, 6968, 6970, 6972, 6974, 6976, 6978, 6980, 6982, 6984, 6986, 6988, 6990, 6992, 6994, 6996, 6998, 7000, 7002, 7004, 7006, 7008, 7010, 7012, 7014, 7016, 7018, 7020, 7022, 7024, 7026, 7028, 7030, 7032, 7034, 7036, 7038, 7040, 7042, 7044, 7046, 7048, 7050, 7052, 7054, 7056, 7058, 7060, 7062, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 7080, 7082, 7084, 7086, 7088, 7090, 7092, 7094, 7096, 7098, 7100, 7102, 7104, 7106, 7108, 7110, 7112, 7114, 7116, 7118, 7120, 7122, 7124, 7126, 7128, 7130, 7132, 7134, 7136, 7138, 7140, 7142, 7144, 7146, 7148, 7150, 7152, 7154, 7156, 7158, 7160, 7162, 7164, 7166, 7168, 7170, 7172, 7174, 7176, 7178, 7180, 7182, 7184, 7186, 7188, 7190, 7192, 7194, 7196, 7198, 7200, 7202, 7204, 7206, 7208, 7210, 7212, 7214, 7216, 7218, 7220, 7222, 7224, 7226, 7228, 7230, 7232, 7234, 7236, 7238, 7240, 7242, 7244, 7246, 7248, 7250, 7252, 7254, 7256, 7258, 7260, 7262, 7264, 7266, 7268, 7270, 7272, 7274, 7276, 7278, 7280, 7282, 7284, 7286, 7288, 7290, 7292, 7294, 7296, 7298, 7300, 7302, 7304, 7306, 7308, 7310, 7312, 7314, 7316, 7318, 7320, 7322, 7324, 7326, 7328, 7330, 7332, 7334, 7336, 7338, 7340, 7342, 7344, 7346, 7348, 7350, 7352, 7354, 7356, 7358, 7360, 7362, 7364, 7366, 7368, 7370, 7372, 7374, 7376, 7378, 7380, 7382, 7384, 7386, 7388, 7390, 7392, 7394, 7396, 7398, 7400, 7402, 7404, 7406, 7408, 7410, 7412, 7414, 7416, 7418, 7420, 7422, 7424, 7426, 7428, 7430, 7432, 7434, 7436, 7438, 7440, 7442, 7444, 7446, 7448, 7450, 7452, 7454, 7456, 7458, 7460, 7462, 7464, 7466, 7468, 7470, 7472, 7474, 7476, 7478, 7480, 7482, 7484, 7486, 7488, 7490, 7492, 7494, 7496, 7498, 7500, 7502, 7504, 7506, 7508, 7510, 7512, 7514, 7516, 7518, 7520, 7522, 7524, 7526, 7528, 7530, 7532, 7534, 7536, 7538, 7540, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7550, 7552, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 
7564, 7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7584, 7585, 7586, 7587, 7588, 7589, 7590, 7591, 7592, 7593, 7594, 7595, 7596, 7597, 7598, 7599, 7600, 7601, 7602, 7603, 7604, 7605, 7606, 7607, 7608, 7609, 7610, 7611, 7612, 7613, 7614, 7615, 7616, 7617, 7618, 7619, 7620, 7621, 7622, 7623, 7624, 7625, 7626, 7627, 7628, 7629, 7630, 7631, 7632, 7633, 7634, 7635, 7636, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7649, 7651, 7653, 7655, 7657, 7659, 7661, 7663, 7665, 7667, 7669, 7671, 7673, 7675, 7677, 7679, 7681, 7683, 7684, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7760, 7761, 7762, 7763, 7764, 7765, 7766, 7767, 7768, 7769, 7770, 7771, 7772, 7773, 7774, 7775, 7776, 7777, 7778, 7779, 7780, 7781, 7782, 7783, 7784, 7785, 7786, 7787, 7788, 7789, 7790, 7791, 7792, 7793, 7794, 7795, 7796, 7797, 7798, 7799, 7800, 7801, 7802, 7803, 7804, 7805, 7806, 7807, 7808, 7809, 7810, 7811, 7812, 7813, 7814, 7815, 7816, 7817, 7818, 7819, 7820, 7821, 7823, 7825, 7827, 7829, 7831, 7833, 7835, 7837, 7839, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903, 7904, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7917, 7918, 7919, 7920, 7921, 7922, 7923, 7924, 7925, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7944, 7945, 7946, 7947, 7948, 7949, 7950, 7951, 7952, 7953, 7954, 7955, 7956, 7958, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 7982, 7983, 7984, 7986, 7988, 7990, 7991, 7992, 7993, 7994, 7995, 7996, 7997, 7998, 7999, 8000, 8001, 8002, 8003, 8004, 8005, 8006, 8007, 8009, 8011, 8013, 8015, 8017, 8019, 8021, 8023, 8024, 8025, 8026, 8027, 8028, 8029, 8030, 8031, 8032, 8033, 8034, 8035, 8036, 8037, 8038, 8039, 8040, 8041, 8042, 8043, 8044, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8321, 8323, 1808, 8415, 8495, 8507, 8458, 8503, 8505, 1877, 8321, 8323, 1808, 8415, 8497, 8509, 8499, 8511, 8458, 8503, 8505, 1877, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8065, 1456, 1451, 1438, 8065, 1456, 1451, 8067, 576, 576, 576, 576, 1580, 1580, 1580, 8069, 1188, 1183, 8073, 1188, 1183, 1209, 1234, 8519, 8077, 1188, 1183, 698, 698, 698, 1785, 1785, 8426, 576, 1790, 1770, 1790, 1468, 1468, 1770, 8524, 8082, 1699, 1699, 1699, 1699, 1699, 1214, 1229, 1224, 8526, 8528, 1731, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 8096, 8450, 1736, 1731, 8099, 1752, 1736, 1731, 1739, 1737, 8099, 1752, 8097, 1752, 8099, 1752, 8426, 8102, 1188, 1183, 8106, 1188, 1183, 8110, 1757, 1752, 1775, 8113, 8115, 1808, 8117, 8119, 8121, 8123, 1757, 1209, 1234, 8531, 1438, 8127, 576, 1736, 1731, 8533, 1736, 1731, 8535, 576, 576, 576, 576, 8132, 8134, 1790, 1790, 1790, 1790, 1790, 1739, 1737, 8538, 8540, 8542, 8135, 8137, 1188, 1183, 8141, 1188, 1183, 1209, 1234, 8546, 
1780, 1785, 1785, 1785, 1785, 8148, 1757, 1752, 1785, 1785, 1785, 1785, 1641, 1234, 1209, 8550, 1209, 1234, 8552, 698, 1752, 8156, 8158, 1752, 8159, 8160, 698, 1752, 1214, 1224, 1229, 1234, 1209, 8557, 1219, 8559, 1219, 8561, 8330, 1188, 1183, 1234, 1209, 8563, 8314, 8565, 1699, 1694, 1643, 1877, 8172, 1188, 1183, 1214, 1224, 1229, 8570, 1214, 1224, 1229, 8573, 1214, 1224, 1229, 8575, 8577, 1219, 8579, 1219, 8581, 8583, 8585, 1219, 8587, 1219, 8589, 8591, 8593, 8188, 1188, 1183, 8595, 1188, 1183, 1408, 1403, 1408, 1403, 1371, 1438, 844, 8200, 857, 576, 8426, 8598, 576, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1468, 1468, 1468, 1468, 576, 8450, 576, 8426, 576, 576, 1209, 1234, 8605, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 1214, 1229, 1224, 1219, 1229, 1224, 1731, 1736, 1737, 1739, 698, 1757, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 698, 698, 8224, 8226, 8228, 8230, 8232, 698, 1757, 1736, 1731, 1739, 1737, 698, 1757, 8237, 8238, 1864, 8408, 8627, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8247, 1188, 1183, 1214, 1224, 1229, 1234, 1209, 8637, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8247, 1188, 1183, 1219, 8639, 1219, 8641, 1209, 1234, 8643, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8257, 1188, 1183, 1165, 1160, 8335, 1188, 1183, 1214, 1229, 1224, 1234, 1209, 8645, 1165, 1160, 8647, 1188, 1183, 1219, 1229, 1224, 1234, 1209, 8649, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 8651, 8270, 844, 8273, 857, 8276, 8278, 8280, 8282, 8284, 8286, 1456, 1456, 8297, 8426, 1736, 1731, 1736, 1731, 1739, 8305, 1757, 1752, 1699, 1694, 8469, 1717, 8291, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8294, 8296, 1736, 1731, 8318, 8659, 1699, 1694, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8297, 8663, 1736, 1731, 8318, 8665, 8469, 1717, 8299, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8305, 1757, 1752, 8481, 1775, 1770, 1780, 8307, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8314, 8668, 1736, 1731, 1739, 8318, 8672, 1699, 1694, 1790, 8321, 8323, 8458, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8685, 1188, 1183, 1165, 1160, 8335, 1188, 1183, 8687, 8689, 1234, 1209, 8691, 8693, 8695, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8330, 1188, 1183, 1165, 1160, 1165, 1160, 1155, 8335, 1188, 1183, 1219, 1214, 8698, 1234, 1209, 8700, 1165, 1160, 1165, 1160, 1165, 1160, 8347, 8349, 1188, 1183, 1219, 1214, 1229, 1224, 1209, 8704, 1219, 1214, 1229, 1224, 1234, 8706, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8408, 8412, 8362, 8708, 8412, 1408, 1403, 1408, 1403, 1413, 8408, 8711, 8412, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1413, 8715, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8391, 8721, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1371, 8394, 1456, 1451, 8412, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8408, 1456, 1451, 8412, 1468, 1468, 1468, 1468, 1468, 1468, 8413, 8414, 8495, 8455, 8458, 8503, 8505, 1478, 8415, 1739, 1737, 8419, 1757, 1752, 1699, 1694, 1717, 8426, 8428, 8430, 8432, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1736, 1731, 1739, 1737, 8477, 1757, 1752, 1694, 1699, 1770, 8469, 1775, 1717, 8450, 8481, 1775, 1770, 1780, 8452, 8454, 8729, 8493, 8495, 8455, 1877, 1864, 8458, 8503, 8505, 1736, 1731, 1737, 1736, 1731, 1739, 8477, 1757, 1752, 
1699, 1694, 1699, 1694, 8469, 1775, 1717, 1790, 1785, 1736, 1731, 8733, 1736, 1731, 8735, 8477, 1757, 1752, 8481, 1775, 1770, 1780, 1790, 1785, 1795, 8489, 1808, 8491, 8493, 8737, 8495, 8739, 8497, 8741, 8499, 8743, 8501, 8503, 8505, 1877, 8514, 8784, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8786, 8681, 8788, 8790, 8792, 8515, 8794, 8681, 8671, 8670, 8671, 8670, 8671, 8670, 8671, 8670, 8732, 8746, 8745, 8746, 8745, 8670, 2983, 8746, 8745, 8746, 8745, 8530, 8662, 8671, 8670, 8671, 8670, 8671, 8548, 8548, 8732, 8731, 8671, 8604, 2983, 2983, 8601, 8670, 8661, 8604, 8662, 8661, 8731, 8798, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8801, 8610, 8758, 8803, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 8626, 8616, 8806, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 8626, 8684, 8808, 8810, 8812, 8612, 8814, 8816, 8818, 8613, 8820, 8822, 8824, 8614, 8826, 8828, 8615, 8830, 8630, 8758, 3128, 3123, 8626, 8616, 8832, 3128, 3123, 3128, 3123, 8622, 3143, 8834, 8836, 8838, 8840, 8842, 3143, 8626, 8630, 3098, 3093, 3098, 3093, 3098, 3093, 8635, 8636, 8847, 8849, 8851, 8723, 8713, 8713, 8723, 8662, 8661, 2983, 8855, 3128, 3123, 3128, 3123, 3128, 3123, 8679, 8681, 8858, 8860, 3128, 3123, 8684, 8719, 8719, 2983, 2983, 2983, 3098, 3093, 3064, 3098, 3093, 8775, 3093, 3098, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3064, 3064, 3064, 8867, 8758, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 8769, 3098, 3093, 8763, 3098, 3093, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3064, 3064, 3064, 8872, 8758, 3093, 3098, 3098, 3093, 3098, 3093, 8763, 3128, 3123, 3064, 3064, 3064, 8874, 8876, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 8769, 3093, 3098, 3098, 3093, 3098, 3093, 8775, 3128, 3123, 3128, 3123, 3128, 3123, 8781, 3143, 3143, 8884, 8886, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 8899, 8894, 8896, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 8883, 8882, 4149, 4144, 8845, 8894, 8896, 8901, 4149, 4144, 8903, 8894, 8896, 8905, 4149, 4144, 8845, 8896, 8907, 4149, 4144, 8845, 8894, 8896, 4061, 4061, 4151, 8883, 8882, 8883, 8882, 4149, 4144, 4156, 4156, 4156, 4149, 4144, 4161, 4161, 4161, 8912, 4149, 4144, 4161, 4156, 8894, 8896, 8917, 8916, 8915, 8916, 8915, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8960, 8961, 8962, 8963, 8964, 8966, 8967, 8968, 8969, 8970, 8971, 8972, 8973, 8974, 8976, 8978, 8979, 8980, 8981, 8982, 8983, 8984, 8985, 8986, 8987, 8988, 8989, 8990, 8991, 8992, 8993, 8994, 8995, 8996, 8997, 8998, 8999, 9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9016, 9017, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9025, 9026, 9027, 9028, 9029, 9030, 9031, 9033, 9034, 9035, 9036, 9037, 9038, 9039, 9040, 9041, 9044, 9045, 9046, 9047, 9048, 9049, 9050, 9051, 9052, 9053, 9054, 9055, 9056, 9057, 9058, 9059, 9060, 9061, 9062, 9063, 9064, 9065, 9066, 9067, 9068, 9069, 9070, 9071, 9072, 9073, 9074, 9075, 9076, 9077, 9078, 9079, 9080, 9081, 9082, 9083, 9084, 9085, 9086, 9087, 9088, 9089, 9090, 9091, 9092, 9094, 9095, 9096, 9097, 9098, 9100, 9101, 9103, 9104, 9105, 9106, 9107, 9108, 9109, 9110, 9111, 9112, 9113, 9114, 9115, 9119, 9120, 9121, 9122, 9123, 9124, 9125, 9126, 9127, 9129, 9130, 9131, 9132, 9133, 9134, 9135, 9136, 9137, 9138, 9139, 9140, 9141, 9142, 9143, 9145, 9146, 9148, 9149, 9150, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9160, 9161, 9163, 9165, 9167, 9168, 9169, 9170, 9171, 9173, 9175, 9176, 9177, 9178, 9179, 9180, 9181, 9182, 9183, 9184, 9186, 9187, 9188, 
9190, 9191, 9192, 9195, 9197, 9201, 9203, 9207, 9208, 9209, 9211, 9212, 9213, 9214, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9225, 9226, 9227, 9228, 9229, 9230, 9231, 9232, 9233, 9234, 9235, 9236, 9237, 9238, 9239, 9240, 9241, 9242, 9243, 9244, 9245, 9247, 9248, 9249, 9250, 9251, 9252, 9253, 9254, 9255, 9256, 9257, 9258, 9259, 9260, 9261, 9262, 9263, 9264, 9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9282, 9283, 9284, 9285, 9286, 9287, 9288, 9289, 9290, 9291, 9292, 9294, 9295, 9296, 9297, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 9322, 9324, 9325, 9327, 9328, 9329, 9330, 9331, 9332, 9333, 9334, 9335, 9336, 9337, 9338, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9348, 9349, 9351, 9352, 9353, 9354, 9355, 9356, 9357, 9359, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367, 9368, 9369, 9370, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9380, 9381, 9382, 9383, 9384, 9385, 9386, 9387, 9388, 9389, 9390, 9391, 9392, 9393, 9395, 9396, 9397, 9398, 9399, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9407, 9408, 9409, 9410, 9411, 9412, 9413, 9414, 9415, 9416, 9417, 9418, 9419, 9420, 9421, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9435, 9436, 9437, 9438, 9439, 9440, 9441, 9442, 9443, 9444, 9446, 9447, 9448, 9450, 9451, 9452, 9453, 9454, 9455, 9456, 9457, 9458, 9459, 9460, 9461, 9462, 9463, 9464, 9465, 9466, 9467, 9468, 9469, 9470, 9471, 9472, 9473, 9474, 9475, 9477, 9478, 9479, 9480, 9482, 9483, 9484, 9485, 9486, 9487, 9488, 9489, 9490, 9491, 9492, 9493, 9494, 9496, 9497, 9498, 9499, 9500, 9501, 9502, 9505, 9506, 9510, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9520, 9521, 9522, 9523, 9524, 9525, 9526, 9527, 9528, 9529, 9531, 9532, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9550, 9551, 9552, 9553, 9554, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9563, 9564, 9565, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9589, 9590, 9591, 9592, 9593, 9594, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9610, 9611, 9612, 9613, 9614, 9615, 9617, 9618, 9619, 9620, 9621, 9622, 9623, 9624, 9625, 9626, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9639, 9640, 9641, 9642, 9643, 9644, 9645, 9646, 9647, 9648, 9649, 9650, 9651, 9652, 9653, 9654, 9655, 9656, 9657, 9658, 9659, 9660, 9661, 9662, 9663, 9664, 9665, 9666, 9667, 9668, 9669, 9670, 9671, 9672, 9673, 9674, 9675, 9676, 9677, 9678, 9679, 9680, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 9688, 9689, 9690, 9691, 9692, 9693, 9694, 9695, 9696, 9697, 9698, 9699, 9700, 9701, 9702, 9703, 9704, 9705, 9707, 9708, 9709, 9710, 9711, 9712, 9713, 9714, 9715, 9716, 9717, 9718, 9719, 9720, 9721, 9722, 9723, 9724, 9725, 9726, 9727, 9728, 9729, 9730, 9731, 9732, 9733, 9734, 9736, 9737, 9739, 9740, 9741, 9742, 9743, 9744, 9745, 9746, 9747, 9748, 9749, 9750, 9751, 9752, 9754, 9756, 9758, 9760, 9761, 9762, 9763, 9764, 9766, 9767, 9768, 9769, 9770, 9771, 9772, 9774, 9778, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9787, 9788, 9789, 9790, 9791, 9792, 9793, 9794, 9795, 9796, 9797, 9798, 9799, 8523, 8523, 8523, 8719, 9043, 9800, 9801, 9802, 9803, 9804, 9805, 8544, 8544, 8544, 9806, 9807, 9808, 9809, 9810, 9811, 9185, 9189, 9194, 9200, 9206, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9823, 
9824, 9825, 9826, 9827, 9828, 9829, 9831, 9832, 9834, 9835, 9836, 9837, 9838, 9839, 9840, 9841, 9842, 9844, 9845, 9846, 9847, 9848, 9849, 9850, 9851, 9852, 9856, 9860, 9864, 9867, 9869, 9870, 9871, 9872, 9873, 9874, 9876, 9877, 9878, 9879, 9880, 9881, 9887, 9888, 8723, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 8719, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9919, 9920, 9921, 9504, 9509, 8723, 8723, 9922, 8719, 9923, 8723, 9924, 9925, 9926, 9927, 9928, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 9971, 9972, 9973, 9974, 9975, 9976, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9989, 9990, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 8879, 8878, 9773, 10022, 10023, 8879, 8878, 8881, 8880, 8879, 8878, 8881, 8880, 9779, 10024, 10025, 9947, 10026, 10027, 10020, 10028, 10029, 10031, 10032, 8879, 8878, 9830, 10033, 10034, 8879, 8878, 10020, 10035, 10036, 8879, 8878, 10020, 10037, 10038, 8879, 8878, 8881, 8880, 8879, 8878, 8881, 8880, 9868, 10039, 10040, 8879, 8878, 10020, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 8878, 8879, 10049, 10050, 10052, 10053, 8878, 8879, 8879, 8878, 10055, 10056, 10057, 10058, 8878, 8879, 8879, 8878, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 8853, 8853, 8853, 10067, 8879, 8878, 9947, 10068, 10069, 8879, 8878, 10020, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 8888, 8888, 10083, 10084, 10085, 10086, 10087, 10088, 8915, 10089, 10090, 10091, 8915, 8915, 10089, 10092, 10093, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 10131, 10133, 10136, 10138, 10142, 10146, 10157, 10160, 10162, 10165, 10187, 10189, 10191, 10193, 10195, 10197, 10203, 10207, 10209, 10219, 10222, 10225, 10236, 10241, 10243, 10256, 10260, 10263, 10265, 10273, 10280, 10282, 10294, 10296, 10301, 10303, 10306, 10311, 10314, 10317, 10320, 10327, 10329, 10331, 10333, 10335, 10344, 10361, 10363, 10365, 10367, 10371, 10374, 10376, 10378, 10382, 10384, 10386, 10399, 10401, 10409, 10411, 10413, 10417, 10420, 10422, 10424, 10426, 10428, 10432, 10436, 10438, 10440, 10442, 10446, 10448, 10451, 10454, 10456, 10458, 10460, 10463, 10465, 10467, 10469, 10471, 10474, 10476, 10478, 10481, 10484, 10486, 10488, 10491, 10493, 10495, 10498, 10500, 10516, 10518, 10522, 10524, 10529, 10531, 10533, 10538, 10541, 10543, 10545, 10547, 10551, 10557, 10559, 10561, 10565, 10568, 10572, 10574, 10576, 10580, 10584, 10590, 10592, 10594, 10597, 10599, 10602, 10604, 10606, 10608, 10610, 10614, 10616, 10618, 10622, 10624, 10626, 10628, 10630, 10632, 10636, 10638, 10640, 10643, 10645, 10648, 10650, 10652, 10659, 10661, 10666, 10668, 10670, 10673, 10675, 10678, 10680, 10682, 10685, 10688, 10690, 10692, 10695, 10697, 10699, 10701, 10705, 10707, 10709, 10712, 10714, 10718, 10721, 10723, 10725, 10728, 10730, 10734, 10752, 10755, 10757, 10765, 10774, 10776, 10779, 10781, 10789, 10797, 10802, 10805, 10809, 10811, 10813, 10818, 10820, 10822, 10825, 10828, 10831, 10113, 10115, 8965, 9753, 
10120, 8746, 8745, 10122, 10124, 8977, 8975, 10130, 8746, 8745, 10141, 8727, 10145, 8727, 10846, 10848, 10850, 10582, 10582, 10578, 10582, 8656, 10785, 10785, 10785, 10292, 8657, 8554, 10285, 10288, 10292, 10407, 10865, 8746, 8745, 8746, 8745, 10407, 10867, 10206, 8657, 8554, 10216, 10214, 8554, 8657, 8554, 10170, 10179, 10170, 10171, 8746, 8745, 10358, 10763, 8604, 8746, 8745, 8746, 8745, 8746, 8745, 10172, 10356, 8554, 8554, 8554, 8554, 8554, 8554, 8554, 8554, 10179, 10174, 10407, 10871, 10175, 10176, 8746, 8745, 10179, 10407, 10873, 10875, 10876, 10877, 10878, 10783, 8658, 8657, 8731, 8732, 8731, 10783, 10783, 10879, 8657, 8657, 8554, 8657, 8554, 10288, 10202, 10206, 10217, 10212, 10217, 10214, 10216, 10217, 10227, 8746, 8745, 10227, 10229, 10231, 8746, 8745, 8658, 8658, 8607, 10398, 8658, 8607, 10235, 10398, 10239, 10404, 10381, 8658, 8607, 8658, 8607, 8746, 8745, 10398, 10586, 10586, 10251, 10252, 10252, 10253, 10254, 10255, 10578, 10582, 10886, 10887, 10888, 10258, 10279, 10275, 10276, 10268, 10269, 10270, 10271, 10275, 10276, 10277, 10278, 10279, 8657, 10285, 8657, 8554, 8746, 8745, 8746, 8745, 10288, 10783, 10783, 8746, 8745, 10292, 10783, 9166, 9164, 9174, 8568, 8728, 8568, 8728, 10844, 8568, 8728, 10309, 10844, 10308, 8568, 8728, 10309, 10844, 8568, 8728, 10309, 10844, 8567, 8568, 8728, 10309, 10844, 10895, 10896, 10897, 9198, 9196, 10898, 9204, 9202, 10899, 10339, 8657, 10763, 8746, 8745, 8746, 8745, 8746, 8745, 8657, 10341, 10763, 9224, 8746, 8745, 8657, 10358, 8657, 10392, 8600, 8746, 8745, 8746, 8745, 8746, 8745, 8600, 8746, 8745, 8746, 8745, 8657, 10356, 8657, 10358, 8657, 8657, 10763, 8746, 8745, 8746, 8745, 8746, 8745, 10407, 8746, 8745, 10381, 8746, 8745, 10407, 8746, 8745, 8658, 10907, 8658, 8607, 10392, 8746, 8745, 8746, 8745, 8746, 8745, 10398, 8746, 8745, 10407, 8746, 8745, 10404, 8746, 8745, 10407, 8746, 8745, 10910, 10912, 10914, 10919, 10921, 10923, 10928, 10930, 10932, 10943, 10947, 10949, 10408, 10955, 10957, 10959, 10961, 9323, 9321, 10965, 10505, 10503, 10507, 10509, 10968, 10511, 8713, 8723, 9476, 10515, 8656, 10528, 8658, 8657, 9434, 10970, 9445, 9449, 8667, 10556, 10571, 9476, 9481, 10586, 10588, 10751, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10973, 10975, 10977, 10981, 10984, 10985, 9549, 9555, 10655, 8713, 10717, 8713, 10986, 10664, 8713, 10987, 10989, 10704, 10991, 10717, 8727, 10733, 8727, 9755, 9753, 10844, 8746, 8745, 10785, 10760, 10761, 10763, 9706, 9755, 8728, 8745, 10844, 8746, 8745, 10763, 10751, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10785, 10760, 10761, 10763, 9755, 8728, 8745, 10844, 8746, 8745, 10785, 10787, 10792, 10834, 9706, 9755, 9753, 10844, 8746, 8745, 10816, 10834, 10836, 9755, 9753, 9759, 9757, 10844, 8746, 8745, 10995, 10998, 11001, 11003, 11005, 11007, 11009, 11016, 11018, 11020, 11022, 11027, 11030, 11032, 11034, 11036, 11038, 11045, 11047, 11049, 11052, 11057, 11059, 11061, 11063, 11068, 11070, 11072, 11075, 11077, 11079, 8844, 8863, 8844, 8844, 11084, 11085, 11074, 11086, 8863, 11089, 11090, 10854, 10940, 11091, 11092, 11093, 11094, 10854, 10940, 11095, 11096, 11097, 8863, 11100, 11103, 10880, 10881, 10903, 10905, 11074, 11108, 11109, 11110, 8844, 8863, 11074, 11113, 11114, 11115, 8863, 8844, 11074, 11118, 11119, 11120, 8863, 8844, 10937, 11123, 11124, 10938, 11125, 11126, 10939, 11127, 11128, 10940, 11129, 11130, 11131, 8844, 8863, 11074, 11134, 11135, 11136, 8863, 8844, 11139, 11144, 11145, 11000, 8883, 8882, 9947, 8844, 8863, 11146, 11150, 11151, 11000, 8883, 8882, 9947, 8844, 8863, 11074, 11152, 11153, 10020, 8883, 
8882, 8863, 8844, 11154, 11158, 11159, 11000, 8844, 8863, 11074, 11160, 11161, 8863, 8844, 11162, 9947, 8883, 8882, 8844, 8863, 10020, 8883, 8882, 8863, 8844, 8853, 8853, 8853, 8853, 11169, 11170, 11171, 11173, 11174, 11000, 11175, 8863, 11074, 11178, 11179, 11180, 8863, 8883, 8882, 9947, 11183, 9947, 8883, 8882, 8888, 10020, 8883, 8882, 8888, 11188, 9977, 8883, 8882, 8888, 9991, 8883, 8882, 11193, 10020, 8883, 8882, 8888, 10020, 8883, 8882, 11194, 11195, 11197, 10082, 8916, 8915, 10089, 8916, 11201, 11202, 10089, 8916, 10089, 8916, 11205, 10048, 8916, 8915, 10054, 8916, 11206, 10059, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 11207, 10082, 8916, 8915, 10082, 8916, 8915, 10089, 8916, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 11463, 11464, 11465, 11466, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11474, 11475, 11476, 10135, 8725, 8724, 11267, 11477, 11478, 8726, 11479, 11480, 8726, 11484, 11289, 11447, 11318, 11485, 11486, 11487, 11442, 11488, 11489, 11490, 11491, 11492, 11493, 11494, 11495, 11496, 11497, 11498, 11500, 11501, 11502, 11503, 11504, 11270, 11271, 9015, 11273, 11506, 10199, 8671, 8670, 11507, 11508, 11509, 11510, 11511, 10199, 8671, 8670, 11512, 10582, 8671, 8670, 11513, 11514, 11515, 11516, 11517, 11518, 11519, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 11529, 11530, 10582, 8671, 8670, 11531, 11532, 10582, 8671, 8670, 11533, 11534, 10582, 8671, 8670, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11543, 11544, 11545, 11546, 11547, 11548, 11554, 11555, 11556, 11557, 11558, 11559, 11560, 11561, 11274, 11563, 10582, 8671, 8670, 10199, 8671, 8670, 11564, 11565, 11566, 11567, 11568, 11569, 11289, 11570, 11571, 11282, 11572, 11573, 11574, 11442, 11575, 11576, 11283, 11284, 11285, 11577, 11578, 11579, 11580, 11581, 11582, 11583, 11584, 11585, 11586, 11587, 11588, 11589, 11590, 11591, 11592, 9093, 11593, 11594, 9102, 9099, 11595, 11596, 11597, 11598, 11599, 11600, 11601, 11602, 11603, 11604, 11605, 11606, 11607, 11608, 11609, 11610, 11318, 11447, 11442, 11289, 11611, 11612, 11616, 11617, 11290, 11291, 9128, 11618, 11619, 11620, 11621, 11622, 11623, 11293, 11624, 11625, 11626, 11627, 11628, 9144, 9147, 11629, 11630, 11631, 11632, 11633, 11634, 11635, 11636, 11637, 11638, 11639, 11640, 11641, 11642, 11643, 11296, 9162, 11644, 11645, 11298, 9172, 11646, 8731, 11647, 11648, 11649, 11650, 11651, 11652, 11653, 11654, 11655, 11656, 11657, 11658, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 11666, 11667, 11668, 11669, 11301, 11302, 11303, 11304, 11673, 11674, 11676, 11677, 11305, 11306, 10677, 8725, 8724, 11679, 10337, 11680, 11681, 11682, 11683, 11684, 11685, 11686, 11687, 11688, 11689, 11690, 11691, 11692, 11693, 11694, 11695, 11310, 11696, 11697, 11698, 11699, 11700, 11701, 11702, 11703, 11704, 11705, 11706, 11707, 11708, 11709, 11710, 11711, 11712, 11713, 11714, 11715, 11716, 11717, 11718, 11719, 11720, 9246, 10369, 8697, 8702, 11315, 11316, 11721, 11722, 11723, 11724, 11725, 11318, 11726, 11727, 11728, 11729, 11730, 11731, 10388, 8671, 8670, 11732, 11734, 11735, 11736, 11737, 11738, 11739, 11740, 11741, 11742, 11743, 11744, 11745, 11746, 11747, 11748, 11323, 11749, 11750, 11751, 11752, 11753, 11754, 11767, 10415, 8703, 8702, 11327, 11328, 9309, 10430, 8703, 8702, 11333, 11772, 11773, 9326, 10444, 8697, 8702, 11338, 10620, 8697, 8702, 11340, 11341, 9347, 10612, 8697, 8702, 11344, 
11345, 9358, 10473, 8725, 8724, 10480, 8725, 8724, 10483, 10490, 8725, 8724, 10497, 8725, 8724, 8717, 10677, 8725, 8724, 11775, 11776, 11777, 11778, 11780, 11781, 11782, 11783, 11784, 10520, 8671, 8670, 11364, 11785, 8732, 11786, 10535, 8671, 8670, 11787, 11788, 10582, 8671, 8670, 11789, 8731, 10549, 8671, 8670, 11791, 10582, 8671, 8670, 11792, 11793, 11794, 10563, 8671, 8670, 11378, 11379, 11795, 10578, 8671, 8670, 11796, 10582, 8671, 8670, 11797, 8731, 11798, 11799, 11800, 11801, 11802, 11803, 11804, 11805, 11806, 11807, 10596, 8697, 8702, 11388, 10620, 8697, 8702, 11390, 9507, 10612, 8697, 8702, 11395, 10620, 8697, 8702, 11398, 9530, 9533, 10634, 8703, 8702, 11404, 11406, 11814, 11408, 11815, 10654, 8725, 8724, 11816, 11817, 11818, 11819, 10663, 8725, 8724, 11821, 11822, 10672, 8725, 8724, 10677, 8725, 8724, 10684, 8725, 8724, 10687, 10694, 8725, 8724, 8717, 10703, 8725, 8724, 11825, 10711, 8725, 8724, 11434, 11827, 11828, 8726, 10727, 8725, 8724, 11440, 11829, 11830, 8726, 11445, 11831, 11832, 11451, 11833, 11834, 11835, 11442, 11443, 11836, 10783, 11837, 11450, 11838, 11839, 11840, 11841, 11842, 11843, 11451, 11844, 11845, 11846, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11442, 11443, 11856, 10783, 11857, 11450, 11858, 11859, 11445, 11860, 11861, 11862, 11451, 11863, 11864, 11865, 11447, 11448, 11866, 10783, 11867, 11450, 11868, 11869, 11870, 11871, 11872, 11451, 11873, 11874, 11875, 10807, 10804, 11454, 11876, 8732, 8731, 11457, 9738, 9735, 11460, 11461, 11462, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 11917, 11918, 11919, 11920, 11923, 11921, 10852, 8881, 8880, 11924, 11925, 11928, 11926, 11929, 11934, 11932, 11935, 11938, 11939, 11940, 11941, 11733, 10993, 8864, 10993, 8864, 10993, 8864, 10993, 8864, 11942, 11943, 11733, 11733, 11944, 11945, 11733, 11946, 10916, 8881, 8880, 11949, 11950, 11951, 11952, 10925, 8881, 8880, 11955, 11956, 11957, 11958, 10934, 8881, 8880, 11961, 11962, 11963, 11964, 11967, 11970, 11973, 11976, 11977, 11978, 11979, 11081, 8881, 8880, 11982, 11983, 11984, 11988, 11986, 10979, 8881, 8880, 11989, 11990, 11991, 11992, 11993, 11997, 11995, 10979, 8881, 8880, 11998, 11999, 12000, 12001, 12002, 12003, 11024, 8881, 8880, 12006, 12007, 12008, 12009, 12010, 12014, 12012, 10979, 8881, 8880, 12015, 12016, 12017, 12020, 12021, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 10963, 8879, 8878, 12036, 12042, 12040, 10979, 8881, 8880, 12043, 12044, 12045, 11024, 8881, 8880, 12048, 12049, 8879, 8878, 11000, 11011, 8881, 8880, 12050, 12051, 12052, 8879, 8878, 11000, 11011, 8881, 8880, 12054, 12055, 12056, 12057, 11074, 8879, 8878, 11024, 8881, 8880, 12058, 12059, 12060, 12061, 8878, 8879, 11029, 11040, 8881, 8880, 12063, 12064, 12065, 12066, 11051, 8879, 8878, 11081, 8881, 8880, 12067, 12068, 12069, 11074, 8879, 8878, 11065, 8881, 8880, 12071, 12072, 12073, 12074, 11074, 8879, 8878, 11081, 8881, 8880, 12075, 12076, 12077, 11167, 12081, 12082, 12083, 8914, 12084, 12085, 8914, 12087, 8898, 8898, 8909, 12088, 12089, 8915, 12090, 12091, 8914, 12093, 12094, 12095, 8909, 12096, 12097, 8914, 12099, 12100, 12101, 8909, 12102, 12103, 12104, 8914, 12105, 12106, 12107, 11167, 12108, 12109, 12110, 11168, 11172, 12111, 12112, 8915, 8914, 12113, 8911, 12114, 12115, 12116, 8911, 12117, 12118, 12119, 8914, 12120, 12121, 8915, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 12162, 12164, 12169, 12171, 12174, 12175, 12176, 12177, 12180, 12183, 12184, 12185, 12186, 12187, 12188, 12189, 12190, 12191, 12203, 
12205, 12208, 12209, 12210, 12211, 12213, 12214, 12215, 12221, 12222, 12223, 12225, 12226, 12227, 12233, 12238, 12240, 12242, 12246, 12247, 12248, 12251, 12252, 12253, 12256, 12257, 12258, 12268, 12273, 12280, 12282, 12283, 12284, 12285, 12286, 12287, 12294, 12297, 12301, 12304, 12305, 12306, 12308, 12313, 12323, 12326, 12327, 12333, 12344, 12345, 12346, 12347, 12348, 12349, 12352, 12353, 12354, 12361, 12367, 12368, 12373, 12375, 12380, 12384, 12385, 12386, 12388, 12389, 12391, 12392, 12394, 12397, 12402, 12406, 12411, 12415, 12416, 12417, 12418, 12419, 12421, 12423, 12424, 12425, 12426, 12427, 12429, 12432, 12434, 12436, 12442, 12446, 12450, 12452, 12454, 12457, 12459, 12468, 12470, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12481, 12483, 12485, 12488, 12490, 12491, 12492, 12497, 12499, 12501, 12504, 12507, 12509, 12511, 12514, 12517, 12518, 12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527, 12529, 12530, 12531, 12532, 12533, 12534, 12535, 12536, 12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545, 12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563, 12568, 12572, 12573, 12574, 12575, 12577, 12579, 12580, 12581, 12582, 12584, 12585, 12586, 12588, 12589, 12590, 12591, 12593, 12594, 12595, 12599, 12600, 12601, 12602, 12603, 12605, 12606, 12607, 12609, 12610, 12611, 12613, 12617, 12619, 12621, 12624, 12625, 12626, 12627, 12628, 12629, 12630, 12631, 12632, 12633, 12634, 12635, 12636, 12637, 12638, 12639, 12640, 12641, 12642, 12643, 12644, 12645, 12646, 12647, 12649, 12651, 12652, 12653, 12657, 12658, 12659, 12660, 12662, 12663, 12664, 12665, 12666, 12667, 12668, 12669, 12670, 12671, 12672, 12673, 12674, 12675, 12676, 12677, 12678, 12679, 12681, 12682, 12683, 12684, 12687, 12688, 12689, 12690, 12691, 12694, 12695, 12696, 12698, 12699, 12702, 12703, 12705, 12707, 12711, 12714, 12715, 12720, 12722, 12724, 12727, 12728, 12730, 12732, 12735, 12736, 12739, 12740, 12743, 12744, 12746, 12748, 12752, 12754, 12755, 12758, 12759, 12760, 12762, 12763, 12764, 12765, 12766, 12767, 12768, 12769, 12772, 12774, 12776, 12161, 12168, 12779, 12784, 12785, 12786, 12787, 12788, 12791, 12792, 12794, 12795, 12796, 12798, 12799, 11779, 10967, 12439, 12441, 12445, 8865, 10900, 12449, 11779, 10967, 12578, 12598, 12312, 12706, 12710, 8865, 12731, 8865, 12747, 11779, 10967, 12706, 12710, 8865, 12800, 12578, 12312, 12731, 8865, 12578, 12731, 12706, 12747, 11779, 10967, 12801, 8865, 12802, 12264, 12267, 12616, 12803, 8865, 12804, 11779, 10967, 12439, 12441, 12449, 12445, 8865, 12237, 11779, 10967, 12229, 12710, 8865, 12231, 8865, 12232, 12616, 12365, 11779, 10967, 12463, 12466, 12444, 12445, 8865, 12438, 12439, 12441, 12447, 12449, 12461, 12465, 12430, 10900, 12235, 12237, 12244, 12245, 11779, 10967, 10900, 12449, 12445, 8865, 12439, 12441, 12264, 12805, 8865, 12806, 12267, 12616, 12343, 12807, 8865, 12808, 12463, 12447, 12466, 12444, 12465, 12430, 12461, 12438, 11779, 10967, 12731, 8865, 12614, 11733, 12578, 12571, 12312, 12706, 12710, 8865, 12747, 12293, 12296, 12299, 12303, 12733, 12749, 12708, 8865, 12604, 12312, 11779, 10967, 12706, 12710, 8865, 12731, 8865, 12811, 12616, 10891, 12337, 10900, 12447, 12461, 12463, 12430, 12444, 12438, 12465, 12466, 11779, 10967, 12430, 10900, 12438, 12447, 12449, 12444, 12445, 12461, 12465, 12466, 12463, 10906, 12351, 10890, 10890, 12351, 11779, 10967, 12366, 10891, 12358, 10900, 10972, 10891, 10993, 8865, 12366, 12614, 12812, 12578, 12571, 12747, 12706, 8865, 12731, 11779, 10967, 
12447, 12449, 12438, 12439, 12441, 12463, 12444, 12445, 8865, 12430, 10900, 12465, 12466, 12461, 12598, 12456, 10972, 12401, 10992, 8865, 12401, 10993, 8865, 12410, 10993, 8865, 12410, 10993, 8865, 11779, 10967, 12430, 10900, 12438, 12439, 12441, 12444, 12445, 8865, 12447, 12449, 12456, 12461, 12462, 12463, 12464, 8865, 12465, 12466, 10906, 11779, 10967, 8865, 12731, 8865, 12614, 12815, 12571, 12616, 12747, 8865, 12706, 12710, 8865, 12816, 12817, 12818, 12819, 12820, 12821, 12823, 12824, 12825, 12826, 12827, 12828, 12830, 12831, 12832, 12833, 12834, 12835, 12837, 12838, 12839, 12840, 12841, 12842, 12844, 12845, 12846, 12847, 12848, 12849, 12852, 12853, 12854, 12855, 12856, 12859, 12862, 12863, 12864, 12865, 12866, 12869, 12871, 12872, 12873, 12874, 12875, 12878, 12881, 12882, 12883, 12884, 12885, 12887, 12888, 12890, 12893, 12895, 12898, 11768, 12903, 12904, 12905, 11779, 10967, 12598, 12616, 12908, 12909, 12910, 12911, 12912, 12914, 12915, 12916, 12917, 12918, 12655, 11826, 12710, 12719, 12751, 12771, 12920, 12921, 12922, 12923, 12924, 12925, 12926, 12929, 12930, 12931, 12932, 12933, 12934, 12935, 12939, 12940, 12941, 12942, 12943, 12944, 12945, 12949, 12950, 12951, 12952, 12953, 12954, 12955, 12959, 12960, 12961, 12962, 12963, 12964, 12965, 12968, 12969, 12970, 12971, 12972, 12973, 12974, 12978, 12979, 12980, 12981, 12982, 12983, 12984, 12987, 12988, 12991, 12992, 12994, 12995, 12996, 12997, 12998, 13001, 12999, 13002, 13004, 13005, 13008, 13009, 13011, 13012, 13015, 13016, 13019, 13020, 13023, 13024, 13027, 13028, 13031, 13029, 13032, 13033, 13034, 13035, 13038, 13039, 13042, 13045, 13043, 122, 123, 124, 125, 126, 127, 13057, 13059, 13060, 12179, 12182, 13066, 13070, 13071, 13072, 13080, 13083, 13086, 13093, 13096, 13099, 13105, 13108, 13120, 13127, 13128, 13158, 12428, 13175, 13185, 13196, 13202, 13208, 13212, 13218, 13224, 13227, 13231, 13234, 13238, 13243, 12576, 13248, 13252, 13256, 13259, 13262, 13267, 13270, 13276, 13277, 13281, 13286, 13290, 13296, 13302, 13306, 13310, 13313, 13316, 13320, 13324, 13327, 12686, 13332, 12693, 13340, 12704, 13345, 13347, 13350, 12729, 13356, 13358, 12745, 13365, 13366, 12761, 13372, 13379, 13380, 8864, 13381, 8865, 13384, 13389, 13391, 11672, 11671, 11670, 11678, 11675, 13242, 13395, 13396, 12438, 13397, 13398, 8796, 8864, 12461, 12444, 13399, 13166, 10992, 13400, 8864, 13401, 8864, 10901, 8865, 13402, 8864, 10902, 8865, 12463, 13139, 13131, 13142, 13223, 13242, 13403, 13404, 10966, 13405, 13406, 13407, 8865, 8864, 10972, 13132, 13408, 13409, 10993, 13410, 8864, 13116, 13411, 13355, 10992, 13412, 8864, 13413, 13133, 13134, 13142, 13223, 13242, 13414, 13415, 12196, 13416, 13417, 10993, 13418, 8864, 13420, 13421, 8864, 10972, 8865, 12200, 12201, 13422, 13355, 10992, 13423, 8864, 13201, 13424, 13425, 13426, 13427, 13139, 13142, 13223, 13242, 13428, 13429, 10966, 13431, 13433, 13434, 13435, 10972, 8865, 8864, 13437, 11672, 11671, 11670, 13078, 13217, 13223, 13242, 13439, 13440, 12212, 13441, 13442, 8796, 8864, 13443, 8864, 10902, 8865, 12218, 13444, 13355, 10992, 13445, 8864, 13446, 8864, 10870, 8865, 13139, 13131, 13142, 13223, 13242, 13447, 13448, 10966, 13132, 13449, 13450, 10993, 13451, 8864, 13116, 13452, 13166, 10992, 13453, 8864, 13454, 13455, 10972, 8865, 8864, 13456, 11672, 11671, 11670, 11678, 11675, 13217, 13242, 13457, 13458, 13459, 13460, 13461, 13462, 13166, 10992, 13463, 8864, 13464, 13465, 13466, 8796, 8864, 13467, 13468, 8864, 10902, 8865, 13469, 13470, 13471, 13472, 8864, 10901, 8865, 13473, 13474, 8864, 10870, 8865, 13475, 13476, 
11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13477, 13478, 13479, 8864, 10901, 8865, 12250, 13480, 8864, 10902, 8865, 12260, 13481, 13166, 10992, 13482, 8864, 12261, 13483, 13484, 8796, 8864, 12262, 13485, 13487, 13489, 13490, 10972, 8865, 8864, 13491, 13493, 13495, 13496, 13497, 13498, 13499, 13500, 13501, 13502, 13133, 13134, 13142, 13223, 13242, 13503, 13504, 10966, 13352, 13505, 13355, 10992, 13506, 8864, 13507, 13508, 13509, 13510, 13511, 8865, 8864, 10972, 13342, 13512, 13513, 10993, 13514, 8864, 13515, 11672, 11562, 12292, 13516, 12295, 13517, 12298, 13518, 12300, 12302, 13519, 13116, 13520, 13521, 13522, 10993, 13523, 8864, 13524, 13525, 8865, 8864, 10972, 12321, 12321, 12322, 13174, 13119, 13217, 13223, 13242, 13526, 13527, 12325, 13528, 13529, 10993, 13530, 8864, 13531, 13355, 10992, 13532, 8864, 13534, 8864, 8865, 10972, 12335, 12341, 13535, 13536, 12338, 12339, 13537, 12341, 12343, 12444, 12461, 12463, 12438, 13538, 13539, 13540, 13541, 13542, 13543, 13544, 13545, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13546, 13547, 13548, 13549, 8864, 10901, 8865, 13550, 13551, 13552, 8864, 10902, 8865, 13553, 13554, 13166, 13555, 13556, 13557, 13558, 13131, 12350, 13559, 13560, 13132, 13561, 13562, 13563, 13139, 13131, 13142, 13223, 13242, 13564, 13565, 10966, 12356, 13566, 13567, 12357, 13568, 11790, 12360, 13569, 13570, 8865, 8864, 13132, 12363, 13571, 13572, 13573, 8864, 12365, 13574, 13133, 13134, 13575, 13577, 13578, 8865, 8864, 10972, 12377, 13579, 12382, 13580, 10993, 13581, 8864, 12382, 13582, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13583, 13584, 13585, 13586, 8864, 10902, 8865, 13587, 13588, 13589, 8796, 8864, 13590, 13591, 13592, 13166, 10992, 13593, 8864, 13594, 13595, 8864, 10901, 8865, 13596, 13597, 13598, 13139, 13207, 13142, 11790, 12604, 13599, 13600, 13601, 8865, 8864, 13352, 12733, 13602, 13603, 13604, 8864, 13605, 13606, 13607, 8864, 13342, 12708, 13608, 13609, 13610, 8864, 13611, 13612, 13613, 8864, 11672, 11671, 11670, 11678, 11675, 13217, 13223, 13242, 13614, 13615, 13616, 13617, 8864, 10901, 8865, 13618, 13619, 13620, 8796, 8864, 13621, 13622, 13166, 10992, 13623, 8864, 13624, 13625, 8864, 10902, 8865, 13626, 8864, 10993, 8865, 13627, 13628, 13629, 13630, 8797, 13631, 13632, 13633, 13634, 8864, 10993, 8865, 13174, 13207, 13217, 13223, 13242, 13635, 13636, 10966, 10993, 13637, 8864, 12484, 13638, 13355, 10992, 13639, 8864, 13640, 13642, 13643, 8864, 10972, 8865, 12503, 13644, 10993, 13645, 8864, 12510, 13646, 13647, 10993, 13648, 8864, 13649, 13650, 13655, 13656, 13661, 13662, 13667, 13668, 13669, 13670, 13673, 13674, 13680, 13683, 13686, 13689, 13691, 13692, 13695, 13698, 13702, 13704, 13706, 13708, 13709, 13201, 13207, 13217, 13223, 13242, 13712, 13713, 10966, 12571, 11790, 13714, 12604, 12614, 13715, 8865, 8864, 13717, 13721, 13722, 13285, 13295, 13295, 12650, 12648, 13726, 13309, 13305, 13309, 13727, 13337, 8865, 8864, 13342, 12708, 13728, 8865, 13729, 8865, 8864, 13352, 12733, 13355, 8865, 13360, 12749, 13730, 8865, 8864, 13376, 13731, 8865, 8864, 13732, 13735, 13738, 13739, 13742, 13745, 13746, 13749, 13752, 13753, 13756, 13759, 13760, 13763, 13766, 13767, 13770, 13773, 13774, 13777, 13780, 13782, 12781, 12782, 13784, 12789, 12797, 12919, 13791, 13705, 13707, 13792, 13654, 13660, 13666, 13672, 13678, 13794, 13796, 13798, 13800, 13802, 13804, 13808, 12913, 12919, 13812, 13814, 13817, 13786, 13810, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 
41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13826, 13833, 13834, 13835, 13836, 13837, 13838, 13839, 13840, 13844, 13846, 13847, 13848, 13849, 13850, 13851, 13852, 13853, 13854, 13855, 13856, 13857, 13858, 13860, 13861, 13862, 13863, 13864, 13865, 13866, 13868, 13869, 13870, 13871, 13872, 13873, 13874, 13875, 13876, 13877, 13878, 13879, 13880, 13882, 13895, 8796, 13899, 8797, 13901, 13827, 13828, 13902, 13905, 13906, 13907, 13908, 13909, 13910, 10966, 12466, 13913, 13916, 13917, 13918, 13919, 13921, 13922, 13924, 12430, 13926, 13927, 13928, 12465, 12447, 13930, 13931, 13932, 13933, 13934, 13935, 13936, 13937, 13883, 13938, 13941, 13945, 13946, 13947, 13948, 13951, 13953, 13954, 13956, 13957, 13959, 13961, 13962, 13963, 13964, 13883, 13965, 10966, 13968, 13971, 13973, 13976, 13977, 13978, 13979, 13980, 13982, 13983, 13985, 13986, 13991, 13992, 13993, 13883, 13994, 13997, 13430, 14002, 14003, 14004, 13436, 14006, 14007, 14008, 14009, 14010, 14011, 13883, 14012, 10966, 14015, 14018, 14019, 14021, 14022, 14023, 14024, 12219, 14026, 14027, 14029, 14031, 14032, 14033, 14034, 14035, 14036, 14037, 13883, 14038, 14041, 14042, 14045, 14047, 14048, 14050, 14051, 14053, 14056, 14057, 14058, 14060, 14061, 14062, 14063, 14064, 14065, 13883, 14066, 10966, 14073, 14074, 14076, 14080, 14081, 14084, 14085, 14086, 14091, 14092, 14093, 14096, 14097, 14098, 14101, 14102, 14103, 14104, 14105, 14106, 14107, 13883, 14108, 10966, 14112, 14113, 14114, 14115, 14117, 14118, 14119, 14120, 14122, 14123, 14125, 14126, 14129, 14130, 14131, 13486, 14136, 14137, 14138, 13492, 14149, 14150, 14151, 14152, 13883, 14153, 14156, 14157, 14159, 14160, 14162, 14168, 14169, 14170, 14171, 14174, 14176, 14178, 14179, 14180, 14182, 14184, 14186, 14187, 14189, 14193, 14195, 14198, 14199, 14200, 14201, 14202, 14203, 14204, 14205, 14206, 14207, 13883, 14208, 10966, 14211, 14214, 14216, 12328, 14218, 14219, 14221, 14223, 14224, 14225, 14226, 14227, 12340, 12336, 14230, 14231, 14233, 12340, 14234, 12342, 12447, 14235, 14236, 14237, 12466, 14238, 12465, 12430, 14247, 14248, 14249, 14250, 14251, 14252, 14253, 14254, 10966, 14259, 14260, 14261, 14265, 14266, 14267, 14270, 14275, 14276, 14279, 14283, 14284, 14285, 14286, 13883, 14287, 14290, 14291, 12355, 14294, 14296, 12359, 14297, 14300, 14301, 14302, 14303, 12362, 14307, 14305, 14308, 12364, 14310, 14311, 14315, 14316, 14317, 14318, 14320, 14322, 14324, 14325, 14327, 14328, 14329, 14330, 14331, 14332, 14333, 14334, 10966, 14339, 14340, 14341, 14345, 14346, 14350, 14351, 14353, 14356, 14357, 14358, 14362, 14363, 14364, 14365, 14366, 12578, 14370, 14371, 14372, 14373, 12731, 14377, 14375, 14381, 14379, 14382, 14383, 12706, 14387, 14385, 14391, 14389, 14392, 14393, 14394, 14395, 14396, 14397, 14398, 14399, 10966, 14404, 14405, 14406, 14410, 14411, 14414, 14415, 14417, 14420, 14421, 14422, 14424, 14425, 14426, 14431, 14436, 14437, 14438, 14439, 14440, 14441, 14442, 13883, 14443, 14446, 14447, 14449, 14450, 14452, 14453, 14455, 14459, 14460, 14461, 14462, 14464, 14466, 14467, 14470, 14472, 14474, 14476, 14478, 14484, 14485, 14487, 14490, 14492, 14497, 14498, 14499, 14500, 14501, 14502, 14505, 14506, 12578, 14507, 14509, 14510, 10972, 14512, 14513, 14514, 14516, 14517, 
14518, 14519, 14520, 14521, 14523, 14524, 14525, 13881, 13883, 14527, 10993, 14528, 14529, 14530, 14531, 12706, 10993, 14533, 8864, 10993, 14535, 14536, 14537, 14538, 12731, 14539, 10992, 14540, 8864, 14541, 14542, 12747, 10993, 14544, 14545, 13368, 13374, 14546, 10994, 14548, 14549, 14550, 14551, 14553, 14554, 14556, 14557, 14559, 14560, 14562, 14563, 14565, 14566, 14568, 14569, 13382, 13703, 14572, 14573, 14575, 13904, 13903, 14576, 14577, 12958, 12070, 12977, 12078, 14579, 14580, 14430, 12814, 12813, 14072, 14078, 12814, 12813, 14428, 12814, 12813, 14430, 12814, 12813, 12814, 12813, 14269, 12814, 12813, 14428, 14408, 12814, 12813, 14428, 14430, 12814, 12813, 14269, 14408, 12814, 12813, 12814, 12813, 14408, 12814, 12813, 14269, 14428, 12814, 12813, 14430, 12814, 12813, 14343, 14430, 14349, 12814, 12813, 12814, 12813, 14428, 12814, 12813, 14408, 14413, 12814, 12813, 14428, 14430, 12814, 12813, 14582, 14583, 14584, 14482, 14480, 14585, 14586, 13684, 13690, 13696, 13701, 13703, 13705, 13707, 12900, 12901, 12902, 12906, 12037, 12038, 12039, 14594, 14595, 12938, 12938, 12948, 12958, 12070, 12977, 12078, 14571, 14574, 14599, 14596, 14597, 14578, 14581, 14587, 14588, 14589, 14590, 14591, 14592, 14597, 14593, 14600, 14596, 14597, 14598, 121, 122, 123, 124, 125, 126, 127, 14765, 14767, 13063, 14769, 14770, 13199, 14772, 13205, 14775, 13237, 10990, 11774, 10988, 13230, 14778, 14777, 14779, 14781, 14786, 14788, 14789, 14792, 14793, 14794, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14802, 13237, 11774, 10990, 13230, 10988, 14803, 13940, 13246, 12596, 12592, 14805, 14809, 14813, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14819, 13230, 10990, 10988, 11553, 13237, 14821, 14820, 14823, 12198, 12197, 12199, 12255, 12465, 14825, 14831, 13076, 13215, 13141, 13293, 13221, 13335, 14837, 13237, 10988, 11774, 13230, 10990, 14838, 13996, 14840, 13265, 14841, 14844, 13076, 14845, 13077, 13215, 13079, 13293, 13221, 13335, 14851, 11774, 13237, 13230, 10988, 10990, 14853, 14852, 14855, 12217, 12216, 14857, 14861, 14863, 12220, 12288, 12228, 12224, 14865, 13129, 13130, 13215, 13141, 13293, 13221, 13335, 14872, 10988, 13237, 11774, 10990, 13230, 14873, 14040, 14876, 14880, 13265, 14882, 13150, 14885, 13205, 14888, 13215, 13156, 13335, 14891, 10988, 11774, 13237, 10990, 13230, 14893, 14892, 14895, 14897, 14899, 14902, 14905, 13199, 14908, 13205, 14911, 13215, 13156, 13293, 13157, 13335, 14915, 13230, 13237, 11553, 10990, 10988, 14917, 14916, 12249, 14918, 12254, 12255, 12259, 14922, 14927, 14930, 14933, 14934, 14937, 13114, 13115, 13215, 13141, 13293, 13221, 13335, 14942, 13237, 10988, 10990, 13230, 11553, 14943, 14155, 14947, 12587, 13103, 13246, 14949, 14953, 14955, 12291, 12281, 12289, 12288, 12466, 12291, 12290, 13114, 13115, 14963, 14965, 12320, 12315, 12317, 12316, 12318, 12320, 12319, 13129, 13130, 13215, 13211, 13293, 13221, 13335, 14975, 11774, 10988, 10990, 13237, 13230, 14977, 14976, 14979, 14981, 14983, 12330, 12329, 12332, 12331, 14985, 13129, 13130, 13215, 13141, 14990, 14991, 14995, 14997, 14998, 15002, 15004, 15005, 15006, 15009, 13215, 13156, 13293, 13157, 10990, 11774, 13237, 10988, 13230, 15014, 15013, 15015, 15018, 13215, 13211, 13129, 13130, 13215, 13141, 13293, 13221, 13335, 15029, 10988, 10990, 13230, 13237, 11774, 15030, 14289, 15033, 13265, 12596, 12592, 12587, 12390, 13246, 15036, 14299, 15042, 15044, 15046, 12372, 12369, 12370, 12372, 12371, 15049, 15054, 13199, 15057, 13205, 15060, 13215, 13156, 13293, 13157, 10990, 13237, 13230, 10988, 11774, 15065, 15064, 15066, 15069, 15072, 
15074, 13215, 13141, 12587, 12390, 13265, 12596, 12592, 13246, 15082, 14369, 15087, 15089, 15091, 15094, 15096, 15098, 13150, 15099, 13205, 15102, 13215, 13156, 13293, 13157, 10990, 10988, 11774, 13230, 13237, 15107, 15106, 15108, 15111, 15114, 15116, 15119, 15122, 15123, 13199, 13205, 13215, 13211, 13293, 13221, 13335, 15130, 10988, 10990, 11774, 13237, 13230, 15131, 14445, 15133, 15137, 12495, 12493, 12495, 12494, 15139, 15143, 15146, 13199, 13205, 13215, 13211, 13293, 13221, 10988, 10990, 13237, 13230, 11774, 15161, 14504, 12612, 12570, 13246, 15164, 12587, 13251, 12596, 12592, 13265, 12612, 12608, 15168, 13284, 13280, 13293, 13289, 13299, 15176, 13319, 10988, 11824, 13323, 10990, 11824, 10988, 13319, 13323, 10990, 13319, 10990, 13323, 10988, 11824, 13330, 15181, 13335, 15182, 15184, 15189, 15190, 15192, 15193, 15198, 15200, 15202, 15205, 15206, 15209, 13371, 15210, 15212, 15216, 15229, 15230, 15171, 15172, 14771, 15234, 15235, 15172, 15222, 15238, 15224, 15239, 15226, 15240, 15228, 15241, 13914, 14428, 13920, 14430, 13949, 13955, 13960, 13969, 14319, 13981, 13988, 13989, 13990, 13999, 14016, 14430, 14428, 14043, 14049, 14059, 15244, 15245, 15246, 15247, 15248, 15249, 15250, 15251, 15252, 15253, 14094, 14094, 14099, 14100, 14430, 14121, 14127, 14428, 14132, 14139, 15254, 15255, 15256, 15257, 15258, 15259, 15260, 15261, 15262, 15263, 14158, 14172, 14177, 14181, 14185, 14183, 14188, 14188, 14190, 14191, 14192, 14326, 14321, 14319, 14212, 14463, 14269, 14428, 14430, 14408, 15264, 15265, 15266, 15267, 15268, 15269, 15270, 15271, 15272, 15273, 15274, 15275, 15276, 15277, 15278, 15279, 15280, 15281, 15282, 15283, 14319, 14321, 14326, 15284, 15285, 15286, 15287, 15288, 15289, 15290, 15291, 15292, 15293, 15294, 15295, 15296, 15297, 15298, 15299, 15300, 15301, 15302, 15303, 14451, 14463, 14468, 15148, 15149, 15150, 15307, 15308, 15151, 15152, 15311, 15153, 15312, 15154, 15313, 15155, 15314, 15220, 15315, 15171, 15316, 15172, 15317, 15216, 15318, 15319, 15320, 15222, 15321, 15224, 15322, 15226, 15323, 15228, 15324, 15171, 15172, 15216, 15327, 15218, 15328, 15220, 15329, 15222, 15330, 15224, 15331, 15226, 15332, 15228, 15333, 15334, 15335, 15337, 15338, 15339, 15340, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15350, 15351, 15352, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 15360, 15361, 15362, 15365, 15366, 15367, 15369, 15370, 15371, 15372, 15373, 13912, 15378, 15380, 15383, 15384, 15385, 15386, 15387, 15388, 15389, 15390, 15392, 15393, 15394, 15395, 15396, 15397, 15399, 15400, 15401, 15402, 15403, 15404, 15405, 15406, 15407, 15408, 15409, 15410, 15411, 15413, 15414, 15415, 15416, 15417, 13967, 15420, 15421, 15422, 15423, 15424, 15425, 15426, 15427, 15428, 15429, 15430, 15431, 15432, 15433, 15435, 15436, 15437, 15438, 15439, 15440, 15443, 15444, 15446, 15447, 15448, 15449, 15450, 15451, 15452, 15453, 15455, 15456, 15457, 15458, 15459, 14014, 15463, 15464, 15465, 15467, 15468, 15469, 15470, 15471, 15472, 15473, 15474, 15475, 15476, 15477, 15478, 15479, 15481, 15482, 15483, 15484, 15485, 15486, 15488, 15489, 15490, 15491, 15492, 15493, 
15494, 15496, 15497, 15498, 15500, 15501, 15502, 15503, 15504, 14068, 15507, 15509, 15510, 15511, 15512, 15513, 15514, 15516, 15517, 15518, 15519, 15520, 15522, 15523, 15524, 15525, 15526, 14110, 15529, 15530, 15531, 15532, 15533, 15534, 15535, 15538, 15540, 15541, 15542, 15543, 15544, 15545, 15546, 15548, 15549, 15550, 15551, 15552, 15553, 15555, 15556, 15557, 15558, 15559, 15560, 15562, 15563, 15564, 15565, 15566, 15567, 15568, 15569, 15570, 15571, 15572, 15573, 15574, 15575, 15576, 15577, 15578, 15579, 15580, 15581, 15582, 15583, 15584, 15585, 15586, 15588, 15589, 15590, 15591, 15592, 14210, 15595, 15597, 15598, 15599, 15600, 15601, 15602, 15603, 15604, 15605, 15606, 14989, 14994, 14996, 15615, 15617, 15618, 15619, 15620, 15621, 15622, 15623, 15624, 15625, 14256, 15628, 15629, 15630, 15631, 15632, 15633, 15634, 15635, 15636, 15637, 15638, 15640, 15641, 15642, 15643, 15644, 15645, 15032, 15648, 15649, 15650, 15651, 15652, 15653, 15655, 15041, 15045, 15659, 15660, 15661, 15662, 15663, 15664, 15665, 15666, 15667, 15668, 15670, 15671, 15672, 15673, 15674, 15675, 15676, 15677, 15678, 14336, 15681, 15683, 15684, 15685, 15686, 15687, 15688, 15689, 15690, 15691, 15692, 15694, 15086, 15093, 15701, 15702, 15703, 15705, 15706, 15707, 15708, 15709, 15710, 15711, 15712, 15713, 14401, 15716, 15718, 15719, 15720, 15722, 15723, 15724, 15725, 15726, 15727, 15728, 15729, 15731, 15732, 15733, 15734, 15735, 15736, 15738, 15739, 15740, 15741, 15742, 15743, 15744, 15745, 15746, 15747, 15748, 15749, 15750, 15751, 15752, 15753, 15754, 15755, 15756, 15757, 15758, 15760, 15761, 15762, 15764, 15765, 15766, 15767, 15768, 15769, 15770, 15771, 15772, 15773, 15774, 15775, 15776, 15778, 15779, 15780, 15781, 15782, 15783, 15784, 15785, 15786, 15787, 15788, 15789, 15790, 15791, 15792, 15793, 15795, 15797, 15188, 15799, 15801, 15197, 15803, 15204, 15806, 15808, 15810, 15811, 15814, 15815, 15816, 15817, 15819, 15211, 15820, 15822, 15824, 15826, 12814, 12813, 15828, 15377, 15829, 15830, 12814, 12813, 12814, 12813, 15831, 15832, 15833, 15834, 15835, 15836, 15837, 15838, 15839, 15840, 15442, 15841, 15445, 15842, 15462, 15843, 15844, 14025, 15845, 15846, 15847, 15849, 15508, 15853, 15856, 15858, 15859, 15860, 15861, 15862, 15863, 15864, 15536, 15865, 15866, 15537, 15867, 15539, 15869, 15871, 15874, 15878, 15879, 15880, 15881, 15882, 15883, 15884, 15885, 15886, 15887, 15888, 15889, 15890, 15891, 15892, 14217, 15893, 15699, 12814, 12813, 15894, 15895, 15896, 12814, 12813, 15897, 12814, 12813, 15898, 15902, 15906, 15908, 15911, 15915, 15696, 15699, 15700, 15697, 15696, 15700, 15657, 15697, 15918, 15919, 15920, 15921, 15682, 15926, 15928, 15696, 15697, 15699, 15700, 15931, 15717, 15935, 15721, 15939, 15941, 15942, 15943, 15944, 15945, 15946, 15947, 15949, 15950, 15952, 15954, 15956, 15958, 15960, 15962, 15211, 15964, 15968, 15970, 15972, 15974, 15976, 15977, 15211, 15978, 15980, 15982, 15984, 15986, 15988, 15990, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16134, 16136, 15375, 16145, 16147, 16150, 16152, 16157, 16164, 16166, 16169, 16171, 15419, 16176, 16179, 16184, 16186, 16189, 16191, 16200, 16202, 16205, 
16207, 15461, 16211, 16215, 16217, 16222, 16224, 16227, 16229, 16240, 16243, 16245, 15506, 16256, 16258, 16261, 16263, 15528, 16277, 16279, 16282, 16284, 16289, 16294, 16296, 16299, 16305, 16307, 16310, 16314, 16316, 16319, 16321, 15594, 16327, 16329, 16334, 16340, 16342, 16344, 16346, 15627, 16352, 16356, 16358, 16361, 16363, 16369, 16371, 16377, 16380, 16387, 16389, 16391, 16393, 15680, 16400, 16402, 16405, 16414, 16416, 16418, 16420, 15715, 16431, 16433, 16436, 16438, 16444, 16446, 16453, 16455, 16457, 16459, 16463, 16466, 16468, 16471, 16473, 16474, 16476, 16479, 16481, 16484, 16486, 16489, 16491, 16496, 16498, 16499, 16501, 16503, 16505, 15777, 15794, 15796, 15166, 15763, 16129, 16128, 16497, 16500, 15158, 15157, 15794, 15364, 15363, 16500, 15763, 15166, 16497, 15158, 15157, 15794, 15796, 16497, 15763, 15166, 16500, 15777, 15794, 15796, 16512, 16504, 15368, 16132, 15794, 15730, 16517, 16518, 16520, 16140, 16523, 16524, 16141, 16525, 16526, 16142, 14799, 14798, 15794, 15391, 13942, 16159, 16160, 16161, 14816, 14815, 15794, 15412, 16175, 13974, 16181, 16182, 15078, 14833, 13987, 15022, 14834, 15794, 15434, 16537, 14000, 16196, 16539, 14848, 16198, 15794, 15454, 16541, 16213, 16544, 16214, 16219, 14869, 14868, 15794, 15480, 16233, 16234, 14054, 16236, 15495, 16238, 15794, 15499, 16249, 16549, 16250, 16251, 16252, 15515, 16254, 15794, 15521, 12814, 12813, 16268, 12814, 12813, 12814, 12813, 16272, 16273, 16559, 15022, 15077, 16562, 14134, 16274, 16564, 14939, 14938, 15794, 15547, 16288, 14165, 16292, 16293, 15127, 15561, 16572, 15022, 15077, 16303, 14196, 16304, 14313, 14972, 14971, 15794, 15587, 16325, 16583, 16326, 16331, 15022, 15077, 16336, 16585, 14992, 15608, 16408, 16337, 16338, 16586, 16587, 16591, 16592, 16594, 16595, 15616, 16339, 15794, 15730, 16350, 16351, 16443, 15022, 15077, 15693, 15023, 16408, 16602, 16410, 16603, 16604, 16605, 15026, 15025, 15794, 15639, 16367, 16606, 16607, 15034, 15654, 16374, 16375, 16608, 16376, 16609, 15048, 15047, 14313, 16382, 16383, 15669, 16385, 15794, 15730, 16397, 16614, 16398, 16399, 15078, 15077, 15693, 15081, 16408, 16409, 16617, 16618, 16410, 16619, 16620, 15704, 16412, 15794, 15730, 16424, 16622, 16425, 16426, 16427, 16624, 16428, 15127, 15126, 15794, 15730, 16442, 16443, 16448, 16449, 16450, 15158, 15157, 15794, 15796, 16500, 15763, 15166, 16497, 15777, 15794, 15796, 16500, 16497, 16641, 16504, 15158, 15157, 15794, 15796, 16497, 16500, 15166, 15763, 15777, 15794, 15796, 16497, 16500, 16502, 16649, 16504, 15813, 15812, 15813, 15957, 15232, 15231, 15237, 15236, 15233, 15827, 15825, 15823, 15821, 15243, 15242, 15305, 15306, 15304, 15310, 15309, 15959, 15951, 15955, 15953, 15959, 15957, 15963, 15961, 15967, 15965, 15967, 15966, 15975, 15973, 15971, 15969, 15326, 15325, 15983, 15979, 15983, 15981, 15991, 15989, 15987, 15985, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16768, 16773, 16778, 16785, 16789, 16797, 16800, 16805, 16810, 16814, 16821, 16829, 16835, 16843, 16851, 16856, 16862, 16871, 16873, 16875, 16883, 15160, 15159, 16884, 16885, 16886, 15165, 15167, 15163, 16887, 14508, 16888, 16889, 16890, 16878, 16891, 16880, 15160, 15159, 16892, 16893, 16894, 16895, 16896, 16897, 16880, 15163, 15167, 16898, 15165, 16899, 
14508, 16868, 16900, 16878, 15160, 15159, 16901, 16902, 16903, 16904, 16905, 16878, 15167, 15165, 15163, 16906, 14508, 16907, 16868, 16908, 16880, 16909, 15175, 15174, 15173, 16910, 16911, 16913, 16882, 15012, 15011, 16914, 16915, 16916, 16917, 16918, 16921, 16922, 16924, 16925, 16927, 14801, 14800, 16928, 16929, 16930, 16931, 13943, 16932, 16933, 16934, 16935, 14818, 14817, 16936, 16937, 16938, 16939, 16940, 14314, 16941, 13419, 14312, 16942, 16943, 15129, 15079, 16944, 16945, 16946, 14836, 14835, 16947, 16948, 16949, 16950, 16952, 16953, 14850, 14849, 16955, 16956, 16957, 16958, 12814, 12813, 16960, 16962, 12814, 12813, 12814, 12813, 16963, 14871, 14870, 16964, 16965, 16966, 16967, 16968, 16969, 16970, 16971, 15012, 14890, 16972, 16973, 16974, 16975, 16976, 16978, 16979, 16980, 14099, 14914, 14913, 16981, 16982, 16983, 16984, 16985, 16986, 16987, 16988, 16989, 16990, 16991, 16992, 16993, 15129, 15079, 16995, 16996, 16998, 16999, 14941, 14940, 17001, 17002, 17003, 17004, 17005, 14166, 17006, 14164, 14163, 17007, 17008, 15012, 15128, 17009, 17010, 14185, 12809, 15129, 15079, 17012, 17013, 17014, 17015, 17016, 14314, 17017, 13576, 14312, 14974, 14973, 17018, 17019, 17020, 17021, 17022, 17024, 14457, 13533, 14456, 17025, 15129, 15079, 17026, 17027, 17028, 14993, 17030, 15167, 14229, 17031, 17032, 17033, 17034, 17035, 17037, 17039, 15012, 15011, 17041, 17042, 17043, 17044, 17045, 17046, 17047, 15129, 15079, 17048, 17049, 17050, 14367, 17051, 15163, 15167, 15080, 17052, 17054, 15028, 15027, 17058, 17059, 17060, 17061, 17062, 17065, 15035, 15037, 14295, 17066, 17067, 17068, 17070, 15129, 15079, 17072, 17073, 14314, 17074, 13576, 14312, 17075, 17076, 15063, 15062, 17077, 17078, 17079, 17080, 17081, 17083, 17084, 15129, 15079, 17085, 17086, 17087, 14367, 15163, 17088, 15167, 15080, 17089, 17090, 17093, 15105, 15104, 17096, 17097, 17098, 17099, 17100, 17102, 17103, 17104, 17106, 15129, 15128, 17107, 17108, 17109, 17110, 17111, 17112, 14457, 13641, 14456, 17113, 17114, 17115, 15160, 15159, 17116, 17117, 17118, 17119, 17120, 16880, 17121, 14508, 17122, 15165, 15163, 15167, 16868, 17123, 16878, 17124, 15175, 15174, 15173, 17125, 17126, 17127, 16880, 17128, 16878, 17130, 16882, 15160, 15159, 17131, 17132, 17133, 17134, 17135, 16878, 17136, 16880, 15165, 17137, 14508, 15167, 17138, 15163, 16868, 17139, 15175, 15174, 15173, 17140, 17141, 16877, 17142, 16878, 16879, 17143, 16880, 17144, 16881, 17146, 16882, 17147, 17148, 17149, 17150, 17151, 17152, 17153, 17154, 17155, 17156, 17157, 17158, 17159, 17160, 17161, 16920, 17105, 16954, 16951, 17105, 16959, 17105, 16977, 17105, 17101, 17105, 16994, 17000, 16997, 17101, 17105, 17101, 17105, 17101, 17105, 17105, 17101, 17101, 17105, 17105, 17082, 17105, 17101, 17162, 17163, 17164, 17165, 17166, 17167, 17168, 17169, 17170, 17171, 17172, 17173, 17174, 17175, 17176, 17177, 17178, 17179, 17180, 17181, 17182, 17183, 17184, 17185, 17186, 17187, 17188, 17189, 17190, 17191, 17192, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17280, 17281, 17282, 17283, 17284, 17285, 17286, 17287, 17288, 17290, 17291, 17292, 17293, 17294, 17295, 17296, 17297, 17298, 17299, 17301, 17302, 17306, 17307, 17308, 17310, 17311, 17314, 17316, 17317, 17318, 17319, 17322, 17325, 17326, 17327, 17329, 17331, 17332, 17334, 17335, 17336, 17337, 17342, 17343, 17344, 17345, 17347, 17349, 17351, 17353, 17354, 17355, 16912, 17359, 17360, 17361, 17362, 17372, 17373, 
17374, 17378, 17383, 17384, 17385, 17390, 17392, 17393, 17396, 17397, 17398, 17401, 17402, 17403, 17409, 17410, 17411, 17415, 17416, 17419, 17420, 17421, 17422, 17424, 17425, 17426, 17434, 17435, 17436, 17444, 17445, 17446, 17447, 17451, 17454, 17456, 17460, 17461, 17462, 17466, 17467, 17468, 17473, 17475, 17476, 17479, 17480, 17481, 17483, 12810, 17484, 17485, 17486, 17487, 17492, 17494, 17495, 17496, 17497, 17498, 17504, 17505, 17506, 17508, 17509, 17510, 17513, 17515, 17516, 17524, 17525, 17526, 17533, 17534, 17535, 17538, 17540, 17541, 17542, 17545, 17546, 17547, 17553, 17554, 17555, 17560, 17561, 17562, 17564, 17566, 17567, 17570, 17571, 17572, 17579, 17580, 17581, 17584, 17585, 17587, 17588, 17592, 17593, 17594, 17603, 17604, 17605, 17611, 17612, 17613, 17617, 17618, 17619, 17624, 17626, 17628, 17629, 17630, 17631, 17633, 17635, 17636, 17637, 17641, 17643, 17129, 17645, 17646, 17647, 17648, 17653, 17655, 17656, 17658, 17659, 17661, 17662, 17664, 17665, 17666, 17669, 17671, 17672, 17674, 17676, 17145, 17678, 17679, 17681, 17683, 17685, 17688, 17690, 17692, 17369, 17367, 17609, 17602, 17615, 17694, 17695, 17371, 17601, 17382, 17615, 17609, 17381, 17609, 17389, 17615, 17395, 17568, 17569, 17609, 17610, 17615, 17696, 17408, 17610, 17697, 17698, 17615, 17699, 17418, 17609, 17431, 17615, 17433, 17609, 17430, 17609, 17700, 17601, 17615, 17441, 17442, 17440, 17602, 17701, 17702, 17703, 17610, 17609, 17615, 17704, 17459, 17609, 17705, 17615, 17706, 17465, 17615, 17707, 17602, 17531, 17708, 17532, 17609, 17615, 17530, 17709, 17601, 17609, 17472, 17615, 17478, 17615, 17710, 17711, 17609, 17610, 17615, 17610, 17491, 17489, 17609, 17615, 17569, 17610, 17609, 17502, 17615, 17503, 17609, 17029, 17064, 17071, 17063, 17712, 17601, 17609, 17530, 17532, 17531, 17615, 17713, 17602, 17601, 17714, 17531, 17615, 17530, 17609, 17715, 17602, 17532, 17716, 17609, 17532, 17602, 17531, 17530, 17717, 17601, 17615, 17057, 17055, 17056, 17053, 17071, 17063, 17064, 17069, 17569, 17615, 17610, 17609, 17578, 17601, 17577, 17576, 17718, 17615, 17719, 17602, 17609, 17094, 17095, 17092, 17091, 17600, 17615, 17602, 17599, 17601, 17720, 17721, 17598, 17609, 17610, 17609, 17615, 17616, 17722, 17724, 17727, 17729, 17731, 17733, 17735, 17737, 17739, 17741, 17743, 17745, 17747, 17749, 17751, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17300, 16462, 17305, 17814, 17309, 17820, 16462, 17825, 17328, 17330, 17831, 16462, 17835, 17837, 17838, 17352, 17842, 15178, 15180, 14522, 14496, 15179, 17846, 16770, 17849, 16155, 17852, 17853, 16780, 17856, 17857, 17859, 17862, 16194, 17865, 16791, 17868, 17870, 17872, 17874, 16232, 17877, 16802, 17881, 16807, 17887, 17890, 16287, 17893, 17894, 17896, 17900, 17902, 17905, 17906, 17908, 16823, 17911, 17914, 17917, 17918, 17920, 16831, 17923, 17537, 17539, 17928, 17930, 16366, 17552, 17934, 17936, 17939, 17940, 17942, 16845, 17945, 17583, 17949, 17950, 17952, 16853, 17955, 16441, 17958, 17961, 16462, 17625, 17627, 17967, 17634, 17972, 15179, 14496, 15180, 15178, 14522, 17978, 16462, 17983, 17984, 17660, 17663, 17989, 14522, 
15180, 15179, 15178, 14526, 17819, 17995, 17818, 17991, 17995, 17830, 17991, 17824, 17834, 17995, 17840, 17991, 18001, 17993, 17970, 17991, 17964, 17995, 17845, 18002, 18005, 18006, 18007, 18008, 18009, 18012, 18013, 18014, 18015, 18016, 18017, 18018, 18019, 18020, 18021, 18022, 18023, 18024, 18025, 18026, 18028, 18029, 18032, 18034, 18035, 18036, 18037, 18038, 18039, 18040, 18041, 18043, 18044, 18045, 18046, 18047, 18048, 17601, 17443, 18052, 18053, 18054, 17531, 18050, 17602, 17458, 18056, 18057, 18059, 17453, 17601, 18061, 18062, 18064, 18065, 18067, 18068, 18069, 18070, 18072, 18073, 18074, 18075, 18076, 18077, 18080, 18081, 18078, 18082, 18083, 18084, 18085, 18086, 18087, 18088, 18089, 18090, 18091, 18092, 18093, 18094, 18095, 18096, 18097, 18098, 18100, 18101, 18102, 18103, 18104, 18105, 18107, 18108, 18110, 18111, 18112, 18113, 18115, 18116, 18118, 18119, 18120, 18121, 18122, 18124, 18125, 18126, 18127, 18128, 18129, 18130, 18131, 18132, 18133, 18134, 18135, 18136, 18137, 18138, 18139, 18140, 18141, 18143, 18145, 18146, 18147, 18148, 18149, 18150, 18151, 18152, 18153, 18154, 18155, 18158, 18159, 18160, 18161, 18162, 18163, 18164, 17995, 17970, 17991, 17964, 17975, 17993, 17995, 17974, 17977, 17991, 18172, 17991, 17995, 17982, 17981, 17992, 17991, 17993, 17995, 17997, 17994, 18177, 15996, 16004, 15992, 15993, 15995, 15994, 15997, 16001, 16000, 15999, 16002, 16004, 16003, 15349, 16007, 16006, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18304, 18305, 18306, 18309, 18310, 18311, 18314, 18315, 18316, 18319, 18321, 18322, 18323, 18324, 18325, 18326, 18327, 18328, 18329, 18331, 18332, 18333, 18335, 18336, 18337, 18338, 18339, 18343, 18344, 18345, 18346, 18347, 18348, 18349, 18350, 18351, 18352, 18354, 17899, 18356, 18357, 18359, 18360, 18361, 18362, 18363, 18365, 18366, 18367, 18368, 18371, 18372, 18373, 18375, 18376, 18378, 18379, 18380, 18381, 18384, 18385, 18386, 18387, 18388, 18389, 18390, 18391, 18394, 18396, 18397, 18398, 18399, 18400, 18401, 18402, 18403, 18406, 18408, 18409, 18410, 18411, 18412, 18413, 18414, 18415, 18416, 18417, 18418, 18419, 18420, 18421, 18422, 18423, 18424, 18426, 18427, 18428, 18429, 18430, 18431, 18433, 18435, 18437, 18011, 17380, 18440, 18442, 18445, 18448, 18450, 18027, 18454, 17601, 17417, 17423, 18031, 18456, 18458, 18460, 18463, 18464, 18466, 18468, 18470, 18471, 18475, 18472, 18477, 18478, 18482, 18483, 18480, 18060, 18485, 18486, 18066, 18489, 18491, 18494, 18498, 18501, 18503, 18507, 18511, 18515, 18099, 18519, 18521, 18523, 18525, 18526, 18528, 18114, 18117, 18533, 18535, 18123, 18540, 18544, 18547, 18551, 18553, 18142, 18144, 18558, 18562, 18564, 18566, 18157, 18570, 18573, 18574, 18575, 18576, 18577, 18578, 18579, 18580, 18581, 18582, 18583, 18585, 18586, 18587, 18588, 18589, 18590, 18591, 18592, 18593, 18594, 18596, 18597, 18598, 18599, 15336, 15996, 18600, 18601, 18602, 18603, 18604, 18605, 18606, 16005, 18607, 18608, 18609, 16008, 18610, 18611, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18689, 18690, 18692, 18693, 18695, 18696, 18698, 18699, 18701, 18704, 18706, 18708, 18712, 18714, 18716, 18718, 
18720, 18723, 18726, 18730, 18733, 18735, 18737, 18739, 18740, 18744, 18746, 18748, 18750, 18753, 18754, 18756, 18757, 17639, 18762, 18763, 18765, 18767, 17667, 18770, 18772, 18774, 18776, 18778, 18782, 18784, 18786, 18788, 18790, 18792, 18793, 17394, 18796, 18798, 18800, 18801, 18802, 18805, 18807, 18809, 18811, 18474, 18815, 18816, 18481, 18820, 18822, 18824, 17477, 18828, 17568, 17507, 18833, 18835, 18837, 18839, 18841, 18843, 17568, 18848, 18850, 18853, 18855, 17614, 18860, 18863, 18865, 18867, 18870, 18873, 18875, 18877, 18880, 18883, 18884, 15998, 18888, 18892, 18896, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18944, 18946, 18948, 18950, 18952, 18953, 18954, 18955, 18956, 18957, 18958, 18959, 18960, 18961, 18963, 18965, 18967, 18969, 18971, 18972, 18973, 18975, 18977, 18978, 18980, 18982, 17817, 18983, 17829, 18985, 17839, 18988, 18991, 18994, 18995, 18996, 18998, 18999, 19001, 19002, 18476, 18814, 19006, 18819, 19010, 19012, 17614, 19013, 19014, 19015, 17518, 19016, 19018, 19020, 17543, 17557, 19022, 19023, 17589, 19025, 19027, 17969, 19029, 17987, 19033, 19038, 19039, 19041, 19042, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19075, 19093, 19096, 19072, 19098, 19073, 19100, 19074, 19102, 19103, 19077, 19104, 19078, 19079, 18444, 19091, 19080, 19081, 18803, 18804, 19082, 19083, 19111, 19112, 19084, 19114, 19091, 19116, 19085, 18493, 19087, 19118, 19091, 18506, 19086, 18510, 19091, 19122, 19123, 19124, 19087, 19125, 19091, 19126, 19088, 19127, 19091, 19128, 19089, 19129, 19091, 19130, 19090, 19131, 19091, 18569, 19092, 19133, 19134, 19095, 19135, 19136, 19137, 18890, 19139, 19140, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19203, 19099, 19205, 19101, 19207, 18780, 19200, 19210, 19212, 19213, 19214, 19215, 19216, 19217, 19218, 19220, 19221, 19224, 19225, 19226, 19228, 19229, 19230, 18497, 19232, 19233, 19234, 19235, 19236, 18514, 19240, 19242, 19243, 19244, 19245, 19246, 18847, 19248, 19250, 18560, 19252, 19254, 19255, 19256, 18859, 19201, 19259, 19260, 19202, 19040, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18987, 19334, 19338, 19349, 19351, 19353, 19355, 19357, 19360, 19362, 19364, 18852, 19370, 19372, 19373, 19375, 19376, 19329, 19331, 19241, 19211, 19227, 19249, 19253, 19009, 19105, 19110, 19346, 19223, 19238, 19239, 19119, 19222, 19107, 19342, 18997, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19456, 
19460, 19463, 19464, 19465, 19467, 19469, 19471, 19473, 19474, 19209, 19475, 19476, 19477, 19461, 19478, 19479, 19480, 19481, 19458, 19468, 19482, 19483, 19484, 19459, 19466, 19485, 19486, 19462, 19487, 19488, 19489, 19490, 19491, 19258, 19261, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19584, 19594, 19598, 19603, 19588, 19604, 19608, 19586, 19587, 19589, 19609, 19612, 19585, 19595, 19599, 19601, 19606, 19613, 19616, 19590, 19618, 19591, 19619, 18882, 19036, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19712, 19716, 19719, 19720, 19721, 19724, 19597, 19717, 19722, 19611, 19726, 19731, 19733, 19265, 19735, 19736, 19264, 19262, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19715, 19718, 19843, 19845, 19725, 19849, 19853, 18879, 18887, 18891, 18895, 19856, 19037, 19857, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19968, 19728, 19970, 19971, 19972, 19975, 19976, 19977, 19978, 19980, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20096, 20098, 19974, 20102, 19855, 20104, 20105, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 
127, 20100, 20225, 20226, 20228, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20352, 20354, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19377, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20608, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20230, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20481, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
int h_C[] = {
2, 4, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 560, 562, 564, 566, 568, 570, 573, 575, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 700, 702, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1063, 1065, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1133, 1135, 1137, 1139, 1142, 1144, 1146, 1148, 1151, 1153, 1157, 1159, 1162, 1164, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1185, 1187, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1211, 1213, 1216, 1218, 1221, 1223, 1226, 1228, 1231, 1233, 1239, 1241, 1244, 1246, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1358, 1360, 1362, 1364, 1368, 1370, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1394, 1396, 1400, 1402, 1405, 1407, 1410, 1412, 1415, 1417, 1420, 1422, 1425, 1427, 1430, 
1432, 1435, 1437, 1440, 1442, 1444, 1446, 1448, 1450, 1453, 1455, 1459, 1461, 1463, 1465, 1470, 1472, 1474, 1476, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 1514, 1516, 1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1696, 1698, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1719, 1721, 1723, 1725, 1727, 1729, 1733, 1735, 1741, 1743, 1745, 1747, 1749, 1751, 1754, 1756, 1759, 1761, 1763, 1765, 1767, 1769, 1772, 1774, 1777, 1779, 1782, 1784, 1787, 1789, 1792, 1794, 1797, 1799, 1801, 1803, 1805, 1807, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1856, 1858, 1860, 1862, 1866, 1868, 1870, 1872, 1874, 1876, 1863, 1863, 1854, 1854, 1863, 1863, 1922, 1924, 1926, 1928, 1930, 1932, 286, 1477, 1660, 1236, 1236, 1477, 1660, 1730, 1276, 1276, 492, 492, 1236, 1236, 948, 1236, 1236, 1730, 1730, 1738, 1738, 286, 571, 571, 571, 571, 571, 571, 1738, 577, 1236, 1236, 1642, 558, 1236, 1236, 1236, 1236, 1038, 395, 571, 1236, 1236, 1068, 1068, 1131, 1131, 1140, 1140, 1021, 1021, 1642, 1863, 492, 1236, 1236, 492, 1236, 1236, 501, 501, 1236, 1236, 1068, 1068, 1131, 1131, 492, 492, 1236, 1236, 1068, 1068, 1131, 1131, 501, 501, 1236, 1236, 1089, 1089, 558, 1642, 1642, 1809, 947, 571, 577, 1809, 1236, 1236, 1038, 703, 2286, 2288, 2290, 2292, 2295, 2297, 2299, 2301, 2304, 2306, 2308, 2310, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2350, 2352, 2354, 2356, 2358, 2360, 1365, 1365, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 1236, 1236, 1068, 1068, 1131, 1131, 1236, 1236, 1140, 1140, 1089, 1089, 1140, 1140, 1276, 1276, 1365, 1242, 914, 1700, 1021, 1021, 1038, 1038, 947, 948, 1021, 1021, 1038, 1038, 1693, 1021, 1021, 1730, 1738, 1038, 1038, 1039, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2645, 2647, 2650, 2652, 2654, 2656, 1089, 1089, 1066, 1066, 1068, 1068, 1140, 1140, 1067, 1067, 1068, 1068, 1166, 1131, 1131, 1140, 1140, 1154, 1154, 1236, 1236, 1236, 1236, 1242, 1242, 1247, 1365, 1365, 1366, 1354, 1276, 1276, 1333, 1354, 1355, 1356, 1365, 1365, 1366, 1392, 1397, 1466, 1466, 1854, 1642, 1642, 1693, 1700, 1730, 1730, 1738, 1738, 1854, 1854, 1863, 1863, 1854, 1854, 1863, 1863, 1854, 1863, 2980, 2982, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3066, 3068, 3071, 3073, 3075, 3077, 3079, 3081, 3084, 3086, 3090, 3092, 3095, 3097, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3120, 3122, 3125, 3127, 3131, 3133, 3135, 3137, 3140, 3142, 2643, 2643, 3145, 3145, 2643, 2643, 2302, 2302, 2302, 2302, 3145, 3145, 2964, 2971, 2348, 2348, 2293, 3145, 3145, 2348, 2348, 2293, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2302, 2302, 2302, 2302, 2302, 2302, 2348, 2348, 2348, 2348, 2348, 
2348, 2311, 2311, 2311, 2311, 3145, 3145, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2657, 3681, 3683, 3689, 3691, 3148, 3148, 3069, 3069, 3148, 3148, 3150, 3708, 3710, 2643, 2643, 2648, 2643, 2643, 2643, 2643, 2648, 2657, 2964, 2971, 4019, 4021, 3145, 3145, 4054, 4056, 4058, 4060, 4063, 4065, 3145, 3145, 3145, 3145, 3148, 3148, 3087, 3087, 3117, 3117, 3138, 3138, 3145, 3145, 3148, 3148, 3150, 4141, 4143, 4146, 4148, 4153, 4155, 4158, 4160, 4163, 4165, 4167, 4169, 4172, 4174, 4176, 4178, 3846, 3676, 3846, 3846, 4180, 4180, 3846, 3846, 4180, 4180, 4180, 4180, 3676, 3846, 4150, 4180, 4180, 4150, 4170, 4170, 4180, 4180, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6657, 6659, 6661, 6663, 6665, 6667, 6669, 6671, 6673, 6675, 6677, 6679, 6681, 6683, 6685, 6687, 6689, 6691, 6693, 6695, 6697, 6699, 6701, 6703, 6705, 6707, 6709, 6711, 6713, 6715, 6717, 6719, 6721, 6723, 6725, 6727, 6729, 6731, 6733, 6735, 6737, 6739, 6741, 6743, 6745, 6747, 6749, 6751, 6753, 6755, 6757, 6759, 6761, 6763, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6785, 6787, 6789, 6791, 6793, 6795, 6797, 6799, 6801, 6803, 6805, 6807, 6809, 6811, 6813, 6815, 6817, 6819, 6821, 6823, 6825, 6827, 6829, 6831, 6833, 6835, 6837, 6839, 6841, 6843, 6845, 6847, 6849, 6851, 6853, 6855, 6857, 6859, 6861, 6863, 6865, 6867, 6869, 6871, 6873, 6875, 6877, 6879, 6881, 6883, 6885, 6887, 6889, 6891, 6893, 6895, 6897, 6899, 6901, 6903, 6905, 6907, 6909, 6911, 6913, 6915, 6917, 6919, 6921, 6923, 6925, 6927, 6929, 6931, 6933, 6935, 6937, 6939, 6941, 6943, 6945, 6947, 6949, 6951, 6953, 6955, 6957, 6959, 6961, 6963, 6965, 6967, 6969, 6971, 6973, 6975, 6977, 6979, 6981, 6983, 6985, 6987, 6989, 6991, 6993, 6995, 6997, 6999, 7001, 7003, 7005, 7007, 7009, 7011, 7013, 7015, 7017, 7019, 7021, 7023, 7025, 7027, 7029, 7031, 7033, 7035, 7037, 7039, 7041, 7043, 7045, 7047, 7049, 7051, 7053, 7055, 7057, 7059, 7061, 7063, 7065, 7067, 7069, 7071, 7073, 7075, 7077, 7079, 7081, 7083, 7085, 7087, 7089, 7091, 7093, 7095, 7097, 7099, 7101, 7103, 7105, 7107, 7109, 7111, 7113, 7115, 7117, 7119, 7121, 7123, 7125, 7127, 7129, 7131, 7133, 7135, 7137, 7139, 7141, 7143, 7145, 7147, 7149, 7151, 7153, 7155, 7157, 7159, 7161, 7163, 7165, 7167, 7169, 7171, 7173, 7175, 7177, 7179, 7181, 7183, 7185, 7187, 7189, 7191, 7193, 7195, 7197, 7199, 7201, 7203, 7205, 7207, 7209, 7211, 7213, 7215, 7217, 7219, 7221, 7223, 7225, 7227, 7229, 7231, 7233, 7235, 7237, 7239, 7241, 7243, 7245, 7247, 7249, 7251, 7253, 7255, 7257, 7259, 7261, 7263, 7265, 7267, 7269, 7271, 7273, 7275, 7277, 7279, 7281, 7283, 7285, 7287, 7289, 7291, 7293, 7295, 7297, 7299, 7301, 7303, 7305, 7307, 7309, 7311, 7313, 7315, 7317, 7319, 7321, 7323, 7325, 7327, 7329, 7331, 7333, 7335, 7337, 7339, 7341, 7343, 7345, 7347, 7349, 7351, 7353, 7355, 7357, 7359, 7361, 7363, 7365, 7367, 7369, 7371, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7389, 7391, 7393, 7395, 7397, 7399, 7401, 7403, 7405, 7407, 7409, 7411, 7413, 7415, 7417, 7419, 7421, 7423, 7425, 7427, 7429, 7431, 7433, 7435, 7437, 7439, 7441, 7443, 7445, 7447, 7449, 7451, 7453, 7455, 7457, 7459, 7461, 7463, 7465, 7467, 7469, 7471, 7473, 7475, 7477, 7479, 7481, 7483, 7485, 7487, 7489, 7491, 7493, 7495, 7497, 7499, 7501, 7503, 7505, 7507, 7509, 7511, 7513, 7515, 7517, 7519, 7521, 7523, 7525, 7527, 7529, 7531, 7533, 7535, 7537, 7539, 7541, 1883, 1884, 1894, 1895, 1897, 1898, 7549, 7551, 7553, 1933, 1938, 1942, 1951, 1952, 1964, 1970, 1971, 1972, 1973, 1983, 1984, 1985, 1986, 1987, 
2037, 2038, 2044, 2045, 2048, 2049, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2072, 2081, 2082, 2093, 2096, 2100, 2101, 2104, 2105, 2108, 2116, 2117, 2123, 2124, 2126, 2127, 2129, 2130, 2136, 2137, 2139, 2140, 2144, 2145, 2153, 2154, 2155, 2159, 2160, 2161, 2165, 2166, 2167, 2168, 2170, 2171, 2173, 2174, 2175, 2176, 2177, 2178, 2180, 2181, 2183, 2184, 2185, 2186, 2187, 2188, 2192, 2193, 2205, 2208, 2209, 2219, 2229, 2230, 2232, 2233, 2236, 2237, 2267, 2281, 7648, 7650, 7652, 7654, 7656, 7658, 7660, 7662, 7664, 7666, 7668, 7670, 7672, 7674, 7676, 7678, 7680, 7682, 2362, 2363, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 2411, 2412, 2424, 2425, 2427, 2428, 2431, 2432, 2453, 2454, 2457, 2458, 2466, 2467, 2503, 2504, 2511, 2514, 2520, 2533, 2544, 2546, 2550, 2551, 2554, 2555, 2564, 2565, 2569, 2570, 2572, 2598, 2599, 2602, 2603, 2606, 2607, 2610, 7740, 7742, 7744, 7746, 7748, 7750, 7752, 7754, 7756, 7758, 2666, 2667, 2675, 2676, 2677, 2678, 2681, 2682, 2683, 2684, 2685, 2686, 2699, 2708, 2709, 2712, 2713, 2716, 2719, 2731, 2732, 2738, 2739, 2750, 2751, 2758, 2760, 2761, 2763, 2771, 2777, 2778, 2798, 2806, 2807, 2808, 2810, 2811, 2812, 2831, 2834, 2846, 2848, 2881, 2907, 2908, 2928, 2931, 2939, 2940, 2943, 2944, 2959, 2960, 2962, 2963, 2966, 2967, 2969, 2970, 2974, 2976, 7822, 7824, 7826, 7828, 7830, 7832, 7834, 7836, 7838, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 3177, 3178, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3196, 3197, 3505, 3528, 3587, 3588, 3596, 3597, 3598, 3601, 3602, 3610, 3613, 3614, 3624, 3625, 3626, 3627, 3628, 3629, 3631, 3632, 3633, 3634, 3635, 3636, 3638, 3639, 3640, 3641, 3642, 3643, 3645, 3646, 3647, 3648, 3650, 3651, 3658, 3659, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3679, 7957, 7959, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 7968, 3826, 3827, 3835, 3837, 3838, 3839, 3840, 3843, 3845, 4007, 4010, 7981, 4039, 4040, 7985, 7987, 7989, 4083, 4084, 4098, 4099, 4100, 4101, 4115, 4118, 4124, 4127, 4132, 4133, 4135, 4136, 4137, 4138, 4139, 8008, 8010, 8012, 8014, 8016, 8018, 8020, 8022, 4221, 4222, 4229, 4230, 5163, 5164, 5178, 5179, 5182, 5183, 5210, 5211, 5227, 5314, 5422, 5426, 5427, 5470, 5473, 5475, 5477, 5478, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 8320, 8322, 8490, 1809, 8494, 8508, 8439, 8502, 8504, 8506, 8320, 8322, 8490, 1809, 8496, 8510, 8498, 8512, 8439, 8502, 8504, 8506, 8397, 8392, 8400, 8399, 8401, 8403, 8402, 8405, 8404, 8064, 0, 8288, 8066, 8064, 5, 8288, 8066, 8288, 8154, 8317, 8444, 8418, 8496, 8498, 8439, 8068, 8071, 8070, 8072, 8075, 8074, 8076, 8152, 8520, 1089, 8079, 8078, 8204, 8215, 8095, 8080, 8080, 947, 8095, 8080, 8482, 8427, 8496, 8498, 8482, 8525, 8081, 8423, 8423, 8423, 8423, 8445, 8083, 8085, 8084, 8527, 8529, 8087, 8086, 8475, 8088, 8090, 8089, 8475, 8091, 8475, 8092, 8093, 8094, 8095, 948, 8441, 8440, 8235, 8100, 8441, 8440, 8417, 8416, 8235, 8100, 8215, 8098, 8235, 8100, 948, 8101, 8104, 8103, 8105, 8108, 8107, 8109, 8150, 8111, 8483, 8112, 8114, 8116, 1809, 8118, 8120, 8122, 8421, 8124, 8152, 8532, 8125, 8126, 8235, 8129, 8128, 8534, 8475, 8130, 8536, 8215, 8222, 8317, 8317, 8131, 8133, 8146, 8425, 8427, 8449, 8451, 8417, 8416, 8539, 8541, 8543, 8146, 8136, 8139, 8138, 8140, 8143, 8142, 8144, 8152, 8547, 8145, 8146, 8319, 8319, 8425, 
8147, 8150, 8149, 8425, 8427, 8449, 8451, 8434, 8152, 8151, 8551, 8153, 8152, 8553, 8154, 8478, 8155, 8157, 8478, 1477, 1477, 8418, 8161, 8162, 8164, 8163, 8166, 8165, 8558, 8167, 8560, 8168, 8562, 1089, 8332, 8331, 8341, 8340, 8564, 8313, 8566, 8170, 8169, 8435, 8457, 8171, 8174, 8173, 8175, 8177, 8176, 8571, 8178, 8180, 8179, 8574, 8181, 8183, 8182, 8576, 8578, 8184, 8580, 8185, 8582, 8584, 8586, 8186, 8588, 8187, 8590, 8592, 8594, 1089, 8190, 8189, 8596, 8192, 8191, 8194, 8193, 8400, 8195, 8197, 8196, 8198, 8199, 8201, 8235, 947, 8599, 8215, 8202, 8435, 8203, 8225, 8227, 8229, 8231, 8492, 8225, 8227, 8229, 8231, 8444, 947, 8204, 947, 8313, 8205, 8207, 8206, 8606, 8326, 8208, 8346, 8327, 8346, 8328, 8329, 8338, 8210, 8209, 8339, 8210, 8209, 8212, 8211, 8214, 8213, 8215, 8216, 8218, 8217, 8475, 8219, 8475, 8220, 8221, 8222, 8313, 8317, 8223, 8225, 8227, 8229, 8231, 8444, 8479, 8475, 8233, 8316, 8234, 8235, 8236, 1477, 1477, 8456, 8407, 8628, 8343, 8239, 8346, 8344, 8346, 8345, 1166, 8246, 8351, 8350, 8240, 8242, 8241, 8244, 8243, 8638, 8343, 8245, 8346, 8344, 8346, 8345, 1166, 8246, 8351, 8350, 8248, 8640, 8249, 8642, 8251, 8250, 8644, 8253, 8252, 8346, 8254, 8346, 8255, 8256, 1089, 8259, 8258, 8346, 8333, 1110, 8337, 8336, 8260, 8262, 8261, 8264, 8263, 8646, 8326, 8265, 8648, 8332, 8331, 8266, 8268, 8267, 8341, 8340, 8650, 8386, 8385, 8400, 8387, 8400, 8388, 8389, 8372, 8371, 8400, 8373, 8400, 8374, 8375, 8384, 8376, 8377, 8365, 8363, 8400, 8366, 8400, 8367, 8368, 8379, 8378, 8400, 8380, 8400, 8381, 8382, 8384, 8383, 8400, 8369, 8652, 8269, 8271, 8272, 8274, 8275, 8277, 8279, 8281, 8283, 8285, 8287, 8288, 8313, 8425, 8301, 8289, 8475, 8303, 8304, 8444, 8479, 8478, 8423, 8290, 8468, 8448, 8319, 8309, 8292, 8475, 8310, 8475, 8311, 8312, 8293, 8295, 8475, 8315, 8317, 8660, 8423, 8422, 8309, 8308, 8475, 8310, 8475, 8311, 8312, 8313, 8664, 8475, 8315, 8317, 8666, 8468, 8298, 8319, 8301, 8300, 8475, 8302, 8475, 8303, 8304, 8444, 8479, 8306, 8480, 8483, 8482, 8484, 8427, 8309, 8308, 8475, 8310, 8475, 8311, 8312, 8313, 8669, 8475, 8315, 8316, 8317, 8673, 8423, 8422, 8319, 8320, 8322, 8439, 8326, 8325, 8346, 8327, 8346, 8328, 8329, 8686, 8332, 8324, 8346, 8333, 1110, 8337, 8336, 8688, 8690, 8341, 8340, 8692, 8694, 8696, 8326, 8325, 8346, 8327, 8346, 8328, 8329, 1089, 8332, 8331, 8346, 8333, 8346, 8346, 8334, 1110, 8337, 8336, 8339, 8338, 8699, 8341, 8340, 8701, 8343, 8342, 8346, 8344, 8346, 8345, 1166, 8348, 8351, 8350, 8353, 8352, 8355, 8354, 8356, 8705, 8358, 8357, 8360, 8359, 8361, 8707, 8365, 8364, 8400, 8366, 8400, 8367, 8368, 8407, 1365, 8406, 8709, 1242, 8365, 8363, 8400, 8367, 8368, 8407, 8712, 1365, 8365, 8364, 8400, 8366, 8400, 8367, 8368, 8400, 8369, 8400, 8400, 8370, 8716, 8372, 8371, 8400, 8373, 8400, 8374, 8375, 8384, 8376, 8377, 8379, 8378, 8400, 8380, 8400, 8381, 8382, 8384, 8383, 8386, 8385, 8400, 8387, 8400, 8388, 8389, 8390, 8722, 8397, 8392, 8400, 8398, 8400, 8399, 8401, 8403, 8402, 8405, 8404, 8406, 8393, 8410, 8395, 8411, 8397, 8396, 8400, 8398, 8400, 8399, 8401, 8403, 8402, 8405, 8404, 8406, 8407, 8410, 8409, 8411, 8492, 8494, 8438, 8439, 8502, 8504, 1477, 1477, 1477, 1477, 1477, 1477, 1477, 8453, 1809, 8417, 8416, 8418, 8421, 8420, 8423, 8422, 8424, 8425, 8427, 8429, 8431, 8433, 8435, 8434, 8436, 8437, 8494, 8438, 8439, 8502, 8504, 8441, 8440, 8443, 8442, 8444, 8479, 8478, 8446, 8445, 8447, 8468, 8470, 8448, 8449, 8480, 8483, 8482, 8484, 8451, 8453, 8730, 1660, 1660, 1660, 8457, 8456, 1660, 1660, 1660, 8460, 8459, 8461, 8475, 8462, 8463, 8476, 8479, 8464, 8466, 8465, 8467, 
8467, 8468, 8470, 8471, 8473, 8472, 8475, 8474, 8734, 8475, 8475, 8736, 8476, 8479, 8478, 8480, 8483, 8482, 8484, 8486, 8485, 8487, 8488, 8490, 1809, 8492, 8738, 8494, 8740, 8496, 8742, 8498, 8744, 8500, 8502, 8504, 8506, 8513, 8785, 8676, 8609, 8780, 8677, 8780, 8678, 3129, 8787, 3069, 8789, 8791, 8793, 3099, 8795, 3148, 8516, 8516, 8516, 8516, 8516, 8516, 8516, 8516, 8608, 8517, 8517, 8518, 8518, 8556, 8747, 8521, 8521, 8522, 8522, 8674, 8674, 8537, 8537, 8537, 8537, 8602, 8603, 8597, 8555, 8555, 8556, 8597, 8747, 8747, 8674, 8602, 8674, 8603, 8674, 8674, 8608, 8799, 8676, 8609, 8780, 8677, 8780, 8678, 3129, 8802, 8629, 8755, 8804, 8754, 8753, 8780, 8778, 8780, 8779, 3129, 8625, 8683, 8807, 8754, 8611, 8780, 8778, 8780, 8779, 3129, 3069, 3069, 8809, 8811, 8813, 3099, 8815, 8817, 8819, 3129, 8821, 8823, 8825, 3099, 8827, 8829, 3129, 8831, 8629, 8755, 8754, 8753, 3148, 3148, 8833, 8618, 8617, 8620, 8619, 8621, 8623, 8835, 8837, 8839, 8841, 8843, 8624, 8625, 8629, 8632, 8631, 8774, 8633, 8774, 8634, 3099, 3069, 8848, 8850, 8852, 8653, 8653, 8654, 8654, 8674, 8674, 8747, 8856, 8676, 8675, 8780, 8677, 8780, 8678, 3129, 8680, 8859, 8861, 8777, 8682, 8683, 8714, 8718, 8747, 8747, 8747, 8749, 8748, 8750, 8774, 8773, 3099, 8751, 8770, 8774, 8772, 8777, 8752, 8780, 8778, 8780, 8779, 3129, 8765, 8766, 8757, 8868, 8755, 8771, 8770, 8754, 8753, 8780, 8778, 8780, 8779, 3129, 8782, 8755, 8774, 8762, 3099, 8774, 8761, 8760, 8759, 8777, 8756, 8780, 8778, 8780, 8779, 3129, 8765, 8766, 8757, 8873, 3069, 8760, 8759, 8774, 8761, 8774, 8762, 3099, 8777, 8764, 8765, 8766, 8767, 8875, 8877, 8771, 8770, 8777, 8768, 8780, 8778, 8780, 8779, 3129, 8782, 3069, 8771, 8770, 8774, 8772, 8774, 8773, 3099, 8777, 8776, 8780, 8778, 8780, 8779, 3129, 8782, 8783, 8885, 8887, 8857, 8857, 8857, 8857, 8800, 8800, 8805, 8805, 8900, 3846, 3846, 8800, 8800, 8805, 8805, 8862, 8862, 8857, 8857, 8862, 8862, 8890, 8889, 3846, 3846, 3846, 8902, 8890, 8889, 8904, 3846, 3846, 8906, 8890, 8889, 3684, 3684, 8908, 8890, 8889, 3684, 3684, 3684, 8846, 8846, 8854, 8857, 8857, 8862, 8862, 8870, 8869, 8866, 8893, 8895, 8870, 8869, 8871, 8893, 8895, 8913, 8890, 8889, 8892, 8891, 8893, 8895, 8918, 8897, 8897, 8910, 8910, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 1878, 1879, 1880, 1881, 1882, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1896, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1934, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1965, 1966, 1967, 1968, 1969, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2039, 2040, 2041, 2042, 2043, 2046, 2047, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2071, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2094, 2095, 2097, 2098, 2099, 2102, 2103, 2106, 2107, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2118, 2119, 2120, 2121, 2122, 2125, 2128, 2131, 2132, 2133, 2134, 2135, 2138, 2141, 2142, 2143, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2156, 2157, 2158, 2162, 2163, 2164, 
2169, 2172, 2179, 2182, 2189, 2190, 2191, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2206, 2207, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2231, 2234, 2235, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2282, 2283, 2284, 2361, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2426, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2455, 2456, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2505, 2506, 2507, 2508, 2509, 2510, 2512, 2513, 2515, 2516, 2517, 2518, 2519, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2545, 2547, 2548, 2549, 2552, 2553, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2566, 2567, 2568, 2571, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2600, 2601, 2604, 2605, 2608, 2609, 2611, 2612, 2613, 2614, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2679, 2680, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2710, 2711, 2714, 2715, 2717, 2718, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2733, 2734, 2735, 2736, 2737, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2752, 2753, 2754, 2755, 2756, 2757, 2759, 2762, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2772, 2773, 2774, 2775, 2776, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2809, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2832, 2833, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2847, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2929, 2930, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2941, 2942, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2961, 2965, 2968, 2973, 2975, 2977, 2978, 3176, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3188, 3195, 3198, 3200, 3201, 3206, 3207, 3209, 3210, 3212, 3213, 3225, 3227, 3228, 3234, 3235, 3263, 3271, 3296, 3297, 3304, 3305, 9116, 9118, 9117, 9032, 9042, 3321, 3330, 3390, 3391, 3393, 3394, 9116, 9117, 9118, 3398, 3400, 3414, 3422, 3425, 3437, 8569, 8572, 9193, 9199, 9205, 3491, 3496, 3516, 3530, 3531, 3533, 3535, 3562, 3563, 3581, 3589, 3590, 3591, 3592, 
3593, 3594, 3595, 3599, 3600, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3611, 3612, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3630, 3637, 3644, 3649, 3652, 3653, 3654, 3655, 3656, 3657, 3660, 3661, 3662, 3663, 3664, 3665, 3677, 3678, 9293, 3687, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 9394, 3760, 3762, 3764, 3765, 3788, 3789, 3825, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3836, 3841, 3842, 3844, 9503, 9508, 9566, 9574, 3893, 9588, 3909, 9616, 3974, 3990, 4014, 4015, 4016, 4017, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4116, 4117, 4119, 4120, 4121, 4122, 4123, 4125, 4126, 4128, 4129, 4130, 4131, 4134, 9918, 9765, 8857, 4202, 4203, 9918, 9775, 9777, 9776, 9918, 9918, 9866, 9866, 8857, 4218, 4219, 8800, 4224, 4225, 8805, 4227, 4228, 4231, 4232, 9886, 9822, 8800, 5104, 5105, 9886, 9833, 8805, 5115, 5116, 9886, 9843, 8862, 5126, 5127, 9854, 9853, 9858, 9857, 9862, 9861, 9866, 9865, 8857, 5143, 5144, 9886, 9886, 8862, 5154, 5155, 5158, 5159, 5160, 5161, 5162, 9875, 9883, 5176, 5177, 5180, 5181, 9884, 9883, 9886, 9885, 5206, 5207, 5208, 5209, 9884, 9883, 9886, 9885, 5225, 5226, 5238, 5239, 5240, 5243, 5246, 9898, 9899, 9900, 5254, 9918, 9908, 8857, 5301, 5302, 9918, 9917, 8862, 5311, 5312, 5395, 5396, 5397, 5398, 5399, 5420, 5421, 5423, 5424, 5425, 9992, 10021, 5468, 5469, 5471, 5472, 5474, 5476, 10030, 8897, 5558, 5559, 10030, 10051, 8910, 6140, 6141, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 10132, 10134, 10137, 10139, 10143, 10147, 10158, 10161, 10163, 10166, 10188, 10190, 10192, 10194, 10196, 10198, 10204, 10208, 10210, 10220, 10223, 10226, 10237, 10242, 10244, 10257, 10261, 10264, 10266, 10274, 10281, 10283, 10295, 10297, 10302, 10304, 10307, 10312, 10315, 10318, 10321, 10328, 10330, 10332, 10334, 10336, 10345, 10362, 10364, 10366, 10368, 10372, 10375, 10377, 10379, 10383, 10385, 10387, 10400, 10402, 10410, 10412, 10414, 10418, 10421, 10423, 10425, 10427, 10429, 10433, 10437, 10439, 10441, 10443, 10447, 10449, 10452, 10455, 10457, 10459, 10461, 10464, 10466, 10468, 10470, 10472, 10475, 10477, 10479, 10482, 10485, 10487, 10489, 10492, 10494, 10496, 10499, 10501, 10517, 10519, 10523, 10525, 10530, 10532, 10534, 10539, 10542, 10544, 10546, 10548, 10552, 10558, 10560, 10562, 10566, 10569, 10573, 10575, 10577, 10581, 10585, 10591, 10593, 10595, 10598, 10600, 10603, 10605, 10607, 10609, 10611, 10615, 10617, 10619, 10623, 10625, 10627, 10629, 10631, 10633, 10637, 10639, 10641, 10644, 10646, 10649, 10651, 10653, 10660, 10662, 10667, 10669, 10671, 10674, 10676, 10679, 10681, 10683, 10686, 10689, 10691, 10693, 10696, 10698, 10700, 10702, 10706, 10708, 10710, 10713, 10715, 10719, 10722, 10724, 10726, 10729, 10731, 10735, 10753, 10756, 10758, 10766, 10775, 10777, 10780, 10782, 10790, 10798, 10803, 10806, 10810, 10812, 10814, 10819, 10821, 10823, 10826, 10829, 10832, 10112, 10114, 10116, 10837, 10119, 10118, 10117, 10121, 10123, 10126, 10125, 10129, 10128, 10127, 10140, 10148, 10144, 10148, 10847, 
10849, 10851, 8516, 8516, 8516, 8516, 10526, 10784, 10784, 10784, 10152, 10149, 10150, 10151, 10355, 10152, 8517, 10866, 10154, 10153, 10772, 10155, 8518, 10868, 10167, 10200, 10169, 10403, 10168, 10169, 10200, 10169, 10791, 10788, 10791, 10570, 10842, 10589, 8655, 10762, 10835, 10394, 10393, 10396, 10395, 10838, 10837, 8655, 8655, 10173, 10357, 10173, 10248, 10173, 10342, 10340, 10355, 10788, 10791, 8521, 10872, 10567, 10570, 10178, 10177, 10788, 8522, 10874, 3306, 3307, 3308, 3309, 10184, 10234, 10180, 10181, 10182, 10183, 10184, 10185, 3319, 10579, 10200, 10201, 10514, 10583, 10778, 10786, 10205, 8674, 10211, 8655, 10213, 10215, 10786, 10788, 10745, 10743, 10567, 10228, 10230, 10233, 10232, 10579, 10234, 10540, 10521, 10514, 10583, 10754, 10778, 10238, 10240, 10245, 10246, 10247, 10359, 10248, 10250, 10249, 10355, 10527, 10555, 10267, 8655, 10786, 10791, 10786, 10791, 8537, 8537, 3395, 3396, 3397, 10267, 8545, 10786, 10791, 10267, 10555, 10527, 8655, 10759, 10791, 10786, 10791, 8549, 10284, 10397, 10390, 10391, 10287, 10286, 10840, 10839, 10397, 8555, 8555, 10290, 10289, 10291, 8555, 10299, 10298, 10305, 10838, 10837, 10840, 10839, 10843, 10769, 10768, 10770, 10773, 10764, 10738, 10737, 10739, 10742, 10745, 10744, 10746, 10749, 10835, 10795, 10794, 10796, 10801, 3471, 3473, 3475, 10323, 10322, 3478, 10325, 10324, 3481, 10338, 10360, 10793, 10394, 10393, 10396, 10395, 10838, 10837, 10340, 8674, 10793, 10835, 10406, 10405, 10342, 8674, 10360, 10793, 10835, 10347, 10346, 10349, 10348, 10769, 10350, 10835, 10352, 10351, 10354, 10353, 10355, 8674, 10357, 8674, 10359, 10360, 10762, 10394, 10393, 10838, 10837, 10795, 10794, 10796, 10800, 10799, 10380, 10769, 10767, 10770, 10772, 10771, 10389, 10908, 10390, 10391, 10793, 10394, 10393, 10396, 10395, 10838, 10837, 10397, 10738, 10737, 10739, 10741, 10740, 10403, 10406, 10405, 10746, 10748, 10747, 10911, 10913, 10915, 10920, 10922, 10924, 10929, 10931, 10933, 10944, 10948, 10950, 8720, 3686, 10958, 10960, 10962, 10435, 10434, 3757, 10504, 10502, 10506, 10508, 10969, 10510, 10513, 10512, 10514, 8655, 10526, 10527, 10537, 10536, 10540, 10971, 10550, 10553, 10554, 10555, 10570, 10579, 10583, 8674, 10587, 10835, 10838, 10837, 10840, 10839, 10843, 10842, 10589, 10974, 10976, 10978, 10982, 3855, 3857, 10642, 10647, 8720, 10656, 10657, 10658, 3883, 8710, 10665, 3889, 3897, 8720, 3911, 10716, 10720, 10732, 10736, 10738, 10737, 10742, 10741, 10740, 10784, 10759, 10791, 10793, 10835, 10745, 10744, 10743, 10749, 10748, 10747, 10750, 10835, 10838, 10837, 10840, 10839, 10843, 10842, 10841, 10784, 10759, 10791, 10762, 10769, 10768, 10767, 10773, 10772, 10771, 10784, 10786, 10791, 10793, 10835, 10795, 10794, 10801, 10800, 10799, 10815, 10833, 10835, 10838, 10837, 10840, 10839, 10843, 10842, 10841, 10996, 10999, 11002, 11004, 11006, 11008, 11010, 11017, 11019, 11021, 11023, 11028, 11031, 11033, 11035, 11037, 11039, 11046, 11048, 11050, 11053, 11058, 11060, 11062, 11064, 11069, 11071, 11073, 11076, 11078, 11080, 11015, 10956, 10845, 10954, 4195, 4196, 9918, 4201, 10853, 4205, 4206, 9918, 9777, 4209, 4210, 4211, 4212, 9918, 9866, 4215, 4216, 4217, 10855, 4223, 4226, 10909, 10909, 10909, 10909, 9886, 5098, 5099, 5103, 10918, 10917, 9886, 5109, 5110, 5114, 10927, 10926, 9886, 5120, 5121, 5125, 10936, 10935, 9855, 5131, 5132, 9859, 5134, 5135, 9863, 5137, 5138, 9866, 5140, 5141, 5142, 10942, 10941, 9886, 5148, 5149, 5153, 10946, 10945, 11140, 5165, 5166, 9882, 11013, 11012, 10997, 11015, 10956, 11147, 5184, 5185, 9882, 11013, 11012, 10997, 11015, 10951, 9886, 5196, 
5197, 11083, 11083, 10952, 10983, 11026, 11155, 5212, 5213, 9882, 11015, 10956, 9886, 5221, 5222, 10983, 11026, 11163, 11014, 11013, 11012, 11015, 10956, 11083, 11083, 10953, 10983, 10954, 10956, 10956, 10983, 10964, 5251, 5252, 5253, 5294, 5295, 9918, 5300, 10980, 9918, 5305, 5306, 5310, 10983, 11013, 11012, 10997, 11184, 11014, 11013, 11012, 11015, 11083, 11083, 11025, 11026, 11189, 11043, 11042, 11041, 11044, 11056, 11055, 11054, 5447, 11083, 11083, 11066, 11067, 11083, 11083, 11082, 5467, 11196, 11198, 11187, 11186, 11185, 11107, 11106, 5530, 5557, 11200, 11199, 11107, 11106, 5592, 11143, 11142, 11141, 11149, 11148, 6034, 11157, 11165, 11156, 11166, 11165, 11164, 11166, 11165, 11164, 11187, 11186, 11185, 11200, 11199, 6139, 11187, 11186, 11185, 11192, 11191, 11190, 11200, 11199, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 11265, 11431, 11264, 11266, 3170, 3171, 11268, 3173, 3174, 11269, 3199, 8516, 8516, 8516, 3205, 3208, 3211, 8516, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3226, 3229, 3230, 3231, 3232, 3233, 10156, 10159, 11272, 10164, 3240, 11279, 11278, 11277, 3244, 3245, 3246, 3247, 3248, 11279, 11278, 11277, 3252, 11459, 11459, 11369, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3272, 3273, 3274, 3275, 11459, 11459, 11369, 3279, 3280, 11459, 11459, 11369, 3284, 3285, 11459, 11459, 11369, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3298, 3299, 3300, 3301, 3302, 3303, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 10186, 3320, 11459, 11276, 11275, 11279, 11278, 11277, 3328, 3329, 3331, 3332, 3333, 3334, 11280, 3336, 3337, 11281, 3339, 3340, 3341, 11446, 3343, 3344, 10218, 10221, 10224, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 11286, 3365, 3366, 11288, 11287, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 8537, 8537, 8537, 8537, 3389, 3392, 3399, 3401, 10259, 10262, 11292, 3405, 3406, 3407, 3408, 3409, 3410, 10272, 3412, 3413, 3415, 3416, 3417, 11294, 11295, 3420, 3421, 3423, 3424, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 10293, 11297, 3440, 3441, 10300, 11299, 3444, 11300, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 10310, 10313, 10316, 10319, 3476, 3477, 3479, 3480, 10326, 9210, 11418, 11308, 11307, 3487, 11309, 3489, 3490, 3492, 3493, 3494, 3495, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3506, 3507, 10343, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3529, 3532, 3534, 3536, 3537, 3538, 3539, 11311, 11314, 11313, 11312, 10370, 10373, 3546, 3547, 3548, 3549, 3550, 11317, 3552, 3553, 3554, 3555, 3556, 3557, 11321, 11320, 11319, 3561, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 11322, 3580, 3582, 3583, 3584, 3585, 3586, 3685, 11326, 11325, 11324, 10416, 10419, 11329, 11332, 11331, 11330, 10431, 3721, 3722, 11334, 11337, 11336, 11335, 10445, 11397, 11397, 11339, 10450, 10453, 11342, 11394, 11393, 11343, 9350, 10462, 11346, 11349, 11348, 11347, 11352, 11351, 11350, 11353, 11356, 11355, 11354, 11359, 11358, 11357, 11360, 11418, 11418, 11361, 3758, 3759, 3761, 3763, 3766, 3767, 3768, 
3769, 3770, 11363, 11376, 11362, 10521, 3775, 11365, 3777, 11368, 11367, 11366, 3781, 3782, 11459, 11459, 11369, 3786, 11370, 11373, 11372, 11371, 3793, 11459, 11459, 11374, 3797, 3798, 3799, 11377, 11376, 11375, 10564, 10567, 3805, 11382, 11381, 11380, 3809, 11459, 11459, 11383, 3813, 11384, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 11387, 11386, 11385, 9495, 11397, 11397, 11389, 10601, 11391, 11394, 11393, 11392, 10613, 11397, 11397, 11396, 10621, 11399, 11400, 11403, 11402, 11401, 10635, 11405, 3873, 11407, 3875, 11411, 11410, 11409, 3879, 3880, 3881, 3882, 11413, 11415, 11412, 3887, 3888, 11416, 11415, 11414, 11418, 11418, 11417, 11421, 11420, 11419, 11422, 11425, 11424, 11423, 11426, 11429, 11428, 11427, 3910, 11432, 11431, 11430, 11433, 3916, 3917, 11435, 11438, 11437, 11436, 11439, 3923, 3924, 11441, 10764, 3927, 3928, 10739, 3930, 3931, 3932, 11446, 10754, 3935, 11444, 3937, 10788, 3939, 3940, 3941, 3942, 3943, 3944, 10746, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 11446, 10754, 3960, 11444, 3962, 10788, 3964, 3965, 10764, 3967, 3968, 3969, 10770, 3971, 3972, 3973, 11446, 10778, 3977, 11449, 3979, 10788, 3981, 3982, 3983, 3984, 3985, 10796, 3987, 3988, 3989, 11453, 11452, 10808, 3994, 11456, 11455, 10817, 11459, 11458, 10824, 10827, 10830, 4003, 4004, 4005, 4006, 4008, 4009, 4011, 4012, 4013, 4188, 4189, 4193, 4194, 4197, 11922, 11483, 11482, 11481, 11087, 4204, 4207, 11927, 4208, 4213, 11933, 4214, 11098, 4220, 11101, 11104, 10893, 11499, 11499, 11505, 11505, 11542, 11542, 11549, 11549, 4660, 4664, 10909, 10893, 5038, 5040, 10909, 5097, 11757, 11756, 11755, 11111, 5106, 5107, 5108, 11760, 11759, 11758, 11116, 5117, 5118, 5119, 11763, 11762, 11761, 11121, 5128, 5129, 5130, 5133, 5136, 5139, 11132, 5145, 5146, 5147, 11916, 11915, 11764, 11137, 5156, 5157, 5167, 11987, 11810, 11809, 11765, 5171, 5172, 5173, 5174, 5175, 5186, 11996, 11810, 11809, 11766, 5190, 5191, 5192, 5193, 5194, 5195, 11896, 11895, 11894, 5201, 5202, 5203, 5204, 5205, 5214, 12013, 11810, 11809, 11808, 5218, 5219, 5220, 5223, 5224, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5242, 5244, 5245, 11771, 11770, 11769, 5250, 5296, 12041, 11810, 11809, 11808, 11176, 5303, 5304, 11896, 11895, 11811, 11181, 5313, 11889, 11886, 11887, 11892, 11891, 11890, 5392, 5393, 5394, 11889, 11888, 11887, 11892, 11891, 11890, 5406, 5407, 5408, 5409, 11913, 11912, 11893, 11896, 11895, 11894, 5416, 5417, 5418, 5419, 11899, 11898, 11897, 11902, 11901, 11900, 5434, 5435, 5436, 5437, 11905, 11904, 11903, 11916, 11915, 11906, 5444, 5445, 5446, 11913, 11912, 11907, 11910, 11909, 11908, 5454, 5455, 5456, 5457, 11913, 11912, 11911, 11916, 11915, 11914, 5464, 5465, 5466, 12053, 5500, 5501, 5502, 12079, 5528, 5529, 12079, 11203, 12053, 12062, 12079, 5585, 5586, 12080, 5590, 5591, 11985, 6026, 6027, 6028, 11994, 6032, 6033, 12011, 6040, 6041, 6042, 12022, 6048, 6049, 6050, 12079, 6074, 6075, 6076, 12053, 6098, 6099, 6100, 12062, 12079, 6113, 6114, 12080, 12079, 11208, 12053, 6167, 6168, 6169, 12062, 6175, 6176, 6177, 12079, 6187, 6188, 12080, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 12163, 12165, 12170, 12172, 3166, 3167, 3168, 3169, 3172, 3175, 10856, 3202, 3203, 3204, 10858, 10860, 10862, 3214, 12204, 12206, 3236, 3237, 3238, 3239, 3241, 3242, 3243, 3249, 3250, 3251, 3253, 3254, 3255, 12234, 12239, 12241, 12243, 3276, 3277, 3278, 3281, 3282, 3283, 3286, 3287, 3288, 12269, 12274, 3318, 3322, 3323, 3324, 3325, 3326, 3327, 3335, 3338, 3342, 3345, 3346, 3347, 12309, 12314, 3364, 
3367, 3368, 12334, 3385, 3386, 3387, 3388, 10882, 10884, 3402, 3403, 3404, 3411, 3418, 3419, 12374, 12376, 12381, 3438, 3439, 12387, 3442, 3443, 3445, 12393, 12395, 12398, 12403, 12407, 12412, 3469, 3470, 3472, 3474, 12420, 12422, 3482, 3483, 3484, 3485, 3486, 3488, 12433, 12435, 12437, 12443, 3508, 12451, 12453, 12455, 12458, 12460, 12469, 12471, 3540, 3541, 3542, 3543, 3544, 3545, 12479, 12482, 3551, 12486, 12489, 3558, 3559, 3560, 12498, 12500, 12502, 12505, 12508, 3579, 12512, 12515, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 12528, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 12564, 12569, 3771, 3772, 3773, 3774, 3776, 3778, 3779, 3780, 12583, 3783, 3784, 3785, 3787, 3790, 3791, 3792, 3794, 3795, 3796, 3800, 3801, 3802, 3803, 3804, 3806, 3807, 3808, 3810, 3811, 3812, 3814, 12618, 12620, 12622, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3856, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3874, 3876, 3877, 3878, 11820, 3884, 3885, 3886, 11823, 3890, 3891, 3892, 3894, 3895, 3896, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3912, 3913, 3914, 3915, 3918, 3919, 3920, 3921, 3922, 3925, 3926, 12697, 3929, 12700, 3933, 3934, 3936, 3938, 12712, 3945, 12716, 12721, 12723, 12725, 3958, 3959, 3961, 3963, 3966, 12737, 3970, 12741, 3975, 3976, 3978, 3980, 12753, 3986, 12756, 3991, 3992, 3993, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 12773, 12775, 12777, 12160, 12167, 12780, 12783, 4198, 4199, 4200, 11088, 12790, 11930, 12793, 11936, 11099, 11102, 11105, 12566, 12565, 10909, 12440, 10909, 12487, 12431, 12448, 12566, 12565, 12192, 12597, 12311, 12193, 12709, 12513, 12194, 12487, 12195, 12566, 12565, 12379, 12709, 12513, 4346, 10892, 12496, 12383, 12487, 10864, 10864, 10864, 10864, 12566, 12565, 4386, 12202, 4388, 12230, 12266, 12311, 4396, 12207, 4398, 12566, 12565, 10909, 12440, 12448, 10909, 12487, 12236, 12566, 12565, 12263, 12709, 12513, 12230, 12487, 12266, 12311, 12270, 12566, 12565, 10869, 10869, 10869, 10909, 12487, 10869, 10909, 12440, 10869, 12448, 10869, 10869, 10869, 12431, 10909, 12236, 10909, 10909, 12566, 12565, 12431, 12448, 10909, 12487, 10909, 12440, 12263, 4591, 12265, 4593, 12266, 12311, 12270, 4600, 12271, 4602, 11551, 11551, 11551, 11550, 11551, 11551, 11551, 11552, 12566, 12565, 12272, 12487, 12277, 12275, 12276, 12277, 12311, 12278, 12709, 12513, 12279, 10909, 10909, 10909, 10909, 12307, 12307, 12307, 12513, 12310, 12311, 12566, 12565, 10909, 12709, 12513, 10909, 12487, 4735, 12496, 12709, 12597, 12496, 11615, 11615, 11615, 11615, 11613, 11614, 11615, 11615, 12566, 12565, 10889, 12431, 10889, 10889, 12448, 10889, 10909, 10889, 10889, 10889, 10889, 12496, 12734, 12709, 12750, 12750, 12566, 12565, 12734, 12750, 12597, 12496, 12396, 12709, 12409, 12408, 12750, 10893, 4878, 10892, 10893, 12378, 12379, 12513, 12383, 12566, 12565, 10894, 12448, 10894, 10909, 12440, 10894, 10894, 10909, 12487, 10894, 12431, 10894, 10894, 10894, 12597, 12615, 12396, 12734, 12400, 12399, 12750, 12405, 12404, 12709, 12409, 12408, 12750, 12414, 12413, 12566, 12565, 10904, 12431, 10904, 10909, 12440, 10904, 10909, 12487, 10904, 12448, 12467, 10904, 10909, 10904, 10909, 12513, 10904, 10904, 12467, 12566, 12565, 12480, 10909, 12487, 10909, 5078, 10909, 12496, 10909, 12506, 10909, 12709, 12513, 11947, 5100, 5101, 5102, 11112, 12822, 11953, 5111, 5112, 5113, 
11117, 12829, 11959, 5122, 5123, 5124, 11122, 12836, 11965, 11968, 11971, 11974, 11133, 12843, 11980, 5150, 5151, 5152, 11138, 12850, 12851, 5168, 5169, 5170, 12857, 12860, 12861, 5187, 5188, 5189, 12867, 12870, 12004, 5198, 5199, 5200, 12876, 12879, 12880, 5215, 5216, 5217, 12886, 12018, 12889, 12891, 12894, 12896, 12899, 12516, 5247, 5248, 5249, 12566, 12565, 12597, 12615, 12907, 5297, 5298, 5299, 11177, 12046, 5307, 5308, 5309, 11182, 12654, 12680, 12709, 12718, 12750, 12770, 5386, 5387, 5388, 5389, 5390, 5391, 12927, 5400, 5401, 5402, 5403, 5404, 5405, 12936, 5410, 5411, 5412, 5413, 5414, 5415, 12946, 5428, 5429, 5430, 5431, 5432, 5433, 12956, 5438, 5439, 5440, 5441, 5442, 5443, 12966, 5448, 5449, 5450, 5451, 5452, 5453, 12975, 5458, 5459, 5460, 5461, 5462, 5463, 12985, 5499, 12989, 5527, 12993, 5556, 11204, 5574, 5575, 5584, 5587, 13000, 13003, 6025, 13006, 6031, 13010, 6039, 13013, 6047, 13017, 6073, 13021, 6097, 13025, 6103, 6112, 6115, 13030, 6138, 11209, 6166, 13036, 6174, 13040, 6186, 6189, 13044, 122, 123, 124, 125, 126, 127, 12166, 12173, 13061, 13064, 13065, 10857, 10859, 10861, 10863, 13081, 13084, 13087, 13094, 13097, 13100, 13106, 13109, 13121, 10883, 10885, 13159, 13161, 13176, 13186, 13197, 13203, 13209, 13213, 13219, 13225, 13228, 13232, 13235, 13239, 13244, 13247, 13249, 13253, 13257, 13260, 13263, 13268, 13271, 12623, 13278, 13282, 13287, 13291, 13297, 13303, 13307, 13311, 13314, 13317, 13321, 13325, 13328, 13331, 13333, 13336, 12701, 13343, 12713, 12717, 12726, 13353, 12738, 12742, 13361, 12757, 13367, 13369, 13373, 12778, 4182, 13056, 4185, 13058, 13385, 11931, 11937, 13153, 13152, 13151, 13155, 13154, 12567, 4246, 4247, 13067, 4251, 4252, 13195, 13165, 13068, 13069, 4257, 12734, 13184, 4260, 13183, 4263, 13164, 13163, 13162, 4269, 13169, 13168, 13167, 13073, 13138, 13140, 13178, 13179, 12567, 4292, 4293, 13241, 4296, 4299, 4300, 13136, 13190, 13118, 13341, 4305, 4306, 13195, 4308, 13117, 13351, 4311, 12734, 13184, 4314, 13183, 4316, 13138, 13140, 13178, 13179, 12567, 4335, 4336, 13341, 4339, 4340, 13195, 4342, 13137, 4348, 4351, 13190, 13135, 13188, 13359, 13351, 4357, 12734, 13184, 4360, 13183, 13138, 4363, 4364, 4365, 4366, 13138, 13178, 13179, 12567, 4383, 4384, 13241, 4387, 4389, 4391, 4392, 13075, 13074, 13169, 4397, 13153, 13152, 13151, 13206, 13216, 13222, 12567, 4419, 4420, 13111, 4423, 4424, 13195, 13165, 4429, 13169, 13168, 13167, 13113, 4435, 12734, 13184, 4438, 13183, 4444, 13092, 13091, 13090, 13138, 13140, 13178, 13179, 12567, 4466, 4467, 13241, 13193, 4470, 4471, 13195, 4473, 13117, 13182, 4476, 12734, 13184, 4479, 13183, 4482, 4483, 13089, 13136, 13190, 4487, 13153, 13152, 13151, 13155, 13154, 13216, 12567, 4506, 4507, 4509, 4510, 4511, 4512, 12734, 13184, 4515, 13183, 4517, 4518, 4519, 13195, 13165, 4522, 4523, 13169, 13168, 13167, 4527, 4528, 4529, 4530, 13164, 13163, 13162, 4534, 4535, 13092, 13091, 13090, 4539, 4540, 13153, 13152, 13151, 13155, 13154, 13216, 13222, 12567, 4562, 4563, 4566, 13164, 13163, 13162, 13113, 4574, 13169, 13168, 13167, 13182, 4579, 12734, 13184, 4582, 13183, 13111, 4585, 4586, 13195, 13165, 13359, 4590, 4592, 4594, 4595, 13192, 13102, 13191, 4599, 4601, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 13138, 13140, 13178, 13179, 12567, 4629, 4630, 13241, 13351, 4633, 12734, 13184, 4636, 13183, 4638, 4641, 4643, 4644, 4645, 13136, 13190, 13135, 13341, 4650, 4651, 13195, 4653, 13137, 4655, 13153, 13104, 13359, 4668, 13111, 4670, 13112, 4672, 13182, 13113, 4675, 13351, 4679, 4680, 4681, 13195, 4683, 13117, 4685, 4686, 13136, 
13190, 13118, 13351, 13341, 13359, 13200, 13206, 13178, 13179, 12567, 4718, 4719, 13341, 4722, 4723, 13195, 4725, 13194, 4728, 12734, 13184, 4731, 13183, 4738, 13190, 13188, 13122, 13359, 13344, 4749, 4751, 13266, 13273, 4754, 13354, 13362, 13123, 13124, 13125, 13126, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 13153, 13152, 13151, 13155, 13154, 13216, 13222, 12567, 4792, 4793, 4795, 4796, 13164, 13163, 13162, 4800, 4801, 4802, 13169, 13168, 13167, 4806, 4807, 12734, 4809, 4810, 4811, 4812, 13140, 13266, 4817, 4818, 13341, 4820, 4821, 4822, 13138, 13140, 13178, 13222, 12567, 4841, 4842, 13241, 13354, 4846, 4847, 13266, 4852, 13143, 13273, 4859, 4860, 13145, 13144, 13341, 13344, 4866, 4867, 4868, 13148, 13362, 4872, 13200, 13206, 4875, 4880, 4883, 13136, 13190, 13135, 13359, 4888, 13341, 4890, 13195, 4892, 13137, 13351, 4895, 13153, 13152, 13151, 13155, 13154, 13216, 13222, 12567, 4915, 4916, 4918, 4919, 13169, 13168, 13167, 4923, 4924, 4925, 13195, 13165, 4928, 4929, 4930, 12734, 13184, 4933, 13183, 4935, 4936, 13164, 13163, 13162, 4940, 4941, 4942, 13138, 13140, 13178, 13143, 13266, 4955, 4958, 4959, 13145, 13144, 13351, 13354, 4965, 4966, 4967, 13146, 4969, 4970, 4971, 13147, 13341, 13344, 4976, 4977, 4978, 13148, 4980, 4981, 4982, 13149, 13153, 13152, 13151, 13155, 13154, 13216, 13222, 12567, 5003, 5004, 5006, 5007, 13164, 13163, 13162, 5011, 5012, 5013, 13195, 13165, 5016, 5017, 12734, 13184, 5020, 13183, 5022, 5023, 13169, 13168, 13167, 5027, 13191, 13171, 13170, 5031, 5032, 5033, 5034, 13195, 5036, 5037, 5039, 5041, 13173, 13189, 13172, 13200, 13206, 13178, 13179, 12567, 5063, 5064, 13241, 13181, 5067, 13180, 13182, 5070, 12734, 13184, 5073, 13183, 5075, 5081, 5082, 13190, 13189, 13188, 13359, 5087, 13192, 5089, 13191, 13193, 5092, 5093, 13195, 5095, 13194, 11948, 13651, 11954, 13657, 11960, 13663, 11966, 11969, 11972, 11975, 11981, 13675, 13681, 12858, 13687, 12868, 12005, 13693, 12877, 13699, 12019, 12892, 12897, 5241, 13710, 13200, 13206, 13216, 13222, 12567, 5271, 5272, 13241, 13273, 13255, 5284, 13266, 13273, 5290, 13275, 13274, 13718, 12047, 13723, 11812, 11813, 13294, 13301, 13300, 5330, 12685, 12656, 12661, 5344, 12750, 13339, 13338, 13341, 13344, 5356, 13346, 5360, 13349, 13348, 13351, 13354, 12734, 13357, 13359, 13362, 5374, 13364, 13363, 13375, 5382, 13378, 13377, 13733, 13736, 12928, 13740, 13743, 12937, 13747, 13750, 12947, 13754, 13757, 12957, 13761, 13764, 12967, 13768, 13771, 12976, 13775, 13778, 12986, 12990, 13720, 13725, 12086, 13387, 13392, 13725, 13790, 13393, 13394, 12092, 13653, 13659, 13665, 13671, 13677, 13007, 12098, 13014, 13018, 13022, 13026, 13807, 13720, 13725, 13037, 13041, 13816, 13785, 13809, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13062, 13082, 13085, 13088, 13095, 13098, 13101, 13107, 13110, 13160, 13177, 13187, 13198, 13204, 13210, 13214, 13220, 13226, 13229, 13233, 13236, 13240, 13245, 13250, 13254, 13258, 13261, 13264, 13269, 13272, 13279, 13283, 13288, 13292, 13298, 13304, 13308, 13312, 13315, 13318, 13322, 13326, 13329, 13334, 13370, 13824, 4184, 13825, 4187, 12178, 12181, 13386, 4234, 
4235, 4236, 4238, 4239, 4245, 13845, 13829, 4250, 4253, 4254, 4255, 4256, 4258, 4259, 4261, 13830, 4264, 4265, 4266, 13831, 13832, 4270, 4271, 4272, 4273, 4275, 4277, 4280, 4283, 12692, 4291, 4294, 4301, 4302, 4303, 4304, 4307, 4309, 4310, 4312, 4313, 4315, 4318, 4320, 4323, 4326, 12324, 4334, 13845, 4338, 4341, 4343, 4352, 4353, 4354, 4355, 4356, 4358, 4359, 4361, 4362, 4368, 4371, 4374, 12692, 4382, 4385, 13998, 4393, 4394, 4395, 14005, 4400, 4401, 4402, 4404, 4407, 4410, 12692, 4418, 13845, 4422, 4425, 4426, 4430, 4431, 4432, 4433, 13841, 4436, 4437, 4439, 4445, 4446, 4447, 4449, 4451, 4454, 4457, 12692, 4465, 4468, 4469, 4472, 4474, 4475, 4477, 4478, 4480, 4484, 4485, 4486, 4489, 4490, 4491, 4493, 4494, 4497, 12692, 4505, 13845, 4513, 4514, 4516, 4520, 4521, 4524, 4525, 4526, 4531, 4532, 4533, 4536, 4537, 4538, 4542, 4543, 4544, 4546, 4547, 4550, 4553, 12692, 4561, 13845, 4567, 4568, 4569, 4570, 4575, 4576, 4577, 4578, 4580, 4581, 4583, 4584, 4587, 4588, 4589, 14133, 4596, 4597, 4598, 14140, 4612, 4614, 4617, 4620, 12324, 4628, 4631, 4632, 4634, 4635, 4637, 4646, 4647, 4648, 4649, 4652, 4654, 4656, 4657, 4667, 4669, 4671, 4673, 4674, 4678, 4682, 4684, 4687, 4688, 4689, 4690, 4698, 4699, 4701, 4703, 4706, 4709, 12324, 4717, 13845, 4721, 4724, 4726, 13841, 4729, 4730, 4732, 4739, 4740, 4741, 4742, 4747, 13885, 13859, 4752, 4753, 4755, 13889, 4757, 13892, 13843, 4760, 4761, 4762, 13843, 4764, 13842, 13843, 4775, 4776, 4777, 4778, 4779, 4782, 4785, 4791, 13845, 4797, 4798, 4799, 4803, 4804, 4805, 4808, 4813, 4816, 4819, 4824, 4826, 4829, 4832, 12692, 4840, 4843, 4844, 13889, 4849, 4855, 13859, 4858, 4861, 4862, 4863, 4864, 13885, 4869, 14306, 4870, 13892, 4873, 4874, 4884, 4885, 4886, 4887, 4889, 4891, 4893, 4894, 4897, 4898, 4899, 4901, 4902, 4905, 4908, 4914, 13845, 4920, 4921, 4922, 4926, 4927, 4931, 4932, 4934, 4937, 4938, 4939, 4943, 4944, 4947, 4950, 4952, 13859, 4960, 4961, 4962, 4963, 13889, 4968, 14376, 4972, 14380, 4973, 4974, 13885, 4979, 14386, 4983, 14390, 4985, 4986, 4987, 4989, 4990, 4993, 4996, 5002, 13845, 5008, 5009, 5010, 5014, 5015, 5018, 5019, 5021, 5024, 5025, 5026, 5028, 5029, 5030, 5035, 5042, 5043, 5044, 5046, 5048, 5051, 5054, 12692, 5062, 5065, 5066, 5068, 5069, 5071, 5072, 5074, 5083, 5084, 5085, 5086, 5088, 5090, 5091, 5094, 5096, 13652, 13658, 13664, 13676, 13682, 13688, 13694, 13700, 13711, 5256, 5258, 5261, 5264, 5270, 5273, 5276, 13859, 5281, 5286, 5289, 13867, 5292, 5293, 13719, 13724, 5317, 5318, 5321, 5323, 5324, 5331, 5332, 5338, 12685, 12692, 5349, 13884, 5351, 5352, 5353, 5354, 13885, 13887, 5358, 13886, 13888, 5362, 5363, 5364, 5365, 13889, 5367, 13891, 5369, 13890, 5371, 5372, 13892, 13893, 5376, 5377, 13894, 13896, 5381, 13897, 5384, 5385, 13734, 13737, 13741, 13744, 13748, 13751, 13755, 13758, 13762, 13765, 13769, 13772, 13776, 13779, 14552, 14558, 5524, 5526, 5550, 13390, 13388, 5553, 5555, 14561, 14564, 14567, 14570, 5588, 5589, 14069, 14070, 14088, 14071, 14077, 14082, 14088, 14087, 14089, 14088, 14141, 14142, 14145, 14143, 14145, 14144, 14146, 14145, 14147, 14148, 14239, 14245, 14240, 14241, 14242, 14245, 14243, 14244, 14246, 14245, 14257, 14272, 14262, 14263, 14272, 14268, 14271, 14273, 14272, 14274, 14337, 14359, 14342, 14347, 14348, 14354, 14359, 14360, 14359, 14361, 14402, 14433, 14407, 14412, 14418, 14433, 14427, 14429, 14434, 14433, 6015, 6017, 6019, 14481, 14479, 6022, 6024, 14486, 14488, 14491, 14555, 14495, 14494, 14495, 14552, 14555, 14558, 14561, 14564, 14567, 14570, 6135, 6137, 14552, 14555, 14558, 14561, 14564, 14567, 14570, 
13781, 13783, 6219, 13787, 13788, 13789, 13801, 13793, 13795, 13797, 13799, 13801, 13803, 13805, 13806, 6491, 13811, 13813, 13815, 121, 122, 123, 124, 125, 126, 127, 4183, 4186, 14720, 4191, 4192, 14732, 14773, 14733, 14776, 14740, 14737, 14741, 14729, 14738, 4248, 13911, 4249, 14782, 13923, 4262, 14790, 4267, 4268, 14795, 14732, 14733, 14735, 14730, 14753, 14736, 14763, 4285, 14740, 14741, 14737, 14738, 14739, 13939, 14804, 14742, 14746, 14745, 14806, 13952, 13958, 14732, 14733, 14735, 14730, 14753, 14736, 14763, 4328, 14738, 14737, 14739, 14741, 14740, 4337, 13966, 13972, 14744, 14743, 14742, 14749, 14748, 14826, 13984, 14732, 14735, 14734, 14753, 14736, 14763, 4376, 14740, 14739, 14741, 14738, 14737, 13995, 14839, 13432, 14747, 14842, 13438, 14732, 14846, 14733, 14735, 14730, 14753, 14736, 14763, 4412, 14741, 14740, 14738, 14729, 14737, 4421, 14013, 14856, 14726, 14721, 14858, 4434, 14028, 14725, 14728, 14723, 14722, 14866, 14732, 14733, 14735, 14734, 14753, 14736, 14763, 4459, 14739, 14740, 14741, 14737, 14738, 14039, 14874, 14046, 14052, 14747, 14883, 14732, 14886, 14733, 14889, 14735, 14730, 14763, 4499, 14729, 14741, 14740, 14737, 14738, 4508, 14067, 14075, 14898, 14900, 14903, 14906, 14732, 14909, 14733, 14912, 14735, 14730, 14753, 14736, 14763, 4555, 14738, 14740, 14741, 14737, 14729, 4564, 14109, 14724, 14919, 14725, 14749, 14726, 14923, 14124, 14931, 13488, 14935, 13494, 14732, 14733, 14735, 14730, 14753, 14736, 14763, 4622, 14740, 14739, 14737, 14738, 14741, 14154, 14944, 14161, 14744, 14743, 14742, 14950, 14175, 14956, 14749, 14748, 14744, 14728, 14727, 14749, 14748, 14732, 14733, 14194, 14966, 14749, 14748, 14744, 14743, 14742, 14749, 14748, 14732, 14733, 14735, 14730, 14753, 14736, 14763, 4711, 14741, 14739, 14737, 14740, 14738, 4720, 14209, 14215, 4727, 14220, 14744, 14731, 14749, 14748, 14986, 14732, 14733, 14735, 14734, 4748, 4750, 4756, 4758, 4759, 4763, 4765, 4766, 15007, 15010, 14735, 14730, 14753, 14736, 14737, 14741, 14740, 14729, 14738, 4794, 14255, 15016, 15019, 14735, 14734, 14732, 14733, 14735, 14734, 14753, 14736, 14763, 4834, 14739, 14737, 14738, 14740, 14741, 14288, 15031, 4845, 14747, 14746, 14745, 14744, 14743, 14742, 4857, 15038, 4865, 15043, 4871, 14744, 14743, 14742, 14749, 14748, 15050, 14323, 14732, 15058, 14733, 15061, 14735, 14730, 14753, 14736, 14737, 14740, 14738, 14729, 14741, 4917, 14335, 15067, 15070, 14352, 15075, 14735, 14730, 14744, 14743, 14747, 14746, 14745, 14742, 4957, 15083, 4964, 15088, 15090, 4975, 15095, 15097, 14732, 15100, 14733, 15103, 14735, 14730, 14753, 14736, 14737, 14729, 14741, 14738, 14740, 5005, 14400, 15109, 15112, 14416, 15117, 15120, 14432, 15124, 14732, 14733, 14735, 14730, 14753, 14736, 14763, 5056, 14739, 14737, 14741, 14740, 14738, 14444, 15132, 14448, 14454, 14744, 14731, 14749, 14748, 15140, 14465, 14471, 14732, 14733, 14735, 14734, 14753, 14736, 14739, 14737, 14740, 14738, 14741, 14503, 15162, 14749, 14748, 14742, 5278, 14744, 14743, 14746, 14745, 14747, 14749, 14748, 5291, 14751, 14750, 14753, 14752, 14754, 15177, 14759, 14755, 14758, 14760, 14761, 14758, 14756, 14759, 14760, 14761, 14759, 14761, 14760, 14757, 14758, 14762, 5346, 14763, 5348, 5350, 5355, 5357, 5359, 5361, 5366, 5368, 5370, 5373, 5375, 5378, 14764, 5380, 5383, 15215, 5498, 5503, 13716, 14515, 13383, 5551, 5552, 14515, 15221, 5577, 15223, 5579, 15225, 5581, 15227, 5583, 14780, 14783, 14784, 14797, 14808, 14811, 15203, 14822, 14828, 14829, 15085, 15092, 15203, 14962, 14854, 14860, 15052, 14875, 14878, 15203, 5708, 5709, 5710, 5711, 5713, 5715, 5716, 
5718, 5719, 5720, 14960, 14961, 14959, 14957, 14921, 14925, 14929, 14932, 15024, 15203, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 14945, 14952, 15203, 14957, 14959, 14958, 14960, 14961, 14962, 15203, 15024, 14968, 14969, 14970, 14978, 14988, 14999, 15000, 15001, 15003, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5872, 5873, 5875, 5876, 5877, 5879, 5881, 5882, 5883, 5884, 15052, 15053, 15056, 5941, 5942, 5944, 5946, 5947, 5949, 5950, 5952, 5953, 5954, 5979, 5980, 5982, 5984, 5986, 5987, 5990, 5991, 5993, 5994, 15135, 15142, 15145, 14473, 14475, 14477, 6020, 6021, 14483, 13679, 6030, 13685, 6036, 14489, 6038, 13697, 6044, 14493, 6046, 13716, 6070, 14515, 6072, 15215, 6096, 6101, 6102, 15156, 6105, 15223, 6107, 15225, 6109, 15227, 6111, 13716, 14515, 15215, 6165, 15217, 6171, 15219, 6173, 15221, 6179, 15223, 6181, 15225, 6183, 15227, 6185, 6198, 6209, 6227, 6228, 6233, 6236, 6446, 6449, 6452, 6455, 6464, 6474, 6477, 6482, 6501, 6504, 6509, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 14766, 14768, 4190, 4233, 14774, 4237, 4240, 4241, 4242, 4243, 4244, 15374, 14787, 14791, 14796, 4274, 4276, 4278, 4279, 4281, 4282, 4284, 4286, 4287, 4288, 4289, 4290, 15398, 4295, 4297, 4298, 14807, 14810, 14814, 4317, 4319, 4321, 4322, 4324, 4325, 4327, 4329, 4330, 4331, 4332, 4333, 15418, 14824, 4344, 4345, 4347, 4349, 4350, 14827, 14832, 4367, 4369, 4370, 4372, 4373, 4375, 4377, 4378, 4379, 4380, 4381, 15441, 4390, 14843, 4399, 14847, 4403, 4405, 4406, 4408, 4409, 4411, 4413, 4414, 4415, 4416, 4417, 15460, 4427, 4428, 14859, 14864, 4440, 4441, 4442, 4443, 14867, 4448, 4450, 4452, 4453, 4455, 4456, 4458, 4460, 4461, 4462, 4463, 4464, 15487, 14877, 14881, 4481, 14884, 4488, 14887, 4492, 4495, 4496, 4498, 4500, 4501, 4502, 4503, 4504, 15505, 14896, 14901, 14904, 14907, 4541, 14910, 4545, 4548, 4549, 4551, 4552, 4554, 4556, 4557, 4558, 4559, 4560, 15527, 4565, 14920, 4571, 4572, 4573, 14924, 14928, 14936, 4611, 4613, 4615, 4616, 4618, 4619, 4621, 4623, 4624, 4625, 4626, 4627, 15554, 14948, 4639, 4640, 4642, 14951, 14954, 4658, 4659, 4661, 4662, 4663, 4665, 4666, 4676, 4677, 14964, 14967, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4700, 4702, 4704, 4705, 4707, 4708, 4710, 4712, 4713, 4714, 4715, 4716, 15593, 14980, 14984, 4733, 4734, 4736, 4737, 14987, 4743, 4744, 4745, 4746, 15607, 15609, 15610, 15008, 4780, 4781, 4783, 4784, 4786, 4787, 4788, 4789, 4790, 15626, 15017, 15020, 4814, 4815, 4823, 4825, 4827, 4828, 4830, 4831, 4833, 4835, 4836, 4837, 4838, 4839, 15646, 15647, 4848, 4850, 4851, 4853, 4854, 4856, 15039, 15656, 15658, 4876, 4877, 4879, 4881, 4882, 15051, 15055, 4896, 15059, 4900, 4903, 4904, 4906, 4907, 4909, 4910, 4911, 4912, 4913, 15679, 15068, 15073, 15076, 4945, 4946, 4948, 4949, 4951, 4953, 4954, 4956, 15084, 15695, 15698, 4984, 15101, 4988, 4991, 4992, 4994, 4995, 4997, 4998, 4999, 5000, 5001, 15714, 15110, 15115, 15118, 15121, 15125, 5045, 5047, 5049, 5050, 5052, 5053, 5055, 5057, 5058, 5059, 5060, 5061, 15737, 15134, 15138, 5076, 5077, 5079, 5080, 15141, 15144, 15147, 5255, 5257, 5259, 5260, 5262, 5263, 5265, 
5266, 5267, 5268, 5269, 15759, 5274, 5275, 5277, 5279, 5280, 5282, 5283, 5285, 5287, 5288, 15169, 5315, 5316, 5319, 5320, 5322, 5325, 5326, 5327, 5328, 5329, 5333, 5334, 5335, 5336, 5337, 5339, 5340, 5341, 5342, 5343, 5345, 5347, 15185, 15798, 15191, 15194, 15802, 15201, 15805, 15207, 5379, 15213, 5497, 5523, 5525, 5549, 15818, 5554, 15809, 5576, 5578, 5580, 5582, 15376, 15381, 5602, 13915, 5604, 5605, 15379, 15381, 15382, 15381, 5613, 5624, 5626, 5628, 5636, 5643, 5644, 5651, 5652, 5653, 14469, 5662, 14543, 5673, 14017, 5678, 5679, 15466, 5694, 5696, 5700, 15850, 14079, 15854, 15857, 5722, 5723, 5725, 5727, 5738, 5744, 5746, 14128, 5748, 5753, 14469, 5757, 14543, 15870, 15872, 15875, 5776, 5783, 5785, 5793, 5794, 5795, 5796, 5797, 5802, 5803, 5804, 5808, 5813, 5814, 5822, 15596, 5830, 14228, 15611, 15613, 5847, 5848, 5849, 15612, 15613, 5852, 15614, 15613, 15899, 15903, 15907, 15909, 15912, 15916, 14278, 14280, 14281, 14282, 14292, 14293, 14304, 14309, 5930, 5931, 5933, 15922, 14344, 15927, 15929, 14374, 14378, 14384, 14388, 15932, 14409, 15936, 14543, 15940, 6004, 6010, 6012, 6014, 6016, 6018, 15948, 6023, 6029, 6035, 6037, 6043, 6045, 6069, 6071, 15809, 6095, 6104, 6106, 6108, 6110, 6134, 6136, 15809, 6164, 6170, 6172, 6178, 6180, 6182, 6184, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16135, 16137, 16139, 16146, 16148, 16151, 16153, 16158, 16165, 16167, 16170, 16172, 16174, 16177, 16180, 16185, 16187, 16190, 16192, 16201, 16203, 16206, 16208, 16210, 16212, 16216, 16218, 16223, 16225, 16228, 16230, 16241, 16244, 16246, 16248, 16257, 16259, 16262, 16264, 16266, 16278, 16280, 16283, 16285, 16290, 16295, 16297, 16300, 16306, 16308, 16311, 16315, 16317, 16320, 16322, 16324, 16328, 16330, 16335, 16341, 16343, 16345, 16347, 16349, 16353, 16357, 16359, 16362, 16364, 16370, 16372, 16378, 16381, 16388, 16390, 16392, 16394, 16396, 16401, 16403, 16406, 16415, 16417, 16419, 16421, 16423, 16432, 16434, 16437, 16439, 16445, 16447, 16454, 16456, 16458, 16460, 16464, 16467, 16469, 16472, 15170, 16475, 16477, 16480, 16482, 16485, 16487, 16490, 16492, 15186, 15800, 15195, 15804, 15208, 15214, 16478, 16494, 16495, 16470, 16465, 13900, 13898, 15187, 15196, 16452, 16451, 16494, 16494, 16130, 15196, 16465, 16470, 15187, 16452, 16451, 16494, 16495, 15187, 16465, 16470, 15196, 16478, 16494, 16495, 5571, 15807, 16133, 16131, 16494, 16435, 5600, 5601, 5603, 14785, 5607, 5608, 13925, 5610, 5611, 13929, 16144, 16143, 16494, 16149, 16156, 13944, 13950, 14812, 16163, 16162, 16494, 16168, 13970, 16178, 13975, 14830, 16430, 16429, 16407, 16430, 16183, 16494, 16188, 5661, 16195, 14001, 5665, 16199, 16197, 16494, 16204, 5674, 14020, 5680, 14862, 14030, 16221, 16220, 16494, 16226, 14044, 14879, 16235, 14055, 16239, 16237, 16494, 16242, 14894, 5714, 14083, 14090, 14095, 16255, 16253, 16494, 16260, 16267, 16270, 14111, 16269, 16270, 16271, 16270, 14116, 14926, 5747, 16430, 16429, 5754, 16404, 14135, 5758, 16276, 16275, 16494, 16281, 14946, 16291, 14167, 14173, 16430, 16429, 16573, 16302, 16301, 14469, 16404, 14197, 16309, 16313, 16312, 16494, 16318, 14213, 5824, 14982, 14222, 16333, 16332, 15024, 5836, 
16404, 16407, 14232, 15085, 15203, 5845, 5846, 5850, 5851, 5853, 5854, 16430, 16429, 16494, 16435, 14258, 14264, 15021, 16430, 16429, 16407, 16404, 14277, 5896, 15024, 5898, 5899, 5900, 16355, 16354, 16494, 16360, 15085, 5909, 5910, 16368, 16373, 14298, 15040, 5918, 15203, 5920, 16430, 16429, 16379, 14458, 14469, 16386, 16384, 16494, 16435, 14338, 5945, 15071, 14355, 16430, 16429, 16407, 16404, 14368, 15085, 5967, 5968, 15092, 5970, 5971, 16413, 16411, 16494, 16435, 14403, 5983, 15113, 14419, 14423, 5992, 14435, 16430, 16429, 16494, 16435, 14543, 15136, 14458, 15183, 14469, 16452, 16451, 16494, 16495, 15196, 16465, 16470, 15187, 16478, 16494, 16495, 15196, 15187, 6092, 15807, 16452, 16451, 16494, 16495, 15187, 15196, 16470, 16465, 16478, 16494, 16495, 15187, 15196, 15203, 6161, 15807, 16652, 16506, 16652, 16651, 16508, 16507, 16511, 16510, 16509, 16516, 16515, 16514, 16513, 16640, 16639, 16630, 16631, 16629, 16633, 16632, 16638, 16634, 16636, 16635, 16638, 16637, 16640, 16639, 16652, 16642, 16652, 16651, 16646, 16645, 16644, 16643, 16648, 16647, 16652, 16650, 16652, 16651, 16656, 16655, 16654, 16653, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16769, 16774, 16779, 16786, 16790, 16798, 16801, 16806, 16811, 16298, 16822, 16830, 16836, 16844, 16852, 16857, 16863, 16872, 16874, 16876, 5479, 16861, 16860, 5483, 5484, 5485, 16865, 16867, 16864, 5489, 16866, 5491, 5492, 5493, 14532, 5495, 15199, 16861, 16860, 5506, 5507, 5509, 5510, 5511, 5512, 15199, 16864, 16867, 5516, 16865, 5518, 16866, 14511, 5521, 14532, 16861, 16860, 5533, 5534, 5536, 5537, 5538, 14532, 16867, 16865, 16864, 5543, 16866, 5545, 14511, 5547, 15199, 5560, 16870, 16870, 16869, 5565, 5569, 5572, 14547, 16828, 16827, 5595, 5596, 5598, 5599, 16919, 5606, 16923, 5609, 16926, 5612, 16772, 16771, 5616, 5617, 5619, 5620, 16775, 5622, 5623, 5625, 5627, 16777, 16776, 5631, 5632, 5634, 5635, 5637, 16782, 5639, 16781, 16782, 5642, 5645, 16855, 16846, 5648, 5649, 5650, 16784, 16783, 5656, 5657, 5659, 5660, 5663, 5664, 16788, 16787, 5668, 5669, 5671, 5672, 16792, 16840, 5677, 5681, 16793, 16840, 16794, 16840, 5686, 16796, 16795, 5689, 5690, 5692, 5693, 5695, 5697, 5698, 5699, 16828, 16799, 5703, 5704, 5706, 5707, 5712, 5717, 5721, 5724, 16815, 16804, 16803, 5730, 5731, 5733, 5734, 5735, 5736, 5737, 5739, 5740, 5741, 5742, 5743, 5745, 16855, 16826, 5751, 5752, 5755, 5756, 16809, 16808, 5771, 5772, 5774, 5775, 5777, 16864, 5779, 16812, 16867, 5782, 5784, 16855, 16854, 5788, 5789, 16815, 16813, 16855, 16846, 5800, 5801, 5805, 5806, 5807, 16818, 5810, 16817, 16816, 16820, 16819, 5817, 5818, 5820, 5821, 5823, 5825, 16825, 16824, 16825, 5829, 16855, 16826, 5833, 5834, 5835, 16864, 5838, 16867, 16848, 5841, 5842, 5843, 5844, 17036, 17038, 17040, 16828, 16827, 5867, 5868, 5870, 5871, 5874, 5878, 5880, 16855, 16832, 5887, 5888, 5889, 16848, 5891, 16864, 16867, 16847, 5895, 5897, 16834, 16833, 5903, 5904, 5906, 5907, 5908, 5911, 16838, 16864, 16837, 5915, 5916, 5917, 5919, 16855, 16846, 5923, 5924, 16840, 5926, 16839, 16840, 5929, 5932, 16842, 16841, 5936, 5937, 5939, 5940, 5943, 5948, 5951, 16855, 16846, 5957, 5958, 5959, 16848, 16864, 5962, 16867, 16847, 5965, 5966, 5969, 16850, 16849, 5974, 5975, 5977, 5978, 5981, 5985, 
5988, 5989, 5995, 16855, 16854, 5998, 5999, 6001, 6002, 6003, 6005, 16859, 16858, 16859, 6009, 6011, 6013, 16861, 16860, 6053, 6054, 6056, 6057, 6058, 15199, 6060, 16866, 6062, 16865, 16864, 16867, 14511, 6067, 14532, 6077, 16870, 16870, 16869, 6082, 6085, 6088, 15199, 6090, 14532, 6093, 14547, 16861, 16860, 6118, 6119, 6121, 6122, 6123, 14532, 6125, 15199, 16865, 6128, 16866, 16867, 6131, 16864, 14511, 6142, 16870, 16870, 16869, 6150, 6152, 15183, 6154, 14532, 14534, 6157, 15199, 6159, 14543, 6162, 14547, 6196, 6197, 6199, 6200, 6207, 6208, 6216, 6217, 6218, 6229, 6230, 6231, 6232, 6234, 6235, 16519, 16527, 16563, 16561, 16542, 16540, 15848, 15852, 16553, 16554, 16556, 16558, 16563, 16561, 15877, 15868, 17011, 16575, 16593, 16590, 15901, 15905, 15910, 15917, 15924, 15923, 15938, 15933, 6441, 6442, 6443, 6444, 6445, 6447, 6448, 6450, 6451, 6453, 6454, 6462, 6463, 6472, 6473, 6475, 6476, 6478, 6479, 6480, 6481, 6489, 6490, 6499, 6500, 6502, 6503, 6505, 6506, 6507, 6508, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16138, 16154, 16173, 16193, 16209, 16231, 16247, 16265, 16286, 16323, 16348, 16365, 16395, 16422, 16440, 16461, 16483, 16488, 16493, 5480, 5481, 5486, 5487, 5488, 5490, 17312, 5494, 5496, 5504, 5505, 17320, 17323, 5513, 5514, 5515, 5517, 5519, 5520, 5522, 5531, 5532, 17338, 5539, 5540, 5541, 5542, 5544, 5546, 5548, 5561, 5562, 5563, 17358, 5573, 5593, 5594, 17363, 5614, 5615, 17375, 5621, 5629, 5630, 17386, 5638, 5640, 5641, 5646, 5647, 17399, 5654, 5655, 17404, 5666, 5667, 17412, 5675, 5676, 5682, 5683, 5684, 5685, 5687, 5688, 17427, 5701, 5702, 17437, 5726, 5728, 5729, 17448, 17452, 17455, 17457, 5749, 5750, 17463, 5769, 5770, 17469, 5778, 5780, 5781, 5786, 5787, 17482, 5790, 17289, 5792, 5798, 5799, 17488, 5809, 5811, 5812, 5815, 5816, 17499, 5826, 5827, 5828, 5831, 5832, 17511, 5837, 5839, 5840, 5865, 5866, 17527, 5885, 5886, 17536, 5890, 5892, 5893, 5894, 5901, 5902, 17548, 5912, 5913, 5914, 5921, 5922, 17563, 5925, 5927, 5928, 5934, 5935, 17573, 5955, 5956, 17582, 5960, 5961, 5963, 5964, 5972, 5973, 17595, 5996, 5997, 17606, 6006, 6007, 6008, 6051, 6052, 17620, 6059, 6061, 6063, 6064, 6065, 6066, 6068, 6078, 6079, 6080, 6089, 6091, 17644, 6094, 6116, 6117, 17649, 6124, 6126, 6127, 6129, 6130, 6132, 6133, 6143, 6144, 6145, 6153, 6155, 6156, 6158, 6160, 17677, 6163, 17680, 17682, 17684, 17686, 17689, 17691, 17693, 17368, 16522, 16521, 17366, 16521, 6243, 6244, 17370, 17366, 16529, 16530, 16530, 16528, 16532, 16531, 16532, 16533, 17400, 16535, 16536, 16534, 16536, 6266, 17407, 16538, 6269, 6271, 16543, 6274, 16961, 16543, 16546, 16547, 17432, 16547, 16545, 15855, 6287, 16548, 15855, 16550, 16551, 15851, 16548, 6294, 6295, 6296, 16552, 16555, 16555, 6305, 16557, 16560, 6309, 16560, 6314, 17464, 16563, 6317, 16566, 16565, 6320, 15873, 15876, 15876, 16567, 6325, 16566, 16570, 16568, 16570, 16569, 16571, 6336, 6337, 16571, 16574, 16577, 16576, 17490, 16578, 16577, 16581, 16580, 16579, 16581, 16582, 16584, 17023, 16584, 17512, 17520, 17520, 17519, 6363, 17522, 16589, 17523, 16588, 17521, 16589, 6370, 17522, 16598, 6373, 16596, 15900, 16597, 15900, 6378, 16598, 15904, 6382, 15914, 15913, 16601, 16600, 16599, 6388, 16601, 15914, 17675, 17544, 17675, 17590, 17559, 17551, 17559, 17558, 16611, 16610, 16612, 16610, 16615, 16616, 15925, 16613, 6414, 15930, 6416, 16616, 15930, 17591, 17675, 17675, 17590, 16623, 15937, 16625, 15934, 16625, 6431, 6432, 
16621, 15937, 16626, 16627, 16627, 16628, 17723, 17725, 17728, 17730, 17732, 17734, 17736, 17738, 17740, 17742, 17744, 17746, 17748, 17750, 17752, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17811, 17807, 17813, 17815, 17816, 17821, 17807, 17826, 17827, 17828, 17832, 17807, 17836, 17346, 17348, 17841, 17843, 17809, 17809, 17808, 17810, 17809, 17847, 17792, 17850, 17793, 17379, 17854, 17794, 17391, 17858, 17860, 17863, 17795, 17866, 17796, 17869, 17871, 17873, 17875, 17797, 17878, 17798, 17882, 17799, 17888, 17891, 17800, 17474, 17895, 17897, 5791, 17903, 17493, 17907, 17909, 17801, 17912, 17915, 17514, 17919, 17921, 17802, 17924, 17926, 17927, 17929, 17931, 17803, 17933, 17935, 17937, 17565, 17941, 17943, 17804, 17946, 17948, 17586, 17951, 17953, 17805, 17956, 17806, 17959, 17962, 17807, 17965, 17966, 17968, 17971, 17973, 17809, 17810, 17809, 17809, 17808, 17979, 17807, 17657, 17985, 17986, 17988, 17990, 17808, 17809, 17809, 17809, 17810, 17315, 17675, 17313, 17675, 17675, 17333, 17675, 17324, 17341, 17675, 17350, 17675, 17687, 17675, 17632, 17675, 17623, 17675, 17844, 18003, 6238, 6239, 6240, 6241, 6242, 6245, 6246, 6248, 6249, 6250, 6251, 6254, 6256, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6267, 6268, 6272, 6275, 6276, 6280, 6281, 6282, 6283, 6284, 6286, 6288, 6289, 6290, 6291, 6292, 6293, 17880, 17880, 6299, 6300, 6301, 17880, 18051, 17885, 17886, 6307, 6308, 6310, 17884, 17885, 6315, 6316, 6318, 6319, 6321, 6322, 6323, 6324, 6326, 6328, 6330, 6331, 6332, 6334, 6338, 6339, 18079, 6341, 6342, 6343, 6344, 6345, 6346, 6348, 6349, 6350, 6352, 6354, 6355, 6356, 6358, 6360, 6361, 6362, 6364, 6365, 6366, 6367, 6368, 6369, 6371, 6372, 6374, 6375, 6376, 6377, 6379, 6380, 6383, 6384, 6385, 6386, 6387, 6389, 6390, 6393, 6394, 6395, 6396, 6399, 6400, 6401, 6402, 6404, 6405, 6407, 6408, 6410, 6411, 6412, 6413, 6415, 6417, 6418, 6420, 6421, 6422, 6424, 6426, 6427, 6428, 6429, 6430, 6433, 6434, 6436, 6438, 6439, 6440, 18165, 17675, 17632, 17675, 17623, 17642, 17675, 17675, 17640, 17976, 17675, 18173, 17675, 17675, 17654, 17652, 17670, 17675, 17675, 17675, 17996, 17673, 18178, 18169, 17999, 17998, 18000, 18176, 18175, 18004, 18168, 18167, 18166, 18169, 18171, 18170, 18174, 18176, 18175, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17812, 5482, 18307, 17822, 5508, 18312, 17833, 5535, 18317, 18320, 5564, 5566, 5567, 5568, 5570, 17848, 5597, 17851, 5618, 17855, 5633, 18334, 17861, 17864, 5658, 17867, 5670, 17876, 5691, 17879, 5705, 17883, 5732, 17889, 17892, 5773, 18353, 17898, 18355, 17904, 18358, 17910, 5819, 17913, 17916, 18364, 17922, 5869, 17925, 18369, 17932, 5905, 18374, 17938, 18377, 17944, 5938, 17947, 18382, 17954, 5976, 17957, 6000, 17960, 17963, 6055, 18392, 18395, 6081, 6083, 6084, 6086, 6087, 17980, 6120, 18404, 18407, 6146, 6147, 6148, 
6149, 6151, 6191, 6192, 6193, 6194, 6202, 6203, 6204, 6205, 6211, 6212, 6213, 6215, 6221, 6222, 6223, 6224, 6225, 6226, 18434, 18436, 18010, 18438, 18330, 18441, 18443, 18446, 18449, 18451, 18453, 18030, 18341, 18340, 18342, 18455, 18457, 18459, 18461, 18042, 18465, 18467, 18469, 6297, 6298, 6302, 18473, 6304, 6306, 6311, 6312, 18058, 18484, 18063, 18487, 18488, 18490, 18071, 18495, 18499, 18502, 18504, 18508, 18512, 18516, 18518, 18520, 18522, 18106, 18109, 18527, 18529, 18530, 18532, 18534, 18536, 18537, 18541, 18545, 18548, 18552, 18554, 18555, 18556, 18559, 18563, 18565, 18156, 18567, 18571, 17726, 6457, 6459, 6460, 6461, 6466, 6467, 6468, 6469, 6470, 6471, 6485, 6486, 6487, 6488, 6493, 6494, 6495, 6496, 6497, 6498, 6510, 6512, 6513, 6515, 18425, 18432, 6520, 6521, 6522, 6551, 6552, 6553, 6556, 18584, 6559, 6560, 6562, 18595, 6565, 6566, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17303, 18308, 17321, 18313, 17339, 18318, 17356, 18700, 17357, 17364, 17376, 17387, 17405, 17413, 17428, 17438, 17449, 17470, 17901, 17500, 17517, 17528, 18370, 17549, 17556, 17574, 18383, 17596, 17607, 17621, 18393, 17638, 18758, 18759, 17650, 18405, 18766, 18768, 18769, 18771, 18773, 18775, 18777, 18779, 18783, 18785, 18787, 18789, 18791, 6252, 18794, 18709, 18797, 18799, 6273, 6277, 6278, 18806, 18808, 18810, 18812, 18813, 18055, 18479, 18817, 18821, 18823, 18825, 18724, 18829, 18728, 18731, 18834, 18836, 18838, 18840, 18842, 18844, 18742, 18849, 18851, 18854, 18856, 18751, 18861, 18864, 18866, 18868, 18871, 18874, 18876, 18878, 18881, 6517, 6519, 18858, 18889, 6558, 6564, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17304, 17823, 17340, 18951, 18702, 17365, 17377, 17388, 17406, 17414, 17429, 17439, 17450, 17471, 17501, 17529, 17550, 17575, 17597, 17608, 17622, 18976, 18760, 17651, 18981, 17668, 18945, 18984, 18947, 18986, 18949, 18989, 18992, 18993, 6255, 18452, 18033, 19000, 18462, 19003, 19004, 19005, 19007, 19008, 19011, 6329, 18962, 18505, 6347, 6353, 18964, 19017, 19019, 19021, 18966, 18968, 6406, 19024, 18970, 19026, 6437, 18974, 19030, 18979, 19034, 18885, 6554, 18893, 18897, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19076, 19094, 19097, 18688, 6195, 18691, 6206, 18694, 6214, 18990, 18703, 18439, 18705, 18707, 19106, 18710, 18711, 18713, 19108, 19109, 18715, 18717, 18049, 19113, 18719, 19115, 18721, 18492, 18722, 19117, 18725, 6335, 18727, 19120, 18729, 19121, 18732, 6359, 18524, 18531, 18734, 18538, 18736, 6392, 18738, 6398, 18741, 18549, 18743, 18557, 18745, 6423, 18747, 18568, 18749, 19132, 18752, 6458, 19031, 18761, 6484, 19035, 18886, 19138, 18894, 18898, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6190, 19204, 6201, 19206, 6210, 19208, 18697, 6237, 6247, 6253, 18795, 6259, 6265, 6270, 19219, 6279, 6285, 6303, 18818, 6313, 6327, 18826, 6333, 19231, 6340, 18830, 6351, 18831, 6357, 19237, 
6381, 6391, 18539, 6397, 18543, 6403, 19247, 6409, 6419, 19251, 6425, 6435, 18857, 6456, 19257, 18755, 6483, 18869, 18764, 19263, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19333, 6220, 18447, 18496, 18500, 18509, 18513, 18832, 18845, 18846, 18550, 19367, 18572, 19028, 6465, 19032, 6492, 19328, 19330, 19358, 19335, 19358, 19365, 19368, 19347, 19336, 19343, 19345, 19350, 19358, 19358, 19352, 19344, 19339, 19341, 19340, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18781, 18827, 18517, 18542, 18546, 18561, 18862, 18872, 6511, 6514, 19457, 6523, 6524, 6525, 19363, 6527, 6528, 6529, 6530, 19337, 19369, 6534, 6535, 6536, 19348, 19363, 6542, 6543, 19354, 6545, 6546, 6548, 6549, 6550, 19470, 19472, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19332, 6518, 6526, 6531, 19361, 6533, 6537, 19356, 19359, 19366, 6541, 6544, 19350, 19596, 19600, 19602, 19607, 19614, 19617, 19371, 6557, 19374, 6563, 19593, 19592, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6516, 6532, 6538, 6539, 6540, 6547, 19714, 19605, 19610, 19723, 19727, 6555, 6561, 19734, 6570, 6571, 19732, 19713, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19841, 19842, 19844, 19615, 19846, 19729, 6567, 19851, 19851, 19851, 19852, 6574, 19840, 6576, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19847, 19969, 19848, 19730, 19850, 6568, 
6569, 6572, 6573, 6575, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20097, 19973, 20101, 19854, 20103, 19979, 19981, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20224, 20099, 20227, 20229, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20353, 20355, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20480, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20736, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 
82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20864, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127};
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 
0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 128
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 6656
#define SIZE_OF_AC 14464
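// ac() evaluates a fixed arithmetic expression DAG over the input array A.
// Each thread stages 52 values of A into shared memory (52 * 128 threads =
// 6656 = SIZE_OF_IN inputs per block), then for n_iter passes computes 113
// intermediate nodes (113 * 128 = 14464 = SIZE_OF_AC operations): for node k,
// Op[k] selects either the product or the sum of two earlier values addressed
// through the index tables B and C. Thread 0 accumulates the final node and
// writes the result back to A[0] on exit.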
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[165*THREADS_PER_BLOCK];
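// R holds 165 floats per thread: 52 input slots plus 113 intermediate slots.
// With THREADS_PER_BLOCK = 128 this is 165 * 128 * 4 B, roughly 84 KB of
// statically declared shared memory, which exceeds the 48 KB per-block limit
// of many GPUs, so the kernel presumably assumes a device or block size where it fits.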
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
__syncthreads();
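// Each pass of the loop below appears to evaluate the expression tree level by
// level: every statement produces one intermediate slot (52..164), and the
// __syncthreads() barriers separate groups whose operands may have been
// produced by other threads in the preceding group.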
for (int iter=0; iter< n_iter; iter++) {
R[i + 52*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 53*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 54*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 55*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 56*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 57*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 58*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 59*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 60*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 61*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 62*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
__syncthreads();
R[i + 63*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 64*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 65*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 66*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 67*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 68*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 69*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
__syncthreads();
R[i + 70*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 71*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 72*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 73*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 74*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 75*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 76*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 77*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 78*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
__syncthreads();
R[i + 79*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 80*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 81*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 82*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 83*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 84*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 85*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 86*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 87*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
__syncthreads();
R[i + 88*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 89*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 90*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 91*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 92*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 93*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 94*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
__syncthreads();
R[i + 95*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 96*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 97*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 98*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 99*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 100*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 101*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
__syncthreads();
R[i + 102*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 103*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 104*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 105*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 106*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 107*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
__syncthreads();
R[i + 108*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 109*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 110*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 111*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 112*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 113*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 114*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
__syncthreads();
R[i + 115*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 116*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 117*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 118*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 119*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
__syncthreads();
R[i + 120*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 121*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 122*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 123*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 124*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 125*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
__syncthreads();
R[i + 126*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 127*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 128*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 129*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 130*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
R[i + 131*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 132*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 133*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 134*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
__syncthreads();
R[i + 135*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 136*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 137*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 138*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
__syncthreads();
R[i + 139*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 140*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 141*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 142*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
__syncthreads();
R[i + 143*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 144*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 145*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
__syncthreads();
R[i + 146*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 147*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
__syncthreads();
R[i + 148*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
__syncthreads();
R[i + 149*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
__syncthreads();
R[i + 150*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
__syncthreads();
R[i + 151*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
__syncthreads();
R[i + 152*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
__syncthreads();
R[i + 153*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
__syncthreads();
R[i + 154*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
__syncthreads();
R[i + 155*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
__syncthreads();
R[i + 156*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
__syncthreads();
R[i + 157*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
__syncthreads();
R[i + 158*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
__syncthreads();
R[i + 159*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
__syncthreads();
R[i + 160*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
__syncthreads();
R[i + 161*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
__syncthreads();
R[i + 162*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
__syncthreads();
R[i + 163*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
__syncthreads();
R[i + 164*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
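// Thread 0 folds the last intermediate of this pass into the shared accumulator;
// the trailing barrier keeps passes from overlapping.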
if (i==0) { final += R[164*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
802ce32019dd3698b327deafbb9f20ce56f75c16.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexIVFPQ.h>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/impl/ProductQuantizer.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/IVFPQ.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <limits>
namespace faiss { namespace gpu {
GpuIndexIVFPQ::GpuIndexIVFPQ(GpuResources* resources,
const faiss::IndexIVFPQ* index,
GpuIndexIVFPQConfig config) :
GpuIndexIVF(resources,
index->d,
index->metric_type,
index->metric_arg,
index->nlist,
config),
ivfpqConfig_(config),
subQuantizers_(0),
bitsPerCode_(0),
reserveMemoryVecs_(0),
index_(nullptr) {
copyFrom(index);
}
GpuIndexIVFPQ::GpuIndexIVFPQ(GpuResources* resources,
int dims,
int nlist,
int subQuantizers,
int bitsPerCode,
faiss::MetricType metric,
GpuIndexIVFPQConfig config) :
GpuIndexIVF(resources,
dims,
metric,
0,
nlist,
config),
ivfpqConfig_(config),
subQuantizers_(subQuantizers),
bitsPerCode_(bitsPerCode),
reserveMemoryVecs_(0),
index_(nullptr) {
verifySettings_();
// We haven't trained ourselves, so don't construct the PQ index yet
this->is_trained = false;
}
GpuIndexIVFPQ::~GpuIndexIVFPQ() {
delete index_;
}
void
GpuIndexIVFPQ::copyFrom(const faiss::IndexIVFPQ* index) {
DeviceScope scope(device_);
GpuIndexIVF::copyFrom(index);
// Clear out our old data
delete index_;
index_ = nullptr;
subQuantizers_ = index->pq.M;
bitsPerCode_ = index->pq.nbits;
// We only support this
FAISS_THROW_IF_NOT_MSG(index->pq.nbits == 8,
"GPU: only pq.nbits == 8 is supported");
FAISS_THROW_IF_NOT_MSG(index->by_residual,
"GPU: only by_residual = true is supported");
FAISS_THROW_IF_NOT_MSG(index->polysemous_ht == 0,
"GPU: polysemous codes not supported");
verifySettings_();
// The other index might not be trained
if (!index->is_trained) {
// copied in GpuIndex::copyFrom
FAISS_ASSERT(!is_trained);
return;
}
// Copy our lists as well
// The product quantizer must have data in it
FAISS_ASSERT(index->pq.centroids.size() > 0);
index_ = new IVFPQ(resources_,
index->metric_type,
index->metric_arg,
quantizer->getGpuData(),
subQuantizers_,
bitsPerCode_,
(float*) index->pq.centroids.data(),
ivfpqConfig_.indicesOptions,
ivfpqConfig_.useFloat16LookupTables,
memorySpace_);
// Doesn't make sense to reserve memory here
index_->setPrecomputedCodes(ivfpqConfig_.usePrecomputedTables);
// Copy database vectors, if any
const InvertedLists *ivf = index->invlists;
size_t nlist = ivf ? ivf->nlist : 0;
for (size_t i = 0; i < nlist; ++i) {
size_t list_size = ivf->list_size(i);
// GPU index can only support max int entries per list
FAISS_THROW_IF_NOT_FMT(list_size <=
(size_t) std::numeric_limits<int>::max(),
"GPU inverted list can only support "
"%zu entries; %zu found",
(size_t) std::numeric_limits<int>::max(),
list_size);
index_->addCodeVectorsFromCpu(
i, ivf->get_codes(i), ivf->get_ids(i), list_size);
}
}
void
GpuIndexIVFPQ::copyTo(faiss::IndexIVFPQ* index) const {
DeviceScope scope(device_);
// We must have the indices in order to copy to ourselves
FAISS_THROW_IF_NOT_MSG(ivfpqConfig_.indicesOptions != INDICES_IVF,
"Cannot copy to CPU as GPU index doesn't retain "
"indices (INDICES_IVF)");
GpuIndexIVF::copyTo(index);
//
// IndexIVFPQ information
//
index->by_residual = true;
index->use_precomputed_table = 0;
index->code_size = subQuantizers_;
index->pq = faiss::ProductQuantizer(this->d, subQuantizers_, bitsPerCode_);
index->do_polysemous_training = false;
index->polysemous_training = nullptr;
index->scan_table_threshold = 0;
index->max_codes = 0;
index->polysemous_ht = 0;
index->precomputed_table.clear();
InvertedLists *ivf = new ArrayInvertedLists(
nlist, index->code_size);
index->replace_invlists(ivf, true);
if (index_) {
// Copy the inverted lists
for (int i = 0; i < nlist; ++i) {
auto ids = getListIndices(i);
auto codes = getListCodes(i);
index->invlists->add_entries (i, ids.size(), ids.data(), codes.data());
}
// Copy PQ centroids
auto devPQCentroids = index_->getPQCentroids();
index->pq.centroids.resize(devPQCentroids.numElements());
fromDevice<float, 3>(devPQCentroids,
index->pq.centroids.data(),
resources_->getDefaultStream(device_));
if (ivfpqConfig_.usePrecomputedTables) {
index->precompute_table();
}
}
}
void
GpuIndexIVFPQ::reserveMemory(size_t numVecs) {
reserveMemoryVecs_ = numVecs;
if (index_) {
DeviceScope scope(device_);
index_->reserveMemory(numVecs);
}
}
void
GpuIndexIVFPQ::setPrecomputedCodes(bool enable) {
ivfpqConfig_.usePrecomputedTables = enable;
if (index_) {
DeviceScope scope(device_);
index_->setPrecomputedCodes(enable);
}
verifySettings_();
}
bool
GpuIndexIVFPQ::getPrecomputedCodes() const {
return ivfpqConfig_.usePrecomputedTables;
}
int
GpuIndexIVFPQ::getNumSubQuantizers() const {
return subQuantizers_;
}
int
GpuIndexIVFPQ::getBitsPerCode() const {
return bitsPerCode_;
}
int
GpuIndexIVFPQ::getCentroidsPerSubQuantizer() const {
return utils::pow2(bitsPerCode_);
}
size_t
GpuIndexIVFPQ::reclaimMemory() {
if (index_) {
DeviceScope scope(device_);
return index_->reclaimMemory();
}
return 0;
}
void
GpuIndexIVFPQ::reset() {
if (index_) {
DeviceScope scope(device_);
index_->reset();
this->ntotal = 0;
} else {
FAISS_ASSERT(this->ntotal == 0);
}
}
void
GpuIndexIVFPQ::trainResidualQuantizer_(Index::idx_t n, const float* x) {
// Code largely copied from faiss::IndexIVFPQ
// FIXME: GPUize more of this
n = ::min(n, (Index::idx_t) (1 << bitsPerCode_) * 64);
if (this->verbose) {
printf("computing residuals\n");
}
std::vector<Index::idx_t> assign(n);
quantizer->assign (n, x, assign.data());
std::vector<float> residuals(n * d);
// FIXME jhj convert to _n version
for (idx_t i = 0; i < n; i++) {
quantizer->compute_residual(x + i * d, &residuals[i * d], assign[i]);
}
if (this->verbose) {
printf("training %d x %d product quantizer on %ld vectors in %dD\n",
subQuantizers_, getCentroidsPerSubQuantizer(), n, this->d);
}
// Just use the CPU product quantizer to determine sub-centroids
faiss::ProductQuantizer pq(this->d, subQuantizers_, bitsPerCode_);
pq.verbose = this->verbose;
pq.train(n, residuals.data());
index_ = new IVFPQ(resources_,
metric_type,
metric_arg,
quantizer->getGpuData(),
subQuantizers_,
bitsPerCode_,
pq.centroids.data(),
ivfpqConfig_.indicesOptions,
ivfpqConfig_.useFloat16LookupTables,
memorySpace_);
if (reserveMemoryVecs_) {
index_->reserveMemory(reserveMemoryVecs_);
}
index_->setPrecomputedCodes(ivfpqConfig_.usePrecomputedTables);
}
void
GpuIndexIVFPQ::train(Index::idx_t n, const float* x) {
DeviceScope scope(device_);
if (this->is_trained) {
FAISS_ASSERT(quantizer->is_trained);
FAISS_ASSERT(quantizer->ntotal == nlist);
FAISS_ASSERT(index_);
return;
}
FAISS_ASSERT(!index_);
// FIXME: GPUize more of this
// First, make sure that the data is resident on the CPU, if it is not already,
// as we depend upon parts of the CPU code
auto hostData = toHost<float, 2>((float*) x,
resources_->getDefaultStream(device_),
{(int) n, (int) this->d});
trainQuantizer_(n, hostData.data());
trainResidualQuantizer_(n, hostData.data());
FAISS_ASSERT(index_);
this->is_trained = true;
}
void
GpuIndexIVFPQ::addImpl_(int n,
const float* x,
const Index::idx_t* xids) {
// Device is already set in GpuIndex::add
FAISS_ASSERT(index_);
FAISS_ASSERT(n > 0);
// Data is already resident on the GPU
Tensor<float, 2, true> data(const_cast<float*>(x), {n, (int) this->d});
static_assert(sizeof(long) == sizeof(Index::idx_t), "size mismatch");
Tensor<long, 1, true> labels(const_cast<long*>(xids), {n});
// Not all vectors may be able to be added (some may contain NaNs etc)
index_->classifyAndAddVectors(data, labels);
// but keep the ntotal based on the total number of vectors that we attempted
// to add
ntotal += n;
}
void
GpuIndexIVFPQ::searchImpl_(int n,
const float* x,
int k,
float* distances,
Index::idx_t* labels) const {
// Device is already set in GpuIndex::search
FAISS_ASSERT(index_);
FAISS_ASSERT(n > 0);
// Data is already resident on the GPU
Tensor<float, 2, true> queries(const_cast<float*>(x), {n, (int) this->d});
Tensor<float, 2, true> outDistances(distances, {n, k});
static_assert(sizeof(long) == sizeof(Index::idx_t), "size mismatch");
Tensor<long, 2, true> outLabels(const_cast<long*>(labels), {n, k});
index_->query(queries, nprobe, k, outDistances, outLabels);
}
int
GpuIndexIVFPQ::getListLength(int listId) const {
FAISS_ASSERT(index_);
return index_->getListLength(listId);
}
std::vector<unsigned char>
GpuIndexIVFPQ::getListCodes(int listId) const {
FAISS_ASSERT(index_);
DeviceScope scope(device_);
return index_->getListCodes(listId);
}
std::vector<long>
GpuIndexIVFPQ::getListIndices(int listId) const {
FAISS_ASSERT(index_);
DeviceScope scope(device_);
return index_->getListIndices(listId);
}
void
GpuIndexIVFPQ::verifySettings_() const {
// Our implementation has these restrictions:
// Must have some number of lists
FAISS_THROW_IF_NOT_MSG(nlist > 0, "nlist must be >0");
// up to a single byte per code
FAISS_THROW_IF_NOT_FMT(bitsPerCode_ <= 8,
"Bits per code must be <= 8 (passed %d)", bitsPerCode_);
// Sub-quantizers must evenly divide dimensions available
FAISS_THROW_IF_NOT_FMT(this->d % subQuantizers_ == 0,
"Number of sub-quantizers (%d) must be an "
"even divisor of the number of dimensions (%d)",
subQuantizers_, this->d);
// The number of bytes per encoded vector must be one we support
FAISS_THROW_IF_NOT_FMT(IVFPQ::isSupportedPQCodeLength(subQuantizers_),
"Number of bytes per encoded vector / sub-quantizers (%d) "
"is not supported",
subQuantizers_);
// We must have enough shared memory on the current device to store
// our lookup distances
int lookupTableSize = sizeof(float);
if (ivfpqConfig_.useFloat16LookupTables) {
lookupTableSize = sizeof(half);
}
// 64 bytes per code is only supported with usage of float16, at 2^8
// codes per subquantizer
size_t requiredSmemSize =
lookupTableSize * subQuantizers_ * utils::pow2(bitsPerCode_);
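// For example (illustrative numbers): 64 sub-quantizers at 8 bits per code need
// 64 * 256 * sizeof(float) = 64 KB of lookup tables, which typically exceeds
// the 48 KB of shared memory per block; float16 tables halve that to 32 KB.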
size_t smemPerBlock = getMaxSharedMemPerBlock(device_);
FAISS_THROW_IF_NOT_FMT(requiredSmemSize
<= getMaxSharedMemPerBlock(device_),
"Device %d has %zu bytes of shared memory, while "
"%d bits per code and %d sub-quantizers requires %zu "
"bytes. Consider useFloat16LookupTables and/or "
"reduce parameters",
device_, smemPerBlock, bitsPerCode_, subQuantizers_,
requiredSmemSize);
// If precomputed codes are disabled, we have an extra limitation in
// terms of the number of dimensions per subquantizer
FAISS_THROW_IF_NOT_FMT(ivfpqConfig_.usePrecomputedTables ||
IVFPQ::isSupportedNoPrecomputedSubDimSize(
this->d / subQuantizers_),
"Number of dimensions per sub-quantizer (%d) "
"is not currently supported without precomputed codes. "
"Only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims "
"per sub-quantizer are currently supported with no "
"precomputed codes. "
"Precomputed codes supports any number of dimensions, but "
"will involve memory overheads.",
this->d / subQuantizers_);
}
} } // namespace
| 802ce32019dd3698b327deafbb9f20ce56f75c16.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexIVFPQ.h>
#include <faiss/IndexFlat.h>
#include <faiss/IndexIVFPQ.h>
#include <faiss/impl/ProductQuantizer.h>
#include <faiss/gpu/GpuIndexFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/IVFPQ.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <limits>
namespace faiss { namespace gpu {
GpuIndexIVFPQ::GpuIndexIVFPQ(GpuResources* resources,
const faiss::IndexIVFPQ* index,
GpuIndexIVFPQConfig config) :
GpuIndexIVF(resources,
index->d,
index->metric_type,
index->metric_arg,
index->nlist,
config),
ivfpqConfig_(config),
subQuantizers_(0),
bitsPerCode_(0),
reserveMemoryVecs_(0),
index_(nullptr) {
copyFrom(index);
}
GpuIndexIVFPQ::GpuIndexIVFPQ(GpuResources* resources,
int dims,
int nlist,
int subQuantizers,
int bitsPerCode,
faiss::MetricType metric,
GpuIndexIVFPQConfig config) :
GpuIndexIVF(resources,
dims,
metric,
0,
nlist,
config),
ivfpqConfig_(config),
subQuantizers_(subQuantizers),
bitsPerCode_(bitsPerCode),
reserveMemoryVecs_(0),
index_(nullptr) {
verifySettings_();
// We haven't trained ourselves, so don't construct the PQ index yet
this->is_trained = false;
}
GpuIndexIVFPQ::~GpuIndexIVFPQ() {
delete index_;
}
void
GpuIndexIVFPQ::copyFrom(const faiss::IndexIVFPQ* index) {
DeviceScope scope(device_);
GpuIndexIVF::copyFrom(index);
// Clear out our old data
delete index_;
index_ = nullptr;
subQuantizers_ = index->pq.M;
bitsPerCode_ = index->pq.nbits;
// We only support this
FAISS_THROW_IF_NOT_MSG(index->pq.nbits == 8,
"GPU: only pq.nbits == 8 is supported");
FAISS_THROW_IF_NOT_MSG(index->by_residual,
"GPU: only by_residual = true is supported");
FAISS_THROW_IF_NOT_MSG(index->polysemous_ht == 0,
"GPU: polysemous codes not supported");
verifySettings_();
// The other index might not be trained
if (!index->is_trained) {
// copied in GpuIndex::copyFrom
FAISS_ASSERT(!is_trained);
return;
}
// Copy our lists as well
// The product quantizer must have data in it
FAISS_ASSERT(index->pq.centroids.size() > 0);
index_ = new IVFPQ(resources_,
index->metric_type,
index->metric_arg,
quantizer->getGpuData(),
subQuantizers_,
bitsPerCode_,
(float*) index->pq.centroids.data(),
ivfpqConfig_.indicesOptions,
ivfpqConfig_.useFloat16LookupTables,
memorySpace_);
// Doesn't make sense to reserve memory here
index_->setPrecomputedCodes(ivfpqConfig_.usePrecomputedTables);
// Copy database vectors, if any
const InvertedLists *ivf = index->invlists;
size_t nlist = ivf ? ivf->nlist : 0;
for (size_t i = 0; i < nlist; ++i) {
size_t list_size = ivf->list_size(i);
// GPU index can only support max int entries per list
FAISS_THROW_IF_NOT_FMT(list_size <=
(size_t) std::numeric_limits<int>::max(),
"GPU inverted list can only support "
"%zu entries; %zu found",
(size_t) std::numeric_limits<int>::max(),
list_size);
index_->addCodeVectorsFromCpu(
i, ivf->get_codes(i), ivf->get_ids(i), list_size);
}
}
void
GpuIndexIVFPQ::copyTo(faiss::IndexIVFPQ* index) const {
DeviceScope scope(device_);
// We must have the indices in order to copy to ourselves
FAISS_THROW_IF_NOT_MSG(ivfpqConfig_.indicesOptions != INDICES_IVF,
"Cannot copy to CPU as GPU index doesn't retain "
"indices (INDICES_IVF)");
GpuIndexIVF::copyTo(index);
//
// IndexIVFPQ information
//
index->by_residual = true;
index->use_precomputed_table = 0;
index->code_size = subQuantizers_;
index->pq = faiss::ProductQuantizer(this->d, subQuantizers_, bitsPerCode_);
index->do_polysemous_training = false;
index->polysemous_training = nullptr;
index->scan_table_threshold = 0;
index->max_codes = 0;
index->polysemous_ht = 0;
index->precomputed_table.clear();
InvertedLists *ivf = new ArrayInvertedLists(
nlist, index->code_size);
index->replace_invlists(ivf, true);
if (index_) {
// Copy the inverted lists
for (int i = 0; i < nlist; ++i) {
auto ids = getListIndices(i);
auto codes = getListCodes(i);
index->invlists->add_entries (i, ids.size(), ids.data(), codes.data());
}
// Copy PQ centroids
auto devPQCentroids = index_->getPQCentroids();
index->pq.centroids.resize(devPQCentroids.numElements());
fromDevice<float, 3>(devPQCentroids,
index->pq.centroids.data(),
resources_->getDefaultStream(device_));
if (ivfpqConfig_.usePrecomputedTables) {
index->precompute_table();
}
}
}
void
GpuIndexIVFPQ::reserveMemory(size_t numVecs) {
reserveMemoryVecs_ = numVecs;
if (index_) {
DeviceScope scope(device_);
index_->reserveMemory(numVecs);
}
}
void
GpuIndexIVFPQ::setPrecomputedCodes(bool enable) {
ivfpqConfig_.usePrecomputedTables = enable;
if (index_) {
DeviceScope scope(device_);
index_->setPrecomputedCodes(enable);
}
verifySettings_();
}
bool
GpuIndexIVFPQ::getPrecomputedCodes() const {
return ivfpqConfig_.usePrecomputedTables;
}
int
GpuIndexIVFPQ::getNumSubQuantizers() const {
return subQuantizers_;
}
int
GpuIndexIVFPQ::getBitsPerCode() const {
return bitsPerCode_;
}
int
GpuIndexIVFPQ::getCentroidsPerSubQuantizer() const {
return utils::pow2(bitsPerCode_);
}
size_t
GpuIndexIVFPQ::reclaimMemory() {
if (index_) {
DeviceScope scope(device_);
return index_->reclaimMemory();
}
return 0;
}
void
GpuIndexIVFPQ::reset() {
if (index_) {
DeviceScope scope(device_);
index_->reset();
this->ntotal = 0;
} else {
FAISS_ASSERT(this->ntotal == 0);
}
}
void
GpuIndexIVFPQ::trainResidualQuantizer_(Index::idx_t n, const float* x) {
// Code largely copied from faiss::IndexIVFPQ
// FIXME: GPUize more of this
n = std::min(n, (Index::idx_t) (1 << bitsPerCode_) * 64);
if (this->verbose) {
printf("computing residuals\n");
}
std::vector<Index::idx_t> assign(n);
quantizer->assign (n, x, assign.data());
std::vector<float> residuals(n * d);
// FIXME jhj convert to _n version
for (idx_t i = 0; i < n; i++) {
quantizer->compute_residual(x + i * d, &residuals[i * d], assign[i]);
}
if (this->verbose) {
printf("training %d x %d product quantizer on %ld vectors in %dD\n",
subQuantizers_, getCentroidsPerSubQuantizer(), n, this->d);
}
// Just use the CPU product quantizer to determine sub-centroids
faiss::ProductQuantizer pq(this->d, subQuantizers_, bitsPerCode_);
pq.verbose = this->verbose;
pq.train(n, residuals.data());
index_ = new IVFPQ(resources_,
metric_type,
metric_arg,
quantizer->getGpuData(),
subQuantizers_,
bitsPerCode_,
pq.centroids.data(),
ivfpqConfig_.indicesOptions,
ivfpqConfig_.useFloat16LookupTables,
memorySpace_);
if (reserveMemoryVecs_) {
index_->reserveMemory(reserveMemoryVecs_);
}
index_->setPrecomputedCodes(ivfpqConfig_.usePrecomputedTables);
}
void
GpuIndexIVFPQ::train(Index::idx_t n, const float* x) {
DeviceScope scope(device_);
if (this->is_trained) {
FAISS_ASSERT(quantizer->is_trained);
FAISS_ASSERT(quantizer->ntotal == nlist);
FAISS_ASSERT(index_);
return;
}
FAISS_ASSERT(!index_);
// FIXME: GPUize more of this
// First, make sure that the data is resident on the CPU, if it is not already,
// as we depend upon parts of the CPU code
auto hostData = toHost<float, 2>((float*) x,
resources_->getDefaultStream(device_),
{(int) n, (int) this->d});
trainQuantizer_(n, hostData.data());
trainResidualQuantizer_(n, hostData.data());
FAISS_ASSERT(index_);
this->is_trained = true;
}
void
GpuIndexIVFPQ::addImpl_(int n,
const float* x,
const Index::idx_t* xids) {
// Device is already set in GpuIndex::add
FAISS_ASSERT(index_);
FAISS_ASSERT(n > 0);
// Data is already resident on the GPU
Tensor<float, 2, true> data(const_cast<float*>(x), {n, (int) this->d});
static_assert(sizeof(long) == sizeof(Index::idx_t), "size mismatch");
Tensor<long, 1, true> labels(const_cast<long*>(xids), {n});
// Not all vectors may be able to be added (some may contain NaNs etc)
index_->classifyAndAddVectors(data, labels);
// but keep the ntotal based on the total number of vectors that we attempted
// to add
ntotal += n;
}
void
GpuIndexIVFPQ::searchImpl_(int n,
const float* x,
int k,
float* distances,
Index::idx_t* labels) const {
// Device is already set in GpuIndex::search
FAISS_ASSERT(index_);
FAISS_ASSERT(n > 0);
// Data is already resident on the GPU
Tensor<float, 2, true> queries(const_cast<float*>(x), {n, (int) this->d});
Tensor<float, 2, true> outDistances(distances, {n, k});
static_assert(sizeof(long) == sizeof(Index::idx_t), "size mismatch");
Tensor<long, 2, true> outLabels(const_cast<long*>(labels), {n, k});
index_->query(queries, nprobe, k, outDistances, outLabels);
}
int
GpuIndexIVFPQ::getListLength(int listId) const {
FAISS_ASSERT(index_);
return index_->getListLength(listId);
}
std::vector<unsigned char>
GpuIndexIVFPQ::getListCodes(int listId) const {
FAISS_ASSERT(index_);
DeviceScope scope(device_);
return index_->getListCodes(listId);
}
std::vector<long>
GpuIndexIVFPQ::getListIndices(int listId) const {
FAISS_ASSERT(index_);
DeviceScope scope(device_);
return index_->getListIndices(listId);
}
void
GpuIndexIVFPQ::verifySettings_() const {
// Our implementation has these restrictions:
// Must have some number of lists
FAISS_THROW_IF_NOT_MSG(nlist > 0, "nlist must be >0");
// up to a single byte per code
FAISS_THROW_IF_NOT_FMT(bitsPerCode_ <= 8,
"Bits per code must be <= 8 (passed %d)", bitsPerCode_);
// Sub-quantizers must evenly divide dimensions available
FAISS_THROW_IF_NOT_FMT(this->d % subQuantizers_ == 0,
"Number of sub-quantizers (%d) must be an "
"even divisor of the number of dimensions (%d)",
subQuantizers_, this->d);
// The number of bytes per encoded vector must be one we support
FAISS_THROW_IF_NOT_FMT(IVFPQ::isSupportedPQCodeLength(subQuantizers_),
"Number of bytes per encoded vector / sub-quantizers (%d) "
"is not supported",
subQuantizers_);
// We must have enough shared memory on the current device to store
// our lookup distances
int lookupTableSize = sizeof(float);
if (ivfpqConfig_.useFloat16LookupTables) {
lookupTableSize = sizeof(half);
}
// 64 bytes per code is only supported with usage of float16, at 2^8
// codes per subquantizer
size_t requiredSmemSize =
lookupTableSize * subQuantizers_ * utils::pow2(bitsPerCode_);
size_t smemPerBlock = getMaxSharedMemPerBlock(device_);
FAISS_THROW_IF_NOT_FMT(requiredSmemSize
<= getMaxSharedMemPerBlock(device_),
"Device %d has %zu bytes of shared memory, while "
"%d bits per code and %d sub-quantizers requires %zu "
"bytes. Consider useFloat16LookupTables and/or "
"reduce parameters",
device_, smemPerBlock, bitsPerCode_, subQuantizers_,
requiredSmemSize);
// If precomputed codes are disabled, we have an extra limitation in
// terms of the number of dimensions per subquantizer
FAISS_THROW_IF_NOT_FMT(ivfpqConfig_.usePrecomputedTables ||
IVFPQ::isSupportedNoPrecomputedSubDimSize(
this->d / subQuantizers_),
"Number of dimensions per sub-quantizer (%d) "
"is not currently supported without precomputed codes. "
"Only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims "
"per sub-quantizer are currently supported with no "
"precomputed codes. "
"Precomputed codes supports any number of dimensions, but "
"will involve memory overheads.",
this->d / subQuantizers_);
}
} } // namespace
|
b072ad5014d62bcd3ede57e367c5ac59233ebd74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zidr_smoothing_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *drs,
magmaDoubleComplex *dr,
magmaDoubleComplex *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
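// Usage sketch (argument names are illustrative): for m x n device-resident
// vectors, magma_zidr_smoothing_1( m, n, drs, dr, dt, queue ) overwrites dt
// with the elementwise difference drs - dr, as documented above.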
extern "C"
magma_int_t
magma_zidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr drs,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zidr_smoothing_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
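// Usage sketch (argument names are illustrative): magma_zidr_smoothing_2( m, n,
// omega, dx, dxs, queue ) applies the in-place smoothing update to dxs with
// weight omega, as documented above.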
extern "C"
magma_int_t
magma_zidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_zidr_smoothing_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
| b072ad5014d62bcd3ede57e367c5ac59233ebd74.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
// These routines merge multiple kernels from zidr into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_zidr_smoothing_1_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex *drs,
magmaDoubleComplex *dr,
magmaDoubleComplex *dt )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dt[ i+j*num_rows ] = drs[ i+j*num_rows ] - dr[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dt = drs - dr
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
drs magmaDoubleComplex_ptr
vector
@param[in]
dr magmaDoubleComplex_ptr
vector
@param[in,out]
dt magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_1(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex_ptr drs,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dt,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zidr_smoothing_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, drs, dr, dt );
return MAGMA_SUCCESS;
}
__global__ void
magma_zidr_smoothing_2_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex *dx,
magmaDoubleComplex *dxs )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
dxs[ i+j*num_rows ] = dxs[ i+j*num_rows ] + omega * dxs[ i+j*num_rows ]
- omega * dx[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
dxs = dxs - gamma*(dxs-dx)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
omega magmaDoubleComplex
scalar
@param[in]
dx magmaDoubleComplex_ptr
vector
@param[in,out]
dxs magmaDoubleComplex_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_zidr_smoothing_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDoubleComplex omega,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dxs,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_zidr_smoothing_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, omega, dx, dxs);
return MAGMA_SUCCESS;
}
|
6a204cb55ffae92a2b516d3cefb825260fb91fbc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "AddSleep.cu"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
int NUM_THREADS;
int JOBS_PER_THREAD;
int QUEUE_SIZE=12800;
int SLEEP_TIME;
int MALLOC_SIZE;
int LOOP_SIZE;
extern "C"
void sleep_wrapper(int SLEEP_TIME);
/*
int main(int argc, char **argv){
printf("Starting AddSleep Test\n");
if(argc>4){
NUM_THREADS = atoi(argv[1]);
JOBS_PER_THREAD = atoi(argv[2]);
SLEEP_TIME = atoi(argv[3]);
MALLOC_SIZE = atoi(argv[4]);
LOOP_SIZE = atoi(argv[5]);
}else{
printf("This test requires five parameters:\n");
printf(" int NUM_THREADS, int JOBS_PER_THREAD, int SLEEP_TIME, int MALLOC_SIZE, int LOOP_SIZE\n");
printf("where NUM_THREADS is the number of seperate threads that will be sending work into gemtc\n");
printf(" JOBS_PER_THREAD is the number of tasks that a given thread will submit to gemtc\n");
printf(" SLEEP_TIME is the parameter that will be given to each AddSleep micro-kernel, in microseconds\n");
printf(" MALLOC_SIZE is the amount of memory that will be allocated and transfered with each sleep\n");
printf(" This number must be a multiple of 4, to comply with cuda's memory requirements\n");
printf(" LOOP_SIZE is the number of tasks a thread will submit to gemtc before waiting for results\n");
exit(1);
}
sleep_wrapper(SLEEP_TIME);
return 0;
}
*/
void sleep_wrapper(int SLEEP_TIME){
int nkernels = 1; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = SLEEP_TIME; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t prop;
if (hipGetDeviceProperties(&prop, dev) == hipSuccess){
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
prop.name, (int)prop.totalGlobalMem, (int)prop.major,
(int)prop.minor, (int)prop.clockRate);
}
clock_t time_clocks = (clock_t)(kernel_time * (int)prop.clockRate);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
hipMalloc((void **)&d_a, nbytes);
// run the task
// wrapAddSleep<<<1,1>>>(d_sleepTime);
hipLaunchKernelGGL(( clock_block), dim3(1),dim3(1), 0, 0, &d_a[0], time_clocks);
// wait
hipDeviceSynchronize();
// return
// return 0;
}
| 6a204cb55ffae92a2b516d3cefb825260fb91fbc.cu | #include <stdio.h>
#include <stdlib.h>
#include "AddSleep.cu"
#include <cuda.h>
#include <cuda_runtime.h>
int NUM_THREADS;
int JOBS_PER_THREAD;
int QUEUE_SIZE=12800;
int SLEEP_TIME;
int MALLOC_SIZE;
int LOOP_SIZE;
extern "C"
void sleep_wrapper(int SLEEP_TIME);
/*
int main(int argc, char **argv){
printf("Starting AddSleep Test\n");
if(argc>4){
NUM_THREADS = atoi(argv[1]);
JOBS_PER_THREAD = atoi(argv[2]);
SLEEP_TIME = atoi(argv[3]);
MALLOC_SIZE = atoi(argv[4]);
LOOP_SIZE = atoi(argv[5]);
}else{
printf("This test requires five parameters:\n");
printf(" int NUM_THREADS, int JOBS_PER_THREAD, int SLEEP_TIME, int MALLOC_SIZE, int LOOP_SIZE\n");
printf("where NUM_THREADS is the number of seperate threads that will be sending work into gemtc\n");
printf(" JOBS_PER_THREAD is the number of tasks that a given thread will submit to gemtc\n");
printf(" SLEEP_TIME is the parameter that will be given to each AddSleep micro-kernel, in microseconds\n");
printf(" MALLOC_SIZE is the amount of memory that will be allocated and transfered with each sleep\n");
printf(" This number must be a multiple of 4, to comply with cuda's memory requirements\n");
printf(" LOOP_SIZE is the number of tasks a thread will submit to gemtc before waiting for results\n");
exit(1);
}
sleep_wrapper(SLEEP_TIME);
return 0;
}
*/
void sleep_wrapper(int SLEEP_TIME){
int nkernels = 1; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernel
int nbytes = nkernels * sizeof(clock_t); // number of data bytes
float kernel_time = SLEEP_TIME; // time the kernel should run in ms
float elapsed_time; // timing variables
int cuda_device = 0;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp prop;
if (cudaGetDeviceProperties(&prop, dev) == cudaSuccess){
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
prop.name, (int)prop.totalGlobalMem, (int)prop.major,
(int)prop.minor, (int)prop.clockRate);
}
clock_t time_clocks = (clock_t)(kernel_time * (int)prop.clockRate);
// allocate host memory
clock_t *a = 0; // pointer to the array data in host memory
// allocate device memory
clock_t *d_a = 0; // pointers to data and init value in the device memory
cudaMalloc((void **)&d_a, nbytes);
// run the task
// wrapAddSleep<<<1,1>>>(d_sleepTime);
clock_block<<<1,1>>>(&d_a[0], time_clocks);
// wait
cudaDeviceSynchronize();
// return
// return 0;
}
|
cc08860f91128ba9ea40fe5f3a2a4eec32577889.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//STL
#include <iostream>
#include <string>
#include <vector>
using namespace std;
string childInput;
unsigned i;
vector < float > inputVec;
string letter, subFp; const string sep( "_" );
//=========================== gpu ===========================
__device__ float d_Array[ 30 ]; //static gpu array
__global__ void printKernel()
{
unsigned ind = threadIdx.x;
printf( "d_Array[%i]: %f\n", ind, d_Array[ ind ] );
}
int main( int argc, char* argv[] )
{
childInput = argv[ argc - 1 ];
for ( i = 0; i < ( unsigned )childInput.size(); i++ )
{
letter = childInput[ i ];
if ( letter.compare( sep ) != 0 )
subFp.append( letter );
else
{
inputVec.push_back( stof( subFp ) );
subFp.clear();
}
}
hipMemcpyToSymbol( d_Array, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
hipLaunchKernelGGL(( printKernel), dim3(1), dim3((unsigned)inputVec.size()) , 0, 0, );
hipFree( d_Array );
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| cc08860f91128ba9ea40fe5f3a2a4eec32577889.cu | //STL
#include <iostream>
#include <string>
#include <vector>
using namespace std;
string childInput;
unsigned i;
vector < float > inputVec;
string letter, subFp; const string sep( "_" );
//=========================== gpu ===========================
__device__ float d_Array[ 30 ]; //static gpu array
__global__ void printKernel()
{
unsigned ind = threadIdx.x;
printf( "d_Array[%i]: %f\n", ind, d_Array[ ind ] );
}
int main( int argc, char* argv[] )
{
childInput = argv[ argc - 1 ];
for ( i = 0; i < ( unsigned )childInput.size(); i++ )
{
letter = childInput[ i ];
if ( letter.compare( sep ) != 0 )
subFp.append( letter );
else
{
inputVec.push_back( stof( subFp ) );
subFp.clear();
}
}
cudaMemcpyToSymbol( d_Array, &inputVec[ 0 ], sizeof( float ) * ( unsigned )inputVec.size() );
printKernel<<< 1, (unsigned)inputVec.size() >>> ();
cudaFree( d_Array );
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
f12319942acb4ab3ce1645396e77fd5be78c5d0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void somme( int taille, float * a, float * b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]+b[index];
}
__global__ void prod( int taille, float * a, float b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]*b;
} | f12319942acb4ab3ce1645396e77fd5be78c5d0f.cu | __global__ void somme( int taille, float * a, float * b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]+b[index];
}
__global__ void prod( int taille, float * a, float b, float *c ){
int index=threadIdx.x+blockDim.x*blockIdx.x;
if(index>=taille) return;
c[index]=a[index]*b;
} |
435d0bca76fb3fcd2b1b41d7817a2dafc13be498.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "math_functions.cuh"
#include <hipsparse.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPUtils.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include <torch/extension.h>
#include <torch/script.h>
namespace minkowski {
hipDataType getTensorCudaDataType(torch::Tensor const &self) {
hipDataType cuda_data_type;
switch (self.scalar_type()) {
case torch::ScalarType::Float:
cuda_data_type = HIP_R_32F;
break;
case torch::ScalarType::Double:
cuda_data_type = HIP_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
template <typename th_int_type>
torch::Tensor coo_spmm(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t spmm_algorithm_id) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
hipsparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = HIPSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = HIPSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 hipsparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = HIPSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = HIPSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64 || (is_int64 && (mm_alg ==
// CUSPARSE_SPMM_COO_ALG4)));
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
TORCH_CHECK(rows.scalar_type() == int_scalar_type, "int type mismatch.");
TORCH_CHECK(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
TORCH_CHECK(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
TORCH_CHECK(vals.scalar_type() == mat2.scalar_type(),
"vals and mat2 must have the same scalar type.");
TORCH_CHECK(rows.is_cuda(), "rows must be CUDA, but got CPU");
TORCH_CHECK(cols.is_cuda(), "cols must be CUDA, but got CPU");
TORCH_CHECK(vals.is_cuda(), "vals must be CUDA, but got CPU");
TORCH_CHECK(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
TORCH_CHECK(at::cuda::check_device({rows, cols, vals, mat2}));
TORCH_CHECK(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
torch::Tensor result = at::empty({dim_k, dim_i}, mat2.options());
if ((dim_j == 0) || (dim_k == 0)) {
return result;
}
// Dense matrices have to be contiguous for hipsparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
if (nnz == 0) {
result.transpose_(0, 1);
result.zero_();
return result;
}
hipDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *values_ptr = reinterpret_cast<scalar_t *>(vals.data_ptr());
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
//////////////////////////////////////
// Sort the sparse matrix COO
th_int_type *sorted_row_ptr =
(th_int_type *)c10::hip::HIPCachingAllocator::raw_alloc(
2 * (nnz + 1) * sizeof(th_int_type));
th_int_type *sorted_col_ptr = sorted_row_ptr + nnz + 1;
scalar_t *sorted_val_ptr =
(scalar_t *)c10::hip::HIPCachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
LOG_DEBUG("Allocated sorted row col val", nnz);
// Copy the indices
CUDA_CHECK(hipMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type), hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type), hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(sorted_val_ptr, values_ptr, nnz * sizeof(scalar_t),
hipMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
thrust::make_zip_iterator( // value begin
thrust::make_tuple( //
sorted_col_ptr, //
sorted_val_ptr //
) //
));
LOG_DEBUG("Sorted row");
//////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
hipsparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(hipsparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val_ptr), //
std::is_same<th_int_type, int32_t>::value ? HIPSPARSE_INDEX_32I
: HIPSPARSE_INDEX_64I,
HIPSPARSE_INDEX_BASE_ZERO, cuda_data_type));
hipsparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
hipsparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(hipsparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, HIPSPARSE_ORDER_COL));
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(hipsparseSpMM_bufferSize(
cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
HIPSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("hipMallocManaged");
hipMallocManaged(&workspace_buffer, workspace_buffer_size);
}
LOG_DEBUG("SPMM");
CUSPARSE_CHECK(hipsparseSpMM(cusparse_handle, //
HIPSPARSE_OPERATION_NON_TRANSPOSE, //
HIPSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
CUSPARSE_CHECK(hipsparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(hipsparseDestroyDnMat(result_descr));
LOG_DEBUG("Dealloc");
c10::hip::HIPCachingAllocator::raw_delete((void *)sorted_row_ptr);
c10::hip::HIPCachingAllocator::raw_delete((void *)sorted_val_ptr);
if (workspace_buffer != nullptr) {
hipFree(workspace_buffer);
}
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(hipGetLastError());
return result;
}
template torch::Tensor
coo_spmm<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t spmm_algorithm_id);
// template torch::Tensor
// coo_spmm<int64_t>(torch::Tensor const &rows, torch::Tensor const &cols,
// torch::Tensor const &vals, int64_t const dim_i,
// int64_t const dim_j, torch::Tensor const &mat2,
// int64_t spmm_algorithm_id);
} // namespace minkowski
| 435d0bca76fb3fcd2b1b41d7817a2dafc13be498.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) 2018-2020 Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "gpu.cuh"
#include "math_functions.cuh"
#include <cusparse.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAUtils.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include <torch/extension.h>
#include <torch/script.h>
namespace minkowski {
cudaDataType getTensorCudaDataType(torch::Tensor const &self) {
cudaDataType cuda_data_type;
switch (self.scalar_type()) {
case torch::ScalarType::Float:
cuda_data_type = CUDA_R_32F;
break;
case torch::ScalarType::Double:
cuda_data_type = CUDA_R_64F;
break;
default:
TORCH_CHECK(false, "Tensor types must be either float32 or float64");
break;
}
return cuda_data_type;
}
template <typename th_int_type>
torch::Tensor coo_spmm(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t spmm_algorithm_id) {
#if defined __HIP_PLATFORM_HCC__
TORCH_CHECK(false, "spmm sparse-dense is not supported on HIP");
#elif defined(_WIN32) || defined(_WIN64)
TORCH_CHECK(false, "spmm sparse-dense CUDA is not supported on Windows");
#elif !defined(CUDART_VERSION)
TORCH_CHECK(false, "CUDART_VERSION not defined");
#endif
constexpr bool is_int32 = std::is_same<th_int_type, int32_t>::value;
constexpr bool is_int64 = std::is_same<th_int_type, int64_t>::value;
cusparseSpMMAlg_t mm_alg;
#if defined(CUDART_VERSION) && (CUDART_VERSION < 10010)
TORCH_CHECK(false, "spmm sparse-dense requires CUDA 10.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 10010) && \
(CUDART_VERSION < 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_COOMM_ALG1;
break;
case 2:
mm_alg = CUSPARSE_COOMM_ALG2;
break;
case 3:
mm_alg = CUSPARSE_COOMM_ALG3;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_MM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 cusparseSpMM requires CUDA 11.1 or greater");
#elif defined(CUDART_VERSION) && (CUDART_VERSION >= 11000)
switch (spmm_algorithm_id) {
case 1:
mm_alg = CUSPARSE_SPMM_COO_ALG1;
break;
case 2:
mm_alg = CUSPARSE_SPMM_COO_ALG2;
break;
case 3:
mm_alg = CUSPARSE_SPMM_COO_ALG3;
break;
case 4:
mm_alg = CUSPARSE_SPMM_COO_ALG4;
break;
default:
TORCH_CHECK(false, "Invalid algorithm id.", spmm_algorithm_id);
mm_alg = CUSPARSE_SPMM_ALG_DEFAULT;
}
TORCH_CHECK(is_int32, "int64 coosort not implemented");
// coosort not supported with int64 || (is_int64 && (mm_alg ==
// CUSPARSE_SPMM_COO_ALG4)));
#endif
at::ScalarType int_scalar_type = std::is_same<th_int_type, int32_t>::value
? at::ScalarType::Int
: at::ScalarType::Long;
TORCH_CHECK(rows.scalar_type() == int_scalar_type, "int type mismatch.");
TORCH_CHECK(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
TORCH_CHECK(rows.scalar_type() == cols.scalar_type(),
"rows and cols must have the same scalar type.");
TORCH_CHECK(vals.scalar_type() == mat2.scalar_type(),
"vals and mat2 must have the same scalar type.");
TORCH_CHECK(rows.is_cuda(), "rows must be CUDA, but got CPU");
TORCH_CHECK(cols.is_cuda(), "cols must be CUDA, but got CPU");
TORCH_CHECK(vals.is_cuda(), "vals must be CUDA, but got CPU");
TORCH_CHECK(mat2.is_cuda(), "mat2 must be CUDA, but got CPU");
TORCH_CHECK(at::cuda::check_device({rows, cols, vals, mat2}));
TORCH_CHECK(mat2.dim() == 2, "Tensor 'mat2' must have 2 dims, but has ",
mat2.dim());
// int64_t dim_i = self.size(0);
// int64_t dim_j = self.size(1);
int64_t dim_k = mat2.size(1);
torch::Tensor result = at::empty({dim_k, dim_i}, mat2.options());
if ((dim_j == 0) || (dim_k == 0)) {
return result;
}
// Dense matrices have to be contiguous for cusparseSpMM to work
torch::Tensor const mat2_contig = mat2.contiguous();
auto cusparse_handle = at::cuda::getCurrentCUDASparseHandle();
torch::Scalar beta = 0;
torch::Scalar alpha = 1;
// Create tensors to view just the current set of matrices
int64_t const nnz = rows.numel();
if (nnz == 0) {
result.transpose_(0, 1);
result.zero_();
return result;
}
cudaDataType cuda_data_type = getTensorCudaDataType(mat2_contig);
th_int_type *row_indices_ptr =
reinterpret_cast<th_int_type *>(rows.data_ptr());
th_int_type *col_indices_ptr =
reinterpret_cast<th_int_type *>(cols.data_ptr());
// Iterate through each set of 2D matrices within the 3D
// tensor inputs, performing a matrix multiply with each
AT_DISPATCH_FLOATING_TYPES(vals.scalar_type(), "coo_spmm", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t *values_ptr = reinterpret_cast<scalar_t *>(vals.data_ptr());
scalar_t *mat2_ptr = reinterpret_cast<scalar_t *>(mat2_contig.data_ptr());
scalar_t *result_ptr = reinterpret_cast<scalar_t *>(result.data_ptr());
//////////////////////////////////////
// Sort the sparse matrix COO
th_int_type *sorted_row_ptr =
(th_int_type *)c10::cuda::CUDACachingAllocator::raw_alloc(
2 * (nnz + 1) * sizeof(th_int_type));
th_int_type *sorted_col_ptr = sorted_row_ptr + nnz + 1;
scalar_t *sorted_val_ptr =
(scalar_t *)c10::cuda::CUDACachingAllocator::raw_alloc(
nnz * sizeof(scalar_t));
LOG_DEBUG("Allocated sorted row col val", nnz);
// Copy the indices
CUDA_CHECK(cudaMemcpy(sorted_row_ptr, row_indices_ptr,
nnz * sizeof(th_int_type), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_col_ptr, col_indices_ptr,
nnz * sizeof(th_int_type), cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(sorted_val_ptr, values_ptr, nnz * sizeof(scalar_t),
cudaMemcpyDeviceToDevice));
thrust::sort_by_key(thrust::device, //
sorted_row_ptr, // key begin
sorted_row_ptr + nnz, // key end
thrust::make_zip_iterator( // value begin
thrust::make_tuple( //
sorted_col_ptr, //
sorted_val_ptr //
) //
));
LOG_DEBUG("Sorted row");
//////////////////////////////////////
size_t workspace_buffer_size = 0;
void *workspace_buffer = nullptr;
cusparseSpMatDescr_t sparse_descr;
CUSPARSE_CHECK(cusparseCreateCoo(
&sparse_descr, //
dim_i, dim_j, nnz, //
reinterpret_cast<void *>(sorted_row_ptr),
reinterpret_cast<void *>(sorted_col_ptr),
reinterpret_cast<void *>(sorted_val_ptr), //
std::is_same<th_int_type, int32_t>::value ? CUSPARSE_INDEX_32I
: CUSPARSE_INDEX_64I,
CUSPARSE_INDEX_BASE_ZERO, cuda_data_type));
cusparseDnMatDescr_t dense_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&dense_descr, //
dim_k, dim_j, dim_k, //
reinterpret_cast<void *>(mat2_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
cusparseDnMatDescr_t result_descr;
CUSPARSE_CHECK(cusparseCreateDnMat(&result_descr, //
dim_i, dim_k, dim_i, //
reinterpret_cast<void *>(result_ptr), //
cuda_data_type, CUSPARSE_ORDER_COL));
size_t required_workspace_buffer_size = 0;
CUSPARSE_CHECK(cusparseSpMM_bufferSize(
cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
CUSPARSE_OPERATION_TRANSPOSE, (void *)&alpha_val, sparse_descr,
dense_descr, (void *)&beta_val, result_descr, cuda_data_type, mm_alg,
&required_workspace_buffer_size));
LOG_DEBUG("Buffer size:", required_workspace_buffer_size);
if (required_workspace_buffer_size > workspace_buffer_size) {
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
workspace_buffer_size = required_workspace_buffer_size;
LOG_DEBUG("cudaMallocManaged");
cudaMallocManaged(&workspace_buffer, workspace_buffer_size);
}
LOG_DEBUG("SPMM");
CUSPARSE_CHECK(cusparseSpMM(cusparse_handle, //
CUSPARSE_OPERATION_NON_TRANSPOSE, //
CUSPARSE_OPERATION_TRANSPOSE, //
(void *)&alpha_val, //
sparse_descr, dense_descr, //
(void *)&beta_val, result_descr, //
cuda_data_type, mm_alg, workspace_buffer));
CUSPARSE_CHECK(cusparseDestroySpMat(sparse_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(dense_descr));
CUSPARSE_CHECK(cusparseDestroyDnMat(result_descr));
LOG_DEBUG("Dealloc");
c10::cuda::CUDACachingAllocator::raw_delete((void *)sorted_row_ptr);
c10::cuda::CUDACachingAllocator::raw_delete((void *)sorted_val_ptr);
if (workspace_buffer != nullptr) {
cudaFree(workspace_buffer);
}
});
// Need to transpose the result matrices since cusparse stores
// them in column-major order in memory
result.transpose_(0, 1);
CUDA_CHECK(cudaGetLastError());
return result;
}
template torch::Tensor
coo_spmm<int32_t>(torch::Tensor const &rows, torch::Tensor const &cols,
torch::Tensor const &vals, int64_t const dim_i,
int64_t const dim_j, torch::Tensor const &mat2,
int64_t spmm_algorithm_id);
// template torch::Tensor
// coo_spmm<int64_t>(torch::Tensor const &rows, torch::Tensor const &cols,
// torch::Tensor const &vals, int64_t const dim_i,
// int64_t const dim_j, torch::Tensor const &mat2,
// int64_t spmm_algorithm_id);
} // namespace minkowski
|
da0d8286c226df4c45ca13e3d557ce16610aebda.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
#include <chrono>
#include <string>
void blur(const unsigned char* rgb_in, unsigned char* rgb_out_blur, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char hg = rgb_in[3 * ((row - 1) * cols + col - 1) + rgb];
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char hd = rgb_in[3 * ((row - 1) * cols + col + 1) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char bg = rgb_in[3 * ((row + 1) * cols + col - 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
unsigned char bd = rgb_in[3 * ((row + 1) * cols + col + 1) + rgb];
rgb_out_blur[3 * (row * cols + col) + rgb] = (hg + h + hd + g + c + d + bg + b + bd) / 9;
}
}
}
}
void sharpen(const unsigned char* rgb_in, unsigned char* rgb_out_sharpen, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
int somme = (-3 * (h + g + d + b) + 21 * c) / 9;
if (somme > 255) somme = 255;
if (somme < 0) somme = 0;
rgb_out_sharpen[3 * (row * cols + col) + rgb] = somme;
}
}
}
}
void edge_detect(const unsigned char* rgb_in, unsigned char* rgb_out_edge_detect, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
int somme = (9 * (h + g + d + b) - 36 * c) / 9;
if (somme > 255) somme = 255;
if (somme < 0) somme = 0;
rgb_out_edge_detect[3 * (row * cols + col) + rgb] = somme;
}
}
}
}
void main_blur(const unsigned char* rgb_in, unsigned char* rgb_out_blur, int rows, int cols) {
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
blur(rgb_in, rgb_out_blur, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "blur_seq: " << elapsedTime << std::endl;
}
void main_sharpen(const unsigned char* rgb_in, unsigned char* rgb_out_sharpen, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
sharpen(rgb_in, rgb_out_sharpen, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "sharpen_seq: " << elapsedTime << std::endl;
}
void main_edge_detect(const unsigned char* rgb_in, unsigned char* rgb_out_edge_detect, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
edge_detect(rgb_in, rgb_out_edge_detect, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "edge_detect_seq: " << elapsedTime << std::endl;
}
void main_blur_edge_detect(const unsigned char* rgb_in, unsigned char* rgb_tmp_blur_edge_detect,
unsigned char* rgb_out_blur_edge_detect, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
blur(rgb_in, rgb_tmp_blur_edge_detect, rows, cols);
edge_detect(rgb_tmp_blur_edge_detect, rgb_out_blur_edge_detect, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "blur_edge_detect_seq: " << elapsedTime << std::endl;
}
void main_edge_detect_blur(const unsigned char* rgb_in, unsigned char* rgb_tmp_edge_detect_blur,
unsigned char* rgb_out_edge_detect_blur, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
edge_detect(rgb_in, rgb_tmp_edge_detect_blur, rows, cols);
blur(rgb_tmp_edge_detect_blur, rgb_out_edge_detect_blur, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "edge_detect_blur_seq: " << elapsedTime << std::endl;
}
int main(int argc, char *argv[])
{
// Declarations
hipError_t err;
std::string filename = std::string(argv[1]) + std::string(".") + std::string(argv[2]);
std::string out(argv[1]);
if (out == "in") {
out = std::string("out");
}
cv::Mat m_in = cv::imread(filename, cv::IMREAD_UNCHANGED);
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
size_t taille_rgb = 3 * rows * cols;
std::vector< unsigned char > g_blur(taille_rgb);
std::vector< unsigned char > g_sharpen(taille_rgb);
std::vector< unsigned char > g_edge_detect(taille_rgb);
std::vector< unsigned char > g_blur_edge_detect(taille_rgb);
std::vector< unsigned char > g_edge_detect_blur(taille_rgb);
cv::Mat m_out_blur(rows, cols, CV_8UC3, g_blur.data());
cv::Mat m_out_sharpen(rows, cols, CV_8UC3, g_sharpen.data());
cv::Mat m_out_edge_detect(rows, cols, CV_8UC3, g_edge_detect.data());
cv::Mat m_out_blur_edge_detect(rows, cols, CV_8UC3, g_blur_edge_detect.data());
cv::Mat m_out_edge_detect_blur(rows, cols, CV_8UC3, g_edge_detect_blur.data());
unsigned char* rgb_in = nullptr;
unsigned char* rgb_out_blur = nullptr;
unsigned char* rgb_out_sharpen = nullptr;
unsigned char* rgb_out_edge_detect = nullptr;
unsigned char* rgb_tmp_blur_edge_detect = nullptr;
unsigned char* rgb_tmp_edge_detect_blur = nullptr;
unsigned char* rgb_out_blur_edge_detect = nullptr;
unsigned char* rgb_out_edge_detect_blur = nullptr;
// Initialize kernel data
err = hipHostMalloc(&rgb_in, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_out_blur, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_out_sharpen, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_out_edge_detect, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_tmp_blur_edge_detect, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_tmp_edge_detect_blur, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_out_blur_edge_detect, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipHostMalloc(&rgb_out_edge_detect_blur, taille_rgb);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMemcpy(rgb_in, rgb, taille_rgb, hipMemcpyHostToDevice);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
// Execution
main_blur(rgb_in, rgb_out_blur, rows, cols);
main_sharpen(rgb_in, rgb_out_sharpen, rows, cols);
main_edge_detect(rgb_in, rgb_out_edge_detect, rows, cols);
main_blur_edge_detect(rgb_in, rgb_tmp_blur_edge_detect, rgb_out_blur_edge_detect, rows, cols);
main_edge_detect_blur(rgb_in, rgb_tmp_edge_detect_blur, rgb_out_edge_detect_blur, rows, cols);
// Retrieve kernel results
err = hipMemcpy(g_blur.data(), rgb_out_blur, taille_rgb, hipMemcpyDeviceToHost);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMemcpy(g_sharpen.data(), rgb_out_sharpen, taille_rgb, hipMemcpyDeviceToHost);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMemcpy(g_edge_detect.data(), rgb_out_edge_detect, taille_rgb, hipMemcpyDeviceToHost);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMemcpy(g_blur_edge_detect.data(), rgb_out_blur_edge_detect, taille_rgb, hipMemcpyDeviceToHost);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
err = hipMemcpy(g_edge_detect_blur.data(), rgb_out_edge_detect_blur, taille_rgb, hipMemcpyDeviceToHost);
if ( err != hipSuccess ) { std::cerr << "Error" << std::endl; }
cv::imwrite(out + std::string("_seq_blur.") + std::string(argv[2]), m_out_blur);
cv::imwrite(out + std::string("_seq_sharpen.") + std::string(argv[2]), m_out_sharpen);
cv::imwrite(out + std::string("_seq_edge_detect.") + std::string(argv[2]), m_out_edge_detect);
cv::imwrite(out + std::string("_seq_blur_edge_detect.") + std::string(argv[2]), m_out_blur_edge_detect);
cv::imwrite(out + std::string("_seq_edge_detect_blur.") + std::string(argv[2]), m_out_edge_detect_blur);
// Free memory
hipHostFree(rgb_in);
hipHostFree(rgb_out_blur);
hipHostFree(rgb_out_sharpen);
hipHostFree(rgb_out_edge_detect);
hipHostFree(rgb_tmp_blur_edge_detect);
hipHostFree(rgb_tmp_edge_detect_blur);
hipHostFree(rgb_out_blur_edge_detect);
hipHostFree(rgb_out_edge_detect_blur);
return 0;
} | da0d8286c226df4c45ca13e3d557ce16610aebda.cu | #include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
#include <chrono>
#include <string>
void blur(const unsigned char* rgb_in, unsigned char* rgb_out_blur, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char hg = rgb_in[3 * ((row - 1) * cols + col - 1) + rgb];
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char hd = rgb_in[3 * ((row - 1) * cols + col + 1) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char bg = rgb_in[3 * ((row + 1) * cols + col - 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
unsigned char bd = rgb_in[3 * ((row + 1) * cols + col + 1) + rgb];
rgb_out_blur[3 * (row * cols + col) + rgb] = (hg + h + hd + g + c + d + bg + b + bd) / 9;
}
}
}
}
void sharpen(const unsigned char* rgb_in, unsigned char* rgb_out_sharpen, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
int somme = (-3 * (h + g + d + b) + 21 * c) / 9;
if (somme > 255) somme = 255;
if (somme < 0) somme = 0;
rgb_out_sharpen[3 * (row * cols + col) + rgb] = somme;
}
}
}
}
void edge_detect(const unsigned char* rgb_in, unsigned char* rgb_out_edge_detect, int rows, int cols) {
for (std::size_t row = 1; row < rows - 1; ++row) {
for (std::size_t col = 1; col < cols - 1; ++col) {
for (std::size_t rgb = 0; rgb < 3; ++rgb)
{
unsigned char h = rgb_in[3 * ((row - 1) * cols + col) + rgb];
unsigned char g = rgb_in[3 * (row * cols + col - 1) + rgb];
unsigned char c = rgb_in[3 * (row * cols + col) + rgb];
unsigned char d = rgb_in[3 * (row * cols + col + 1) + rgb];
unsigned char b = rgb_in[3 * ((row + 1) * cols + col) + rgb];
int somme = (9 * (h + g + d + b) - 36 * c) / 9;
if (somme > 255) somme = 255;
if (somme < 0) somme = 0;
rgb_out_edge_detect[3 * (row * cols + col) + rgb] = somme;
}
}
}
}
void main_blur(const unsigned char* rgb_in, unsigned char* rgb_out_blur, int rows, int cols) {
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
blur(rgb_in, rgb_out_blur, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "blur_seq: " << elapsedTime << std::endl;
}
void main_sharpen(const unsigned char* rgb_in, unsigned char* rgb_out_sharpen, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
sharpen(rgb_in, rgb_out_sharpen, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "sharpen_seq: " << elapsedTime << std::endl;
}
void main_edge_detect(const unsigned char* rgb_in, unsigned char* rgb_out_edge_detect, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
edge_detect(rgb_in, rgb_out_edge_detect, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "edge_detect_seq: " << elapsedTime << std::endl;
}
void main_blur_edge_detect(const unsigned char* rgb_in, unsigned char* rgb_tmp_blur_edge_detect,
unsigned char* rgb_out_blur_edge_detect, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
blur(rgb_in, rgb_tmp_blur_edge_detect, rows, cols);
edge_detect(rgb_tmp_blur_edge_detect, rgb_out_blur_edge_detect, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "blur_edge_detect_seq: " << elapsedTime << std::endl;
}
void main_edge_detect_blur(const unsigned char* rgb_in, unsigned char* rgb_tmp_edge_detect_blur,
unsigned char* rgb_out_edge_detect_blur, int rows, int cols)
{
//Start timing
std::chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
edge_detect(rgb_in, rgb_tmp_edge_detect_blur, rows, cols);
blur(rgb_tmp_edge_detect_blur, rgb_out_edge_detect_blur, rows, cols);
//Stop timing
end = std::chrono::system_clock::now();
int elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end-start).count();
std::cout << "edge_detect_blur_seq: " << elapsedTime << std::endl;
}
int main(int argc, char *argv[])
{
// Declarations
cudaError_t err;
std::string filename = std::string(argv[1]) + std::string(".") + std::string(argv[2]);
std::string out(argv[1]);
if (out == "in") {
out = std::string("out");
}
cv::Mat m_in = cv::imread(filename, cv::IMREAD_UNCHANGED);
auto rgb = m_in.data;
auto rows = m_in.rows;
auto cols = m_in.cols;
size_t taille_rgb = 3 * rows * cols;
std::vector< unsigned char > g_blur(taille_rgb);
std::vector< unsigned char > g_sharpen(taille_rgb);
std::vector< unsigned char > g_edge_detect(taille_rgb);
std::vector< unsigned char > g_blur_edge_detect(taille_rgb);
std::vector< unsigned char > g_edge_detect_blur(taille_rgb);
cv::Mat m_out_blur(rows, cols, CV_8UC3, g_blur.data());
cv::Mat m_out_sharpen(rows, cols, CV_8UC3, g_sharpen.data());
cv::Mat m_out_edge_detect(rows, cols, CV_8UC3, g_edge_detect.data());
cv::Mat m_out_blur_edge_detect(rows, cols, CV_8UC3, g_blur_edge_detect.data());
cv::Mat m_out_edge_detect_blur(rows, cols, CV_8UC3, g_edge_detect_blur.data());
unsigned char* rgb_in = nullptr;
unsigned char* rgb_out_blur = nullptr;
unsigned char* rgb_out_sharpen = nullptr;
unsigned char* rgb_out_edge_detect = nullptr;
unsigned char* rgb_tmp_blur_edge_detect = nullptr;
unsigned char* rgb_tmp_edge_detect_blur = nullptr;
unsigned char* rgb_out_blur_edge_detect = nullptr;
unsigned char* rgb_out_edge_detect_blur = nullptr;
// Initialize kernel data
err = cudaMallocHost(&rgb_in, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_out_blur, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_out_sharpen, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_out_edge_detect, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_tmp_blur_edge_detect, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_tmp_edge_detect_blur, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_out_blur_edge_detect, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMallocHost(&rgb_out_edge_detect_blur, taille_rgb);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMemcpy(rgb_in, rgb, taille_rgb, cudaMemcpyHostToDevice);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
// Execution
main_blur(rgb_in, rgb_out_blur, rows, cols);
main_sharpen(rgb_in, rgb_out_sharpen, rows, cols);
main_edge_detect(rgb_in, rgb_out_edge_detect, rows, cols);
main_blur_edge_detect(rgb_in, rgb_tmp_blur_edge_detect, rgb_out_blur_edge_detect, rows, cols);
main_edge_detect_blur(rgb_in, rgb_tmp_edge_detect_blur, rgb_out_edge_detect_blur, rows, cols);
// Retrieve kernel results
err = cudaMemcpy(g_blur.data(), rgb_out_blur, taille_rgb, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMemcpy(g_sharpen.data(), rgb_out_sharpen, taille_rgb, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMemcpy(g_edge_detect.data(), rgb_out_edge_detect, taille_rgb, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMemcpy(g_blur_edge_detect.data(), rgb_out_blur_edge_detect, taille_rgb, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
err = cudaMemcpy(g_edge_detect_blur.data(), rgb_out_edge_detect_blur, taille_rgb, cudaMemcpyDeviceToHost);
if ( err != cudaSuccess ) { std::cerr << "Error" << std::endl; }
cv::imwrite(out + std::string("_seq_blur.") + std::string(argv[2]), m_out_blur);
cv::imwrite(out + std::string("_seq_sharpen.") + std::string(argv[2]), m_out_sharpen);
cv::imwrite(out + std::string("_seq_edge_detect.") + std::string(argv[2]), m_out_edge_detect);
cv::imwrite(out + std::string("_seq_blur_edge_detect.") + std::string(argv[2]), m_out_blur_edge_detect);
cv::imwrite(out + std::string("_seq_edge_detect_blur.") + std::string(argv[2]), m_out_edge_detect_blur);
// Free memory
cudaFreeHost(rgb_in);
cudaFreeHost(rgb_out_blur);
cudaFreeHost(rgb_out_sharpen);
cudaFreeHost(rgb_out_edge_detect);
cudaFreeHost(rgb_tmp_blur_edge_detect);
cudaFreeHost(rgb_tmp_edge_detect_blur);
cudaFreeHost(rgb_out_blur_edge_detect);
cudaFreeHost(rgb_out_edge_detect_blur);
return 0;
} |
9d9f06c94c06f60af52a81a6c6a7958f6c8c7ef3.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variation of Union Find (UF) that calls FindAndCompress instead of simple Find used by UF.
// FindAndCompress updates the label of the starting pixel at each iteration of the loop.
// This means that, if the equivalence tree is like this:
// A
// /
// B
// /
// C
// then the first iteration updates the label of C, assigning it value B, and the second iteration assigns A.
// This way, another thread reading C during the process will find an updated value and will avoid a step.
// This algorithm performs better than BUF only sometimes (rarely?).
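// Illustrative example (hypothetical indices, not part of the original code): suppose
// s_buf encodes the chain C -> B -> A, i.e. s_buf[C] == B + 1, s_buf[B] == A + 1 and
// s_buf[A] == A + 1. FindCompress(s_buf, C) walks up the chain and, at every step,
// rewrites s_buf[C] with the label it just read, so when it returns the root A the
// entry s_buf[C] already holds A + 1 and a concurrent Find(C) reaches the root in a
// single hop.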
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Returns the root index of the UFTree
__device__ unsigned FindCompress(int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned id = n;
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
s_buf[id] = label;
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
//Check the 4 neighbors of the mask
//Try synchronizing after each neighbor
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_row = threadIdx.y;
unsigned local_col = threadIdx.x;
unsigned local_index = local_row * BLOCK_COLS + local_col;
unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
unsigned img_index = global_row * img.step + global_col;
__shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
__shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
bool in_limits = (global_row < img.rows && global_col < img.cols);
s_buf[local_index] = local_index + 1;
s_img[local_index] = in_limits ? img[img_index] : 0xFF;
__syncthreads();
unsigned char v = s_img[local_index];
if (in_limits) {
if (v) {
if (local_col > 0 && s_img[local_index - 1]) {
Union(s_buf, local_index, local_index - 1);
}
if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
Union(s_buf, local_index, local_index - BLOCK_COLS);
}
}
else {
if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
if (local_col > 0 && s_img[local_index - 1]) {
Union(s_buf, local_index - 1, local_index - BLOCK_COLS);
}
if (local_col < BLOCK_COLS - 1 && s_img[local_index + 1]) {
Union(s_buf, local_index + 1, local_index - BLOCK_COLS);
}
}
}
}
__syncthreads();
if (in_limits) {
if (v) {
unsigned f = FindCompress(s_buf, local_index);
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
labels.data[global_row * labels.step / sizeof(int) + global_col] = global_f + 1; // There is a distinction between background and foreground
}
else {
labels.data[global_row * labels.step / sizeof(int) + global_col] = 0;
}
}
}
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_row = threadIdx.y;
unsigned local_col = threadIdx.x;
unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
bool in_limits = (global_row < img.rows && global_col < img.cols);
if (in_limits) {
unsigned char v = img[img_index];
if (v) {
if (global_col > 0 && local_col == 0 && img[img_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
if (global_row > 0 && local_row == 0 && img[img_index - img.step]) {
Union(labels.data, labels_index, labels_index - labels.step / sizeof(int));
}
}
else {
if (global_row > 0 && img[img_index - img.step]) {
if (global_col > 0 && (local_row == 0 || local_col == 0) && img[img_index - 1]) {
Union(labels.data, labels_index - 1, labels_index - labels.step / sizeof(int));
}
if ((global_col < img.cols - 1) && (local_row == 0 || local_col == BLOCK_COLS - 1) && img[img_index + 1]) {
Union(labels.data, labels_index + 1, labels_index - labels.step / sizeof(int));
}
}
}
}
}
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < labels.rows && global_col < labels.cols) {
unsigned char val = img[global_row * img.step + global_col];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_InlineCompression : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_InlineCompression() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
// Phase 1
// Label the pixels locally within each block
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Link together the union-find trees of the different blocks
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
// Collapse the union-find trees onto their roots
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
void GlobalScan() {
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_InlineCompression);
| 9d9f06c94c06f60af52a81a6c6a7958f6c8c7ef3.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variation of Union Find (UF) that calls FindAndCompress instead of simple Find used by UF.
// FindAndCompress updates the label of the starting pixel at each iteration of the loop.
// This means that, if the equivalence tree is like this:
// A
// /
// B
// /
// C
// then the first iteration updates the label of C, assigning it value B, and the second iteration assigns A.
// This way, another thread reading C during the process will find an updated value and will avoid a step.
// This algorithm performs better than BUF only sometimes (rarely?).
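// Illustrative example (hypothetical indices, not part of the original code): suppose
// s_buf encodes the chain C -> B -> A, i.e. s_buf[C] == B + 1, s_buf[B] == A + 1 and
// s_buf[A] == A + 1. FindCompress(s_buf, C) walks up the chain and, at every step,
// rewrites s_buf[C] with the label it just read, so when it returns the root A the
// entry s_buf[C] already holds A + 1 and a concurrent Find(C) reaches the root in a
// single hop.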
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Returns the root index of the UFTree
__device__ unsigned FindCompress(int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned id = n;
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
s_buf[id] = label;
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
//Check the 4 neighbors of the mask
//Try synchronizing after each neighbor
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_row = threadIdx.y;
unsigned local_col = threadIdx.x;
unsigned local_index = local_row * BLOCK_COLS + local_col;
unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
unsigned img_index = global_row * img.step + global_col;
__shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
__shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
bool in_limits = (global_row < img.rows && global_col < img.cols);
s_buf[local_index] = local_index + 1;
s_img[local_index] = in_limits ? img[img_index] : 0xFF;
__syncthreads();
unsigned char v = s_img[local_index];
if (in_limits) {
if (v) {
if (local_col > 0 && s_img[local_index - 1]) {
Union(s_buf, local_index, local_index - 1);
}
if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
Union(s_buf, local_index, local_index - BLOCK_COLS);
}
}
else {
if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
if (local_col > 0 && s_img[local_index - 1]) {
Union(s_buf, local_index - 1, local_index - BLOCK_COLS);
}
if (local_col < BLOCK_COLS - 1 && s_img[local_index + 1]) {
Union(s_buf, local_index + 1, local_index - BLOCK_COLS);
}
}
}
}
__syncthreads();
if (in_limits) {
if (v) {
unsigned f = FindCompress(s_buf, local_index);
unsigned f_row = f / BLOCK_COLS;
unsigned f_col = f % BLOCK_COLS;
unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
labels.data[global_row * labels.step / sizeof(int) + global_col] = global_f + 1; // There is a distinction between background and foreground
}
else {
labels.data[global_row * labels.step / sizeof(int) + global_col] = 0;
}
}
}
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned local_row = threadIdx.y;
unsigned local_col = threadIdx.x;
unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
unsigned img_index = global_row * img.step + global_col;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
bool in_limits = (global_row < img.rows && global_col < img.cols);
if (in_limits) {
unsigned char v = img[img_index];
if (v) {
if (global_col > 0 && local_col == 0 && img[img_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
if (global_row > 0 && local_row == 0 && img[img_index - img.step]) {
Union(labels.data, labels_index, labels_index - labels.step / sizeof(int));
}
}
else {
if (global_row > 0 && img[img_index - img.step]) {
if (global_col > 0 && (local_row == 0 || local_col == 0) && img[img_index - 1]) {
Union(labels.data, labels_index - 1, labels_index - labels.step / sizeof(int));
}
if ((global_col < img.cols - 1) && (local_row == 0 || local_col == BLOCK_COLS - 1) && img[img_index + 1]) {
Union(labels.data, labels_index + 1, labels_index - labels.step / sizeof(int));
}
}
}
}
}
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
if (global_row < labels.rows && global_col < labels.cols) {
unsigned char val = img[global_row * img.step + global_col];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_InlineCompression : public GpuLabeling2D<Connectivity2D::CONN_8> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_InlineCompression() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.size(), CV_32SC1);
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
// Phase 1
// Label the pixels locally within the block
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
// Link together the union-find trees of the different blocks
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
// Collapse the union-find trees onto their roots
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.size(), CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
perf_.stop();
perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
perf_.start();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_InlineCompression);
|
98087346ee138b082ee0dbe939160a4955a608c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h"
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/accumulation_type.h"
#include "core/providers/cuda/shared_inc/cuda_call.h"
namespace onnxruntime {
namespace cuda {
namespace gather_grad_internal {
// Note:
// For these implementations, first we generate sorted lists of dX and dY
// indices, ordered by dX indices. Then, we can consider segments of the sorted
// lists.
//
// Each continuous run of indices with the same dX value in dX_indices_sorted
// forms a segment.
//
// For example, given:
// dX_indices_sorted = [1, 1, 2, 2, 2, 3]
// dY_indices_sorted = [1, 4, 0, 3, 5, 2]
// The segments will be: '--' '-----' '-'
//
// The segments can be processed in parallel, or further divided into partial
// segments for increased parallelism.
// unit for handling indexing and counting of segments or partial segments
using SegmentIndex_t = GatheredIndexIndex_t;
constexpr GatheredIndexIndex_t kMaxPartialSegmentSize = 10;
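// Worked example, using the indices from the note above with
// kMaxPartialSegmentSize == 10:
//   dX_indices_sorted = [1, 1, 2, 2, 2, 3]
//   segment_counts    = [2, 3, 1]   (run lengths of equal dX indices)
//   segment_offsets   = [0, 2, 5]   (exclusive prefix sum of the counts)
//   partials/segment  = [1, 1, 1]   (CeilDiv(count, kMaxPartialSegmentSize))
// so every segment here fits in a single partial segment.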
template <typename TInputIterator, typename TOutputIterator>
__global__ void CopyKernel(TOutputIterator dst, TInputIterator src, int64_t length) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, length);
dst[id] = src[id];
}
// get sorted dX and dY indices, ordered by dX indices
template <typename TIndex>
void GetSortedIndices(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const TIndex* dX_indices,
GatheredIndexIndex_t num_gathered_indices,
IAllocatorUniquePtr<TIndex>& dX_indices_sorted_out,
IAllocatorUniquePtr<TIndex>& dY_indices_sorted_out) {
auto dY_indices = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
hipLaunchKernelGGL(( CopyKernel), dim3(CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock)),
dim3(GridDim::maxThreadsPerBlock), 0, stream,
dY_indices.get(), hipcub::CountingInputIterator<TIndex>{0}, num_gathered_indices);
auto dX_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
auto dY_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_size_bytes,
dX_indices, dX_indices_sorted.get(),
dY_indices.get(), dY_indices_sorted.get(),
num_gathered_indices, 0, sizeof(TIndex)*8, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(hipcub::DeviceRadixSort::SortPairs(
temp_storage.get(), temp_storage_size_bytes,
dX_indices, dX_indices_sorted.get(),
dY_indices.get(), dY_indices_sorted.get(),
num_gathered_indices, 0, sizeof(TIndex)*8, stream));
dX_indices_sorted_out = std::move(dX_indices_sorted);
dY_indices_sorted_out = std::move(dY_indices_sorted);
}
template <typename T>
IAllocatorUniquePtr<T> GetOffsetsFromCounts(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* counts, int32_t num_counts) {
auto offsets = allocator.GetScratchBuffer<T>(num_counts);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(hipcub::DeviceScan::ExclusiveSum(
nullptr, temp_storage_size_bytes,
counts, offsets.get(), num_counts, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(hipcub::DeviceScan::ExclusiveSum(
temp_storage.get(), temp_storage_size_bytes,
counts, offsets.get(), num_counts, stream));
return offsets;
}
// adapted from here:
// https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/Embedding.cu#L121
template <typename T, typename TIndex, int NumElementsPerThread>
__global__ void DirectSumKernel(
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches) {
GatheredIndexIndex_t idx = blockIdx.x * 4 + threadIdx.y;
if (idx < num_gathered_indices && (idx == 0 || dX_indices_sorted[idx] != dX_indices_sorted[idx - 1])) {
do {
// All index values are expected to be within bounds [-s, s-1] along axis of size s.
auto target_row = dX_indices_sorted[idx];
if (target_row < 0) target_row += gather_dimension_size;
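// e.g. with gather_dimension_size == 5, an index of -1 wraps to row 4 and -5 wraps to row 0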
for (int64_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
const auto gathered_element_idx_start = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread;
const auto dX_row_offset =
(batch_idx * gather_dimension_size + target_row) * num_gathered_per_index;
const auto dY_row_offset =
(batch_idx * num_gathered_indices + dY_indices_sorted[idx]) * num_gathered_per_index;
AccumulationType_t<T> dY_value[NumElementsPerThread];
AccumulationType_t<T> dX_value[NumElementsPerThread];
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE;
if (gathered_element_idx < num_gathered_per_index) {
dY_value[ii] = static_cast<AccumulationType_t<T>>(dY_data[dY_row_offset + gathered_element_idx]);
dX_value[ii] = static_cast<AccumulationType_t<T>>(dX_data[dX_row_offset + gathered_element_idx]);
}
}
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
dX_value[ii] += dY_value[ii];
}
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE;
if (gathered_element_idx < num_gathered_per_index) {
dX_data[dX_row_offset + gathered_element_idx] = static_cast<T>(dX_value[ii]);
}
}
}
idx++;
} while (idx < num_gathered_indices && dX_indices_sorted[idx] == dX_indices_sorted[idx - 1]);
}
}
// directly sum gathered dY values into the corresponding dX value
template <typename T, typename TIndex>
void DirectSumImpl(
hipStream_t stream,
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches) {
dim3 block(GPU_WARP_SIZE, 4);
dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE));
hipLaunchKernelGGL(( DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, stream,
dX_indices_sorted,
dY_indices_sorted,
dY_data,
dX_data,
num_gathered_indices,
num_gathered_per_index,
gather_dimension_size,
num_batches);
}
// partial sums implementation adapted from here:
// https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/EmbeddingBackwardKernel.cu
__global__ void ComputePerSegmentPartialSegmentCountsKernel(
SegmentIndex_t* ret, const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_of_segments, GatheredIndexIndex_t num_gathered_indices) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num_of_segments) {
const auto idx_start = segment_offsets[id];
const auto idx_end = (id == num_of_segments - 1) ? num_gathered_indices : segment_offsets[id + 1];
const auto size = idx_end - idx_start;
ret[id] = CeilDiv(size, kMaxPartialSegmentSize);
}
}
__global__ void ComputePartialSegmentOffsetsKernel(
GatheredIndexIndex_t* ret,
const SegmentIndex_t* partials_per_segment,
const SegmentIndex_t* partials_per_segment_offset,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_of_segments) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num_of_segments) {
auto idx = partials_per_segment_offset[id];
const auto num_partials = partials_per_segment[id];
const auto segment_offset = segment_offsets[id];
for (SegmentIndex_t i = 0; i < num_partials; ++i) {
ret[idx++] = segment_offset + i * kMaxPartialSegmentSize;
}
}
}
template <typename T, typename TIndex>
__global__ void ComputePartialSegmentSumsKernel(
const TIndex* dY_indices_sorted,
const T* dY_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
const GatheredIndexIndex_t* partial_segment_offsets,
SegmentIndex_t num_partial_segments,
AccumulationType_t<T>* partial_segment_sums,
const int64_t num_gathered_per_index_warp_size_multiple) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
const auto partial_segment_id = id / num_gathered_per_index_warp_size_multiple;
const auto gathered_element_id = id % num_gathered_per_index_warp_size_multiple;
const auto batch_id = blockIdx.y;
if (gathered_element_id >= num_gathered_per_index) {
return;
}
if (partial_segment_id >= num_partial_segments) {
return;
}
const auto idx_begin = partial_segment_offsets[partial_segment_id];
const auto idx_end =
(partial_segment_id == num_partial_segments - 1) ? num_gathered_indices : partial_segment_offsets[partial_segment_id + 1];
AccumulationType_t<T> partial_segment_sum = 0;
for (auto idx = idx_begin; idx < idx_end; ++idx) {
const auto target_row = dY_indices_sorted[idx];
partial_segment_sum += static_cast<AccumulationType_t<T>>(
dY_data[batch_id * num_gathered_indices * num_gathered_per_index +
target_row * num_gathered_per_index +
gathered_element_id]);
}
partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index +
partial_segment_id * num_gathered_per_index +
gathered_element_id] =
partial_segment_sum;
}
template <typename T, typename TIndex>
__global__ void ComputeSegmentSumsAndScatterKernel(
const TIndex* dX_indices_sorted,
T* dX_data,
int64_t num_gathered_per_index,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_segments,
const AccumulationType_t<T>* partial_segment_sums,
const SegmentIndex_t* per_segment_partial_segment_offsets,
SegmentIndex_t num_partial_segments,
const int64_t num_gathered_per_index_warp_size_multiple,
const int64_t gather_dimension_size) {
const auto gid = blockIdx.x * blockDim.x + threadIdx.x;
const auto segment_id = gid / num_gathered_per_index_warp_size_multiple;
const auto gathered_element_id = gid % num_gathered_per_index_warp_size_multiple;
const auto batch_id = blockIdx.y;
if (gathered_element_id >= num_gathered_per_index) {
return;
}
if (segment_id >= num_segments) {
return;
}
const auto idx_begin = per_segment_partial_segment_offsets[segment_id];
const auto idx_end =
(segment_id == num_segments - 1) ? num_partial_segments : per_segment_partial_segment_offsets[segment_id + 1];
AccumulationType_t<T> segment_sum = 0;
for (auto idx = idx_begin; idx < idx_end; ++idx) {
segment_sum +=
partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index +
idx * num_gathered_per_index +
gathered_element_id];
}
auto target_row = dX_indices_sorted[segment_offsets[segment_id]];
// All index values are expected to be within bounds [-s, s-1] along axis of size s.
if (target_row < 0) target_row += gather_dimension_size;
dX_data[batch_id * gather_dimension_size * num_gathered_per_index +
target_row * num_gathered_per_index +
gathered_element_id] =
segment_sum;
}
// get partial sums of gathered dY values first, then sum the partial sums into
// the corresponding dX value
template <typename T, typename TIndex>
void PartialSumsImpl(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_segments) {
// each segment is split into partial segments of at most
// kMaxPartialSegmentSize index pairs.
// compute the number of partial segments per segment
auto per_segment_partial_segment_counts = allocator.GetScratchBuffer<SegmentIndex_t>(num_segments);
{
const auto blocks_per_grid = CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock);
hipLaunchKernelGGL(( ComputePerSegmentPartialSegmentCountsKernel), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
per_segment_partial_segment_counts.get(),
segment_offsets, num_segments, num_gathered_indices);
}
// compute partial segment offsets per segment
auto per_segment_partial_segment_offsets = GetOffsetsFromCounts(
stream, allocator, per_segment_partial_segment_counts.get(), num_segments);
SegmentIndex_t host_num_partial_segments = 0;
{
SegmentIndex_t last_segment_partial_segment_offset = 0,
last_segment_partial_segment_count = 0;
// CPU/GPU sync!
CUDA_CALL_THROW(hipMemcpyAsync(
&last_segment_partial_segment_offset,
&per_segment_partial_segment_offsets.get()[num_segments - 1],
sizeof(SegmentIndex_t), hipMemcpyDeviceToHost, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(hipMemcpyAsync(
&last_segment_partial_segment_count,
&per_segment_partial_segment_counts.get()[num_segments - 1],
sizeof(SegmentIndex_t), hipMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(hipStreamSynchronize(stream));
host_num_partial_segments =
last_segment_partial_segment_offset + last_segment_partial_segment_count;
}
// compute index offsets per partial segment
auto partial_segment_offsets = allocator.GetScratchBuffer<GatheredIndexIndex_t>(host_num_partial_segments);
{
const auto blocks_per_grid = CeilDiv(num_segments, GridDim::maxThreadsPerBlock);
hipLaunchKernelGGL(( ComputePartialSegmentOffsetsKernel), dim3(blocks_per_grid), dim3(GridDim::maxThreadsPerBlock), 0, stream,
partial_segment_offsets.get(),
per_segment_partial_segment_counts.get(),
per_segment_partial_segment_offsets.get(),
segment_offsets,
num_segments);
}
{
const auto num_gathered_per_index_warp_size_multiple =
CeilDiv(num_gathered_per_index, GPU_WARP_SIZE) * GPU_WARP_SIZE;
const auto threads_per_block =
std::min<int64_t>(num_gathered_per_index_warp_size_multiple, GridDim::maxThreadsPerBlock);
// compute partial segment sums
auto partial_segment_sums = allocator.GetScratchBuffer<AccumulationType_t<T>>(
num_batches * host_num_partial_segments * num_gathered_per_index);
{
const dim3 blocks_per_grid(
CeilDiv(host_num_partial_segments * num_gathered_per_index_warp_size_multiple, threads_per_block),
num_batches);
hipLaunchKernelGGL(( ComputePartialSegmentSumsKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream,
dY_indices_sorted,
dY_data,
num_gathered_indices,
num_gathered_per_index,
partial_segment_offsets.get(),
host_num_partial_segments,
partial_segment_sums.get(),
num_gathered_per_index_warp_size_multiple);
}
// compute segment sums from partial segment sums
{
const dim3 blocks_per_grid(
CeilDiv(num_segments * num_gathered_per_index_warp_size_multiple, threads_per_block),
num_batches);
hipLaunchKernelGGL(( ComputeSegmentSumsAndScatterKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, stream,
dX_indices_sorted,
dX_data,
num_gathered_per_index,
segment_offsets,
num_segments,
partial_segment_sums.get(),
per_segment_partial_segment_offsets.get(),
host_num_partial_segments,
num_gathered_per_index_warp_size_multiple,
gather_dimension_size);
}
}
}
template <typename T, typename TIndex>
void Impl(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted;
GetSortedIndices(
stream,
allocator,
dX_indices, num_gathered_indices,
dX_indices_sorted, dY_indices_sorted);
// get number of segments and segment counts
SegmentIndex_t host_num_segments = 0;
auto segment_counts = allocator.GetScratchBuffer<GatheredIndexIndex_t>(num_gathered_indices);
{
auto num_segments = allocator.GetScratchBuffer<SegmentIndex_t>(1);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(hipcub::DeviceRunLengthEncode::Encode(
nullptr, temp_storage_size_bytes,
dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(),
num_segments.get(), num_gathered_indices, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(hipcub::DeviceRunLengthEncode::Encode(
temp_storage.get(), temp_storage_size_bytes,
dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(),
num_segments.get(), num_gathered_indices, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(hipMemcpyAsync(
&host_num_segments, num_segments.get(), sizeof(SegmentIndex_t), hipMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(hipStreamSynchronize(stream));
}
// get largest segment size and use that to select implementation
GatheredIndexIndex_t host_max_segment_count = 0;
{
auto max_segment_count = allocator.GetScratchBuffer<GatheredIndexIndex_t>(1);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(hipcub::DeviceReduce::Max(
nullptr, temp_storage_size_bytes,
segment_counts.get(), max_segment_count.get(), host_num_segments, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(hipcub::DeviceReduce::Max(
temp_storage.get(), temp_storage_size_bytes,
segment_counts.get(), max_segment_count.get(), host_num_segments, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(hipMemcpyAsync(
&host_max_segment_count, max_segment_count.get(), sizeof(GatheredIndexIndex_t), hipMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(hipStreamSynchronize(stream));
}
constexpr GatheredIndexIndex_t kMaxSegmentSizeThreshold = 32;
if (host_max_segment_count <= kMaxSegmentSizeThreshold) {
DirectSumImpl(
stream, dX_indices_sorted.get(), dY_indices_sorted.get(),
dY_data, dX_data,
num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches);
} else {
auto segment_offsets = GetOffsetsFromCounts(
stream, allocator, segment_counts.get(), host_num_segments);
segment_counts.reset();
PartialSumsImpl(
stream,
allocator,
dX_indices_sorted.get(), dY_indices_sorted.get(),
dY_data, dX_data,
num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches,
segment_offsets.get(), host_num_segments);
}
}
// this is a backup implementation that doesn't incur GPU/CPU syncs, but
// doesn't perform well if there are many duplicate values in dX_indices
template <typename T, typename TIndex>
void Impl_Simplified(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted;
GetSortedIndices(
stream,
allocator,
dX_indices, num_gathered_indices,
dX_indices_sorted, dY_indices_sorted);
dim3 block(GPU_WARP_SIZE, 4);
dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE));
hipLaunchKernelGGL(( DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread>), dim3(grid), dim3(block), 0, stream,
dX_indices_sorted.get(),
dY_indices_sorted.get(),
dY_data,
dX_data,
num_gathered_indices,
num_gathered_per_index,
gather_dimension_size,
num_batches);
}
} // namespace gather_grad_internal
template <typename T, typename TIndex>
void GatherGradImpl(
hipStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
gather_grad_internal::Impl(
stream,
allocator,
dY_data, dX_indices,
num_gathered_indices, gather_dimension_size, num_gathered_per_index, num_batches,
dX_data);
}
#define SPECIALIZED(T, TIndex) \
template void GatherGradImpl<T, TIndex>( \
hipStream_t stream, \
const CudaScratchBufferAllocator& allocator, \
const T* dY_data, \
const TIndex* dX_indices, \
const GatheredIndexIndex_t num_gathered_indices, \
const int64_t gather_dimension_size, \
const int64_t num_gathered_per_index, \
const int64_t num_batches, \
T* dX_data);
#define SPECIALIZED_WITH_IDX(T) \
SPECIALIZED(T, int32_t) \
SPECIALIZED(T, int64_t)
SPECIALIZED_WITH_IDX(float)
SPECIALIZED_WITH_IDX(half)
#if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_WITH_IDX(nv_bfloat16)
#endif
#undef SPECIALIZED_WITH_IDX
#undef SPECIALIZED
} // namespace cuda
} // namespace onnxruntime
| 98087346ee138b082ee0dbe939160a4955a608c2.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef _WIN32
#pragma warning(disable : 4244)
#endif
#include "orttraining/training_ops/cuda/tensor/gather_grad_impl.h"
#include <cub/device/device_radix_sort.cuh>
#include <cub/device/device_reduce.cuh>
#include <cub/device/device_run_length_encode.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/shared_inc/accumulation_type.h"
#include "core/providers/cuda/shared_inc/cuda_call.h"
namespace onnxruntime {
namespace cuda {
namespace gather_grad_internal {
// Note:
// For these implementations, first we generate sorted lists of dX and dY
// indices, ordered by dX indices. Then, we can consider segments of the sorted
// lists.
//
// Each continuous run of indices with the same dX value in dX_indices_sorted
// forms a segment.
//
// For example, given:
// dX_indices_sorted = [1, 1, 2, 2, 2, 3]
// dY_indices_sorted = [1, 4, 0, 3, 5, 2]
// The segments will be: '--' '-----' '-'
//
// The segments can be processed in parallel, or further divided into partial
// segments for increased parallelism.
// unit for handling indexing and counting of segments or partial segments
using SegmentIndex_t = GatheredIndexIndex_t;
constexpr GatheredIndexIndex_t kMaxPartialSegmentSize = 10;
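// Worked example, using the indices from the note above with
// kMaxPartialSegmentSize == 10:
//   dX_indices_sorted = [1, 1, 2, 2, 2, 3]
//   segment_counts    = [2, 3, 1]   (run lengths of equal dX indices)
//   segment_offsets   = [0, 2, 5]   (exclusive prefix sum of the counts)
//   partials/segment  = [1, 1, 1]   (CeilDiv(count, kMaxPartialSegmentSize))
// so every segment here fits in a single partial segment.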
template <typename TInputIterator, typename TOutputIterator>
__global__ void CopyKernel(TOutputIterator dst, TInputIterator src, int64_t length) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, length);
dst[id] = src[id];
}
// get sorted dX and dY indices, ordered by dX indices
template <typename TIndex>
void GetSortedIndices(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const TIndex* dX_indices,
GatheredIndexIndex_t num_gathered_indices,
IAllocatorUniquePtr<TIndex>& dX_indices_sorted_out,
IAllocatorUniquePtr<TIndex>& dY_indices_sorted_out) {
auto dY_indices = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
CopyKernel<<<CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock),
GridDim::maxThreadsPerBlock, 0, stream>>>(
dY_indices.get(), cub::CountingInputIterator<TIndex>{0}, num_gathered_indices);
auto dX_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
auto dY_indices_sorted = allocator.GetScratchBuffer<TIndex>(num_gathered_indices);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs(
nullptr, temp_storage_size_bytes,
dX_indices, dX_indices_sorted.get(),
dY_indices.get(), dY_indices_sorted.get(),
num_gathered_indices, 0, sizeof(TIndex)*8, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(cub::DeviceRadixSort::SortPairs(
temp_storage.get(), temp_storage_size_bytes,
dX_indices, dX_indices_sorted.get(),
dY_indices.get(), dY_indices_sorted.get(),
num_gathered_indices, 0, sizeof(TIndex)*8, stream));
dX_indices_sorted_out = std::move(dX_indices_sorted);
dY_indices_sorted_out = std::move(dY_indices_sorted);
}
template <typename T>
IAllocatorUniquePtr<T> GetOffsetsFromCounts(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* counts, int32_t num_counts) {
auto offsets = allocator.GetScratchBuffer<T>(num_counts);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum(
nullptr, temp_storage_size_bytes,
counts, offsets.get(), num_counts, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(cub::DeviceScan::ExclusiveSum(
temp_storage.get(), temp_storage_size_bytes,
counts, offsets.get(), num_counts, stream));
return offsets;
}
// adapted from here:
// https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/Embedding.cu#L121
template <typename T, typename TIndex, int NumElementsPerThread>
__global__ void DirectSumKernel(
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches) {
GatheredIndexIndex_t idx = blockIdx.x * 4 + threadIdx.y;
if (idx < num_gathered_indices && (idx == 0 || dX_indices_sorted[idx] != dX_indices_sorted[idx - 1])) {
do {
// All index values are expected to be within bounds [-s, s-1] along axis of size s.
auto target_row = dX_indices_sorted[idx];
if (target_row < 0) target_row += gather_dimension_size;
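// e.g. with gather_dimension_size == 5, an index of -1 wraps to row 4 and -5 wraps to row 0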
for (int64_t batch_idx = 0; batch_idx < num_batches; ++batch_idx) {
const auto gathered_element_idx_start = threadIdx.x + blockIdx.y * blockDim.x * NumElementsPerThread;
const auto dX_row_offset =
(batch_idx * gather_dimension_size + target_row) * num_gathered_per_index;
const auto dY_row_offset =
(batch_idx * num_gathered_indices + dY_indices_sorted[idx]) * num_gathered_per_index;
AccumulationType_t<T> dY_value[NumElementsPerThread];
AccumulationType_t<T> dX_value[NumElementsPerThread];
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE;
if (gathered_element_idx < num_gathered_per_index) {
dY_value[ii] = static_cast<AccumulationType_t<T>>(dY_data[dY_row_offset + gathered_element_idx]);
dX_value[ii] = static_cast<AccumulationType_t<T>>(dX_data[dX_row_offset + gathered_element_idx]);
}
}
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
dX_value[ii] += dY_value[ii];
}
#pragma unroll
for (int ii = 0; ii < NumElementsPerThread; ii++) {
const auto gathered_element_idx = gathered_element_idx_start + ii * GPU_WARP_SIZE;
if (gathered_element_idx < num_gathered_per_index) {
dX_data[dX_row_offset + gathered_element_idx] = static_cast<T>(dX_value[ii]);
}
}
}
idx++;
} while (idx < num_gathered_indices && dX_indices_sorted[idx] == dX_indices_sorted[idx - 1]);
}
}
// directly sum gathered dY values into the corresponding dX value
template <typename T, typename TIndex>
void DirectSumImpl(
cudaStream_t stream,
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches) {
dim3 block(GPU_WARP_SIZE, 4);
dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE));
DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread><<<grid, block, 0, stream>>>(
dX_indices_sorted,
dY_indices_sorted,
dY_data,
dX_data,
num_gathered_indices,
num_gathered_per_index,
gather_dimension_size,
num_batches);
}
// partial sums implementation adapted from here:
// https://github.com/pytorch/pytorch/blob/b186831c08e0e4e447eedb8a5cfab582995d37f9/aten/src/ATen/native/cuda/EmbeddingBackwardKernel.cu
__global__ void ComputePerSegmentPartialSegmentCountsKernel(
SegmentIndex_t* ret, const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_of_segments, GatheredIndexIndex_t num_gathered_indices) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num_of_segments) {
const auto idx_start = segment_offsets[id];
const auto idx_end = (id == num_of_segments - 1) ? num_gathered_indices : segment_offsets[id + 1];
const auto size = idx_end - idx_start;
ret[id] = CeilDiv(size, kMaxPartialSegmentSize);
}
}
__global__ void ComputePartialSegmentOffsetsKernel(
GatheredIndexIndex_t* ret,
const SegmentIndex_t* partials_per_segment,
const SegmentIndex_t* partials_per_segment_offset,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_of_segments) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < num_of_segments) {
auto idx = partials_per_segment_offset[id];
const auto num_partials = partials_per_segment[id];
const auto segment_offset = segment_offsets[id];
for (SegmentIndex_t i = 0; i < num_partials; ++i) {
ret[idx++] = segment_offset + i * kMaxPartialSegmentSize;
}
}
}
template <typename T, typename TIndex>
__global__ void ComputePartialSegmentSumsKernel(
const TIndex* dY_indices_sorted,
const T* dY_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
const GatheredIndexIndex_t* partial_segment_offsets,
SegmentIndex_t num_partial_segments,
AccumulationType_t<T>* partial_segment_sums,
const int64_t num_gathered_per_index_warp_size_multiple) {
const auto id = blockIdx.x * blockDim.x + threadIdx.x;
const auto partial_segment_id = id / num_gathered_per_index_warp_size_multiple;
const auto gathered_element_id = id % num_gathered_per_index_warp_size_multiple;
const auto batch_id = blockIdx.y;
if (gathered_element_id >= num_gathered_per_index) {
return;
}
if (partial_segment_id >= num_partial_segments) {
return;
}
const auto idx_begin = partial_segment_offsets[partial_segment_id];
const auto idx_end =
(partial_segment_id == num_partial_segments - 1) ? num_gathered_indices : partial_segment_offsets[partial_segment_id + 1];
AccumulationType_t<T> partial_segment_sum = 0;
for (auto idx = idx_begin; idx < idx_end; ++idx) {
const auto target_row = dY_indices_sorted[idx];
partial_segment_sum += static_cast<AccumulationType_t<T>>(
dY_data[batch_id * num_gathered_indices * num_gathered_per_index +
target_row * num_gathered_per_index +
gathered_element_id]);
}
partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index +
partial_segment_id * num_gathered_per_index +
gathered_element_id] =
partial_segment_sum;
}
template <typename T, typename TIndex>
__global__ void ComputeSegmentSumsAndScatterKernel(
const TIndex* dX_indices_sorted,
T* dX_data,
int64_t num_gathered_per_index,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_segments,
const AccumulationType_t<T>* partial_segment_sums,
const SegmentIndex_t* per_segment_partial_segment_offsets,
SegmentIndex_t num_partial_segments,
const int64_t num_gathered_per_index_warp_size_multiple,
const int64_t gather_dimension_size) {
const auto gid = blockIdx.x * blockDim.x + threadIdx.x;
const auto segment_id = gid / num_gathered_per_index_warp_size_multiple;
const auto gathered_element_id = gid % num_gathered_per_index_warp_size_multiple;
const auto batch_id = blockIdx.y;
if (gathered_element_id >= num_gathered_per_index) {
return;
}
if (segment_id >= num_segments) {
return;
}
const auto idx_begin = per_segment_partial_segment_offsets[segment_id];
const auto idx_end =
(segment_id == num_segments - 1) ? num_partial_segments : per_segment_partial_segment_offsets[segment_id + 1];
AccumulationType_t<T> segment_sum = 0;
for (auto idx = idx_begin; idx < idx_end; ++idx) {
segment_sum +=
partial_segment_sums[batch_id * num_partial_segments * num_gathered_per_index +
idx * num_gathered_per_index +
gathered_element_id];
}
auto target_row = dX_indices_sorted[segment_offsets[segment_id]];
// All index values are expected to be within bounds [-s, s-1] along axis of size s.
if (target_row < 0) target_row += gather_dimension_size;
dX_data[batch_id * gather_dimension_size * num_gathered_per_index +
target_row * num_gathered_per_index +
gathered_element_id] =
segment_sum;
}
// get partial sums of gathered dY values first, then sum the partial sums into
// the corresponding dX value
template <typename T, typename TIndex>
void PartialSumsImpl(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const TIndex* dX_indices_sorted,
const TIndex* dY_indices_sorted,
const T* dY_data,
T* dX_data,
GatheredIndexIndex_t num_gathered_indices,
int64_t num_gathered_per_index,
int64_t gather_dimension_size,
int64_t num_batches,
const GatheredIndexIndex_t* segment_offsets,
SegmentIndex_t num_segments) {
// each segment is split into partial segments of at most
// kMaxPartialSegmentSize index pairs.
// compute the number of partial segments per segment
auto per_segment_partial_segment_counts = allocator.GetScratchBuffer<SegmentIndex_t>(num_segments);
{
const auto blocks_per_grid = CeilDiv(num_gathered_indices, GridDim::maxThreadsPerBlock);
ComputePerSegmentPartialSegmentCountsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>(
per_segment_partial_segment_counts.get(),
segment_offsets, num_segments, num_gathered_indices);
}
// compute partial segment offsets per segment
auto per_segment_partial_segment_offsets = GetOffsetsFromCounts(
stream, allocator, per_segment_partial_segment_counts.get(), num_segments);
SegmentIndex_t host_num_partial_segments = 0;
{
SegmentIndex_t last_segment_partial_segment_offset = 0,
last_segment_partial_segment_count = 0;
// CPU/GPU sync!
CUDA_CALL_THROW(cudaMemcpyAsync(
&last_segment_partial_segment_offset,
&per_segment_partial_segment_offsets.get()[num_segments - 1],
sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(cudaMemcpyAsync(
&last_segment_partial_segment_count,
&per_segment_partial_segment_counts.get()[num_segments - 1],
sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(cudaStreamSynchronize(stream));
host_num_partial_segments =
last_segment_partial_segment_offset + last_segment_partial_segment_count;
}
// compute index offsets per partial segment
auto partial_segment_offsets = allocator.GetScratchBuffer<GatheredIndexIndex_t>(host_num_partial_segments);
{
const auto blocks_per_grid = CeilDiv(num_segments, GridDim::maxThreadsPerBlock);
ComputePartialSegmentOffsetsKernel<<<blocks_per_grid, GridDim::maxThreadsPerBlock, 0, stream>>>(
partial_segment_offsets.get(),
per_segment_partial_segment_counts.get(),
per_segment_partial_segment_offsets.get(),
segment_offsets,
num_segments);
}
{
const auto num_gathered_per_index_warp_size_multiple =
CeilDiv(num_gathered_per_index, GPU_WARP_SIZE) * GPU_WARP_SIZE;
const auto threads_per_block =
std::min<int64_t>(num_gathered_per_index_warp_size_multiple, GridDim::maxThreadsPerBlock);
// compute partial segment sums
auto partial_segment_sums = allocator.GetScratchBuffer<AccumulationType_t<T>>(
num_batches * host_num_partial_segments * num_gathered_per_index);
{
const dim3 blocks_per_grid(
CeilDiv(host_num_partial_segments * num_gathered_per_index_warp_size_multiple, threads_per_block),
num_batches);
ComputePartialSegmentSumsKernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(
dY_indices_sorted,
dY_data,
num_gathered_indices,
num_gathered_per_index,
partial_segment_offsets.get(),
host_num_partial_segments,
partial_segment_sums.get(),
num_gathered_per_index_warp_size_multiple);
}
// compute segment sums from partial segment sums
{
const dim3 blocks_per_grid(
CeilDiv(num_segments * num_gathered_per_index_warp_size_multiple, threads_per_block),
num_batches);
ComputeSegmentSumsAndScatterKernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(
dX_indices_sorted,
dX_data,
num_gathered_per_index,
segment_offsets,
num_segments,
partial_segment_sums.get(),
per_segment_partial_segment_offsets.get(),
host_num_partial_segments,
num_gathered_per_index_warp_size_multiple,
gather_dimension_size);
}
}
}
template <typename T, typename TIndex>
void Impl(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted;
GetSortedIndices(
stream,
allocator,
dX_indices, num_gathered_indices,
dX_indices_sorted, dY_indices_sorted);
// get number of segments and segment counts
SegmentIndex_t host_num_segments = 0;
auto segment_counts = allocator.GetScratchBuffer<GatheredIndexIndex_t>(num_gathered_indices);
{
auto num_segments = allocator.GetScratchBuffer<SegmentIndex_t>(1);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode(
nullptr, temp_storage_size_bytes,
dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(),
num_segments.get(), num_gathered_indices, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(cub::DeviceRunLengthEncode::Encode(
temp_storage.get(), temp_storage_size_bytes,
dX_indices_sorted.get(), cub::DiscardOutputIterator<TIndex>{}, segment_counts.get(),
num_segments.get(), num_gathered_indices, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(cudaMemcpyAsync(
&host_num_segments, num_segments.get(), sizeof(SegmentIndex_t), cudaMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(cudaStreamSynchronize(stream));
}
// get largest segment size and use that to select implementation
GatheredIndexIndex_t host_max_segment_count = 0;
{
auto max_segment_count = allocator.GetScratchBuffer<GatheredIndexIndex_t>(1);
size_t temp_storage_size_bytes = 0;
CUDA_CALL_THROW(cub::DeviceReduce::Max(
nullptr, temp_storage_size_bytes,
segment_counts.get(), max_segment_count.get(), host_num_segments, stream));
auto temp_storage = allocator.GetScratchBuffer<void>(temp_storage_size_bytes);
CUDA_CALL_THROW(cub::DeviceReduce::Max(
temp_storage.get(), temp_storage_size_bytes,
segment_counts.get(), max_segment_count.get(), host_num_segments, stream));
// CPU/GPU sync!
CUDA_CALL_THROW(cudaMemcpyAsync(
&host_max_segment_count, max_segment_count.get(), sizeof(GatheredIndexIndex_t), cudaMemcpyDeviceToHost, stream));
CUDA_CALL_THROW(cudaStreamSynchronize(stream));
}
constexpr GatheredIndexIndex_t kMaxSegmentSizeThreshold = 32;
if (host_max_segment_count <= kMaxSegmentSizeThreshold) {
DirectSumImpl(
stream, dX_indices_sorted.get(), dY_indices_sorted.get(),
dY_data, dX_data,
num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches);
} else {
auto segment_offsets = GetOffsetsFromCounts(
stream, allocator, segment_counts.get(), host_num_segments);
segment_counts.reset();
PartialSumsImpl(
stream,
allocator,
dX_indices_sorted.get(), dY_indices_sorted.get(),
dY_data, dX_data,
num_gathered_indices, num_gathered_per_index, gather_dimension_size, num_batches,
segment_offsets.get(), host_num_segments);
}
}
// this is a backup implementation that doesn't incur GPU/CPU syncs, but
// doesn't perform well if there are many duplicate values in dX_indices
template <typename T, typename TIndex>
void Impl_Simplified(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
IAllocatorUniquePtr<TIndex> dX_indices_sorted, dY_indices_sorted;
GetSortedIndices(
stream,
allocator,
dX_indices, num_gathered_indices,
dX_indices_sorted, dY_indices_sorted);
dim3 block(GPU_WARP_SIZE, 4);
dim3 grid(CeilDiv(num_gathered_indices, 4), CeilDiv(num_gathered_per_index, GridDim::maxElementsPerThread * GPU_WARP_SIZE));
DirectSumKernel<T, TIndex, GridDim::maxElementsPerThread><<<grid, block, 0, stream>>>(
dX_indices_sorted.get(),
dY_indices_sorted.get(),
dY_data,
dX_data,
num_gathered_indices,
num_gathered_per_index,
gather_dimension_size,
num_batches);
}
} // namespace gather_grad_internal
template <typename T, typename TIndex>
void GatherGradImpl(
cudaStream_t stream,
const CudaScratchBufferAllocator& allocator,
const T* dY_data,
const TIndex* dX_indices,
const GatheredIndexIndex_t num_gathered_indices,
const int64_t gather_dimension_size,
const int64_t num_gathered_per_index,
const int64_t num_batches,
T* dX_data) {
gather_grad_internal::Impl(
stream,
allocator,
dY_data, dX_indices,
num_gathered_indices, gather_dimension_size, num_gathered_per_index, num_batches,
dX_data);
}
#define SPECIALIZED(T, TIndex) \
template void GatherGradImpl<T, TIndex>( \
cudaStream_t stream, \
const CudaScratchBufferAllocator& allocator, \
const T* dY_data, \
const TIndex* dX_indices, \
const GatheredIndexIndex_t num_gathered_indices, \
const int64_t gather_dimension_size, \
const int64_t num_gathered_per_index, \
const int64_t num_batches, \
T* dX_data);
#define SPECIALIZED_WITH_IDX(T) \
SPECIALIZED(T, int32_t) \
SPECIALIZED(T, int64_t)
SPECIALIZED_WITH_IDX(float)
SPECIALIZED_WITH_IDX(half)
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
SPECIALIZED_WITH_IDX(nv_bfloat16)
#endif
#undef SPECIALIZED_WITH_IDX
#undef SPECIALIZED
} // namespace cuda
} // namespace onnxruntime
|
9fc1d3976109ed8e870bdd536ec675503fdaf6e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "convolution.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "logging.h"
/*
The maximum number of threads in the block is limited to 1024. This is the product of whatever your threadblock dimensions are (x*y*z). For example (32,32,1) creates a block of 1024 threads. (33,32,1) is not legal, since 33*32*1 > 1024.
grid
x: 2^31 - 1
y,z: 65535
thread
x,y: 1024
z: 64
960
1000
1280
12544
*/
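// gpu_conv_1: used by convolution() below for large output maps (o_w*o_h > 1023).
// It is launched once per (group, output map, input map) triple, with the output
// map tiled over a 4x4 grid of blocks so no block exceeds the 1024-thread limit
// quoted above. Only 1x1 and 3x3 kernels are handled (the else branch assumes 3x3).
//
// A minimal illustration of that launch-shape choice (hypothetical helper, not
// called anywhere in this file; it simply mirrors the branch in convolution()):
static inline void sketch_launch_shape(int o_w, int o_h, int o_d, int group,
                                       dim3 *grid, dim3 *block) {
    if (o_w * o_h > 1023) {
        // large maps: tile each map over a 4x4 grid, (o_w/4)*(o_h/4) threads per block
        *grid  = dim3(4, 4, 1);
        *block = dim3(o_w / 4, o_h / 4, 1);
    } else {
        // small maps: one block per output map, o_w*o_h threads per block,
        // grid spans (maps per group) x (groups)
        *block = dim3(o_w, o_h, 1);
        *grid  = dim3(o_d / group, group, 1);
    }
}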
__global__ void gpu_conv_1(float * img, float * weight, float * out, int i_w, int i_h, int w_w, int w_h, int o_w, int o_h, int i_d, int g, int o, int i, int Kx, int Ky, int Sx, int Sy, int group) {
unsigned int m = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int n = blockIdx.x*blockDim.x + threadIdx.x;
if (Kx == 1) {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
} else {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+2];
}
}
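// gpu_conv_2: used by convolution() below for small output maps (o_w*o_h <= 1023).
// One block per output map, launched over a (maps per group) x (groups) grid; each
// thread accumulates one output pixel over all input maps of its group. As in
// gpu_conv_1, only 1x1 and 3x3 kernels are handled.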
__global__ void gpu_conv_2(float * img, float * weight, float * out, int i_w, int i_h, int w_w, int w_h, int o_w, int o_h, int Kx, int Ky, int Sx, int Sy, int group, int o_d, int i_d) {
unsigned int g = blockIdx.y;
unsigned int o = g*(o_d/group)+blockIdx.x;
int m = threadIdx.y;
int n = threadIdx.x;
for(int i=g*(i_d/group);i<(g+1)*(i_d/group);i++)
if (Kx == 1) {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
} else {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+2];
}
}
//add padding to blob
BLOB* pad(BLOB* in, int pad){
//create output blob
BLOB* out = blob_calloc(in->d, in->h+2*pad, in->w+pad*2);
//copy non-padded input into output blob
for(int z=0;z<in->d;z++)
for(int y=0;y<in->h;y++)
for(int x=0;x<in->w;x++)
blob_data(out,z,y+pad,x+pad)= blob_data(in,z,y,x);
//return pointer to padded blob
return out;
}
BLOB* load_weights(BLOB* b, conv_param_t* p){
//open weights file for reading
FILE* fp = fopen(p->weights, "rb");
if(fp==NULL)
error("could not open file %s for reading\n",p->weights);
//for fully connected layers the kernel size is equal to the input size
int Ky=(p->fc)?b->h:p->Ky;
int Kx=(p->fc)?b->w:p->Kx;
//allocate 3D blob, and emulate 4D in KxKy later
BLOB* w=blob_alloc(p->num_out, b->d/p->group, Ky*Kx);
//fill 4D weight structure
for(int g=0;g<p->group;g++)
for(int o=g*(p->num_out/p->group);o<(g+1)*(p->num_out/p->group);o++)
for(int i=g*(b->d/p->group);i<(g+1)*(b->d/p->group);i++)
//note: each output map has only b->d/p->group input maps. Hence the absolute index of i is subtracted when storing in w!
if((int)fread( &(blob_data(w,o,i-g*(b->d/p->group),0)),sizeof(float),Ky*Kx, fp)!=Ky*Kx)
error("loading weights from file %s\n", p->weights);
//close file
fclose(fp);
//return weight blob
return w;
}
float* load_1d(const char* fname, size_t num){
//open file for reading
FILE* fp = fopen(fname, "rb");
if(fp==NULL)
error("could not open file %s for reading\n",fname);
//read in array
float* arr= (float*) malloc(sizeof(float)*num);
if(fread(arr,sizeof(float), num, fp)!=num)
error("loading data from file %s\n", fname);
//close file
fclose(fp);
return arr;
}
//convolution, NOTE: destructive of BLOB* in. duplicate if further required!
BLOB* convolution(BLOB* input, conv_param_t* p){
//use local pointer
BLOB* in = input;
BLOB* out;
static bool output_reuse = false;
//padding of input if required
if(p->pad!=0)
in = pad(in, p->pad);
//if fully connected, the kernel size is set to the image size
int Ky=(p->fc)?in->h:p->Ky;
int Kx=(p->fc)?in->w:p->Kx;
//create blob to hold output
int height=(int)floor(((float)in->h - (float)Ky)/(float)p->Sy)+1;
int width =(int)floor(((float)in->w - (float)Kx)/(float)p->Sx)+1;
//load bias if required
if(p->bias==NULL){
//zero init
out = blob_calloc(p->num_out, height, width);
}else{
//not required to calloc
out = blob_alloc(p->num_out, height, width);
//load bias values from file
float* bias =load_1d(p->bias, p->num_out);
//set bias or init with zeroes
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n)=bias[o];
//cleanup bias
free(bias);
}
//load weights
BLOB* w = load_weights(in, p);
float *in_gpu, *w_gpu;
static float *out_gpu;
dim3 block, grid;
if (!output_reuse){
blob2gpu(in_gpu, in);
}
else{
in_gpu = out_gpu;
}
blob2gpu(w_gpu, w);
// Allocate device memory for the output of the convolution
out_gpu =
cudaCheckError(hipMalloc(&out_gpu, blob_bytes(out)));
blob2gpu(out_gpu, out);
if(out->w*out->h > 1023){
grid = dim3(4, 4, 1);
block = dim3(out->w/4, out->h/4);
int o, g, i;
//perform convolution
for( g=0;g<p->group;g++) {
for( o=g*(out->d/p->group);o<(g+1)*(out->d/p->group);o++) {
for( i=g*(in->d/p->group);i<(g+1)*(in->d/p->group);i++) {
hipLaunchKernelGGL(( gpu_conv_1) , dim3(grid), dim3(block), 0, 0, in_gpu, w_gpu, out_gpu, in->w , in->h, w->w , w->h, out->w, out->h, in->d, g, o, i, Kx, Ky, p->Sx, p->Sy, p->group);
}
}
}
}
else{
block = dim3(out->w, out->h);
grid = dim3(out->d/p->group, p->group);
// for( g=0;g<p->group;g++) {/
hipLaunchKernelGGL(( gpu_conv_2) , dim3(grid), dim3(block) , 0, 0, in_gpu, w_gpu, out_gpu, in->w , in->h, w->w , w->h, out->w, out->h, Kx, Ky, p->Sx, p->Sy, p->group, out->d, in->d);
// }
}
gpu2blob(out, out_gpu);
if (!output_reuse){
cudaCheckError(hipFree(in_gpu));
}
else{
output_reuse = false;
}
cudaCheckError(hipFree(w_gpu));
output_reuse = true;
// for(int g=0;g<p->group;g++)
// for(int o=g*(out->d/p->group);o<(g+1)*(out->d/p->group);o++)
// for(int i=g*(in->d/p->group);i<(g+1)*(in->d/p->group);i++)
// for(int m=0;m<out->h;m++)
// for(int n=0;n<out->w;n++)
// for(int k=0;k<Ky;k++)
// for(int l=0;l<Kx;l++)
// //note: absolute starting i is subtracted for the weights, see load_weights function for more info
// blob_data(out,o,m,n)+=blob_data(in, i, m*p->Sy+k, n*p->Sx+l) * blob_data(w, o, i-(g*(in->d/p->group)), k*Kx + l);
//free weights
blob_free(w);
//done with padded blob, free
if(p->pad!=0)
blob_free(in);
//perform batchnorm if needed
if(p->bn_mean!=NULL){
//load batchnorm mean and variance
float* mean = load_1d(p->bn_mean, out->d);
float* var = load_1d(p->bn_var, out->d);
//batchnorm
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n)= (blob_data(out,o,m,n) - mean[o])/sqrtf(var[o]+p->bn_eps);
//free mean and variance
free(mean);
free(var);
}
//perform scale if needed
if(p->scale!=NULL){
//load scale parameters
float* scale = load_1d(p->scale, out->d);
float* scale_bias = load_1d(p->scale_bias, out->d);
//scale
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n) = blob_data(out,o,m,n)*scale[o] + scale_bias[o];
//free parameters
free(scale);
free(scale_bias);
}
//perform relu
if(p->relu==true)
for(int i=0;i<blob_size(out); i++)
out->data[i] = fmax(0.0f, out->data[i]);
//return output
return out;
}
| 9fc1d3976109ed8e870bdd536ec675503fdaf6e7.cu | #include "convolution.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "logging.h"
/*
The maximum number of threads in the block is limited to 1024. This is the product of whatever your threadblock dimensions are (x*y*z). For example (32,32,1) creates a block of 1024 threads. (33,32,1) is not legal, since 33*32*1 > 1024.
grid
x: 2^31 - 1
y,z: 65535
thread
x,y: 1024
z: 64
960
1000
1280
12544
*/
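// gpu_conv_1: used by convolution() below for large output maps (o_w*o_h > 1023).
// It is launched once per (group, output map, input map) triple, with the output
// map tiled over a 4x4 grid of blocks so no block exceeds the 1024-thread limit
// quoted above. Only 1x1 and 3x3 kernels are handled (the else branch assumes 3x3).
//
// A minimal illustration of that launch-shape choice (hypothetical helper, not
// called anywhere in this file; it simply mirrors the branch in convolution()):
static inline void sketch_launch_shape(int o_w, int o_h, int o_d, int group,
                                       dim3 *grid, dim3 *block) {
    if (o_w * o_h > 1023) {
        // large maps: tile each map over a 4x4 grid, (o_w/4)*(o_h/4) threads per block
        *grid  = dim3(4, 4, 1);
        *block = dim3(o_w / 4, o_h / 4, 1);
    } else {
        // small maps: one block per output map, o_w*o_h threads per block,
        // grid spans (maps per group) x (groups)
        *block = dim3(o_w, o_h, 1);
        *grid  = dim3(o_d / group, group, 1);
    }
}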
__global__ void gpu_conv_1(float * img, float * weight, float * out, int i_w, int i_h, int w_w, int w_h, int o_w, int o_h, int i_d, int g, int o, int i, int Kx, int Ky, int Sx, int Sy, int group) {
unsigned int m = blockIdx.y*blockDim.y + threadIdx.y;
unsigned int n = blockIdx.x*blockDim.x + threadIdx.x;
if (Kx == 1) {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
} else {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+2];
}
}
__global__ void gpu_conv_2(float * img, float * weight, float * out, int i_w, int i_h, int w_w, int w_h, int o_w, int o_h, int Kx, int Ky, int Sx, int Sy, int group, int o_d, int i_d) {
unsigned int g = blockIdx.y;
unsigned int o = g*(o_d/group)+blockIdx.x;
int m = threadIdx.y;
int n = threadIdx.x;
for(int i=g*(i_d/group);i<(g+1)*(i_d/group);i++)
if (Kx == 1) {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
} else {
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+1)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + Kx+2];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+1] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+1];
out[o*o_w*o_h + m*o_w + n] +=
img[i*i_h*i_w + (m*Sy+2)*i_w + n*Sx+2] *
weight[o*w_w*w_h + (i-(g*(i_d/group)))*w_w + 2*Kx+2];
}
}
//add padding to blob
BLOB* pad(BLOB* in, int pad){
//create output blob
BLOB* out = blob_calloc(in->d, in->h+2*pad, in->w+pad*2);
//copy non-padded input into output blob
for(int z=0;z<in->d;z++)
for(int y=0;y<in->h;y++)
for(int x=0;x<in->w;x++)
blob_data(out,z,y+pad,x+pad)= blob_data(in,z,y,x);
//return pointer to padded blob
return out;
}
BLOB* load_weights(BLOB* b, conv_param_t* p){
//open weights file for reading
FILE* fp = fopen(p->weights, "rb");
if(fp==NULL)
error("could not open file %s for reading\n",p->weights);
//for fully connected layers the kernel size is equal to the input size
int Ky=(p->fc)?b->h:p->Ky;
int Kx=(p->fc)?b->w:p->Kx;
//allocate 3D blob, and emulate 4D in KxKy later
BLOB* w=blob_alloc(p->num_out, b->d/p->group, Ky*Kx);
//fill 4D weight structure
for(int g=0;g<p->group;g++)
for(int o=g*(p->num_out/p->group);o<(g+1)*(p->num_out/p->group);o++)
for(int i=g*(b->d/p->group);i<(g+1)*(b->d/p->group);i++)
//note: each output map has only b->d/p->group input maps. Hence the absolute index of i is subtracted when storing in w!
if((int)fread( &(blob_data(w,o,i-g*(b->d/p->group),0)),sizeof(float),Ky*Kx, fp)!=Ky*Kx)
error("loading weights from file %s\n", p->weights);
//close file
fclose(fp);
//return weight blob
return w;
}
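// Worked example of the grouped indexing above (the numbers are illustrative, not
// taken from any particular network): with p->group == 2 and b->d == 4, an output
// map o in group g == 1 convolves input maps i = 2..3, and load_weights() stores
// their kernels at the local indices i - g*(b->d/p->group), i.e. 2 - 2 = 0 and
// 3 - 2 = 1, which is why convolution() applies the same subtraction when reading w.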
float* load_1d(const char* fname, size_t num){
//open file for reading
FILE* fp = fopen(fname, "rb");
if(fp==NULL)
error("could not open file %s for reading\n",fname);
//read in array
float* arr= (float*) malloc(sizeof(float)*num);
if(fread(arr,sizeof(float), num, fp)!=num)
error("loading data from file %s\n", fname);
//close file
fclose(fp);
return arr;
}
//convolution, NOTE: destructive of BLOB* in. duplicate if further required!
BLOB* convolution(BLOB* input, conv_param_t* p){
//use local pointer
BLOB* in = input;
BLOB* out;
static bool output_reuse = false;
//padding of input if required
if(p->pad!=0)
in = pad(in, p->pad);
//if fully connected, the kernel size is set to the image size
int Ky=(p->fc)?in->h:p->Ky;
int Kx=(p->fc)?in->w:p->Kx;
//create blob to hold output
int height=(int)floor(((float)in->h - (float)Ky)/(float)p->Sy)+1;
int width =(int)floor(((float)in->w - (float)Kx)/(float)p->Sx)+1;
//load bias if required
if(p->bias==NULL){
//zero init
out = blob_calloc(p->num_out, height, width);
}else{
//not required to calloc
out = blob_alloc(p->num_out, height, width);
//load bias values from file
float* bias =load_1d(p->bias, p->num_out);
//set bias or init with zeroes
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n)=bias[o];
//cleanup bias
free(bias);
}
//load weights
BLOB* w = load_weights(in, p);
float *in_gpu, *w_gpu;
static float *out_gpu;
dim3 block, grid;
if (!output_reuse){
blob2gpu(in_gpu, in);
}
else{
in_gpu = out_gpu;
}
blob2gpu(w_gpu, w);
// Allocate device memory for the output of the convolution
out_gpu =
cudaCheckError(cudaMalloc(&out_gpu, blob_bytes(out)));
blob2gpu(out_gpu, out);
if(out->w*out->h > 1023){
grid = dim3(4, 4, 1);
block = dim3(out->w/4, out->h/4);
int o, g, i;
//perform convolution
for( g=0;g<p->group;g++) {
for( o=g*(out->d/p->group);o<(g+1)*(out->d/p->group);o++) {
for( i=g*(in->d/p->group);i<(g+1)*(in->d/p->group);i++) {
gpu_conv_1 <<<grid, block>>> (in_gpu, w_gpu, out_gpu, in->w , in->h, w->w , w->h, out->w, out->h, in->d, g, o, i, Kx, Ky, p->Sx, p->Sy, p->group);
}
}
}
}
else{
block = dim3(out->w, out->h);
grid = dim3(out->d/p->group, p->group);
// for( g=0;g<p->group;g++) {/
gpu_conv_2 <<< grid, block >>> (in_gpu, w_gpu, out_gpu, in->w , in->h, w->w , w->h, out->w, out->h, Kx, Ky, p->Sx, p->Sy, p->group, out->d, in->d);
// }
}
gpu2blob(out, out_gpu);
if (!output_reuse){
cudaCheckError(cudaFree(in_gpu));
}
else{
output_reuse = false;
}
cudaCheckError(cudaFree(w_gpu));
output_reuse = true;
// for(int g=0;g<p->group;g++)
// for(int o=g*(out->d/p->group);o<(g+1)*(out->d/p->group);o++)
// for(int i=g*(in->d/p->group);i<(g+1)*(in->d/p->group);i++)
// for(int m=0;m<out->h;m++)
// for(int n=0;n<out->w;n++)
// for(int k=0;k<Ky;k++)
// for(int l=0;l<Kx;l++)
// //note: absolute starting i is subtracted for the weights, see load_weights function for more info
// blob_data(out,o,m,n)+=blob_data(in, i, m*p->Sy+k, n*p->Sx+l) * blob_data(w, o, i-(g*(in->d/p->group)), k*Kx + l);
//free weights
blob_free(w);
//done with padded blob, free
if(p->pad!=0)
blob_free(in);
//perform batchnorm if needed
if(p->bn_mean!=NULL){
//load batchnorm mean and variance
float* mean = load_1d(p->bn_mean, out->d);
float* var = load_1d(p->bn_var, out->d);
//batchnorm
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n)= (blob_data(out,o,m,n) - mean[o])/sqrtf(var[o]+p->bn_eps);
//free mean and variance
free(mean);
free(var);
}
//perform scale if needed
if(p->scale!=NULL){
//load scale parameters
float* scale = load_1d(p->scale, out->d);
float* scale_bias = load_1d(p->scale_bias, out->d);
//scale
for(int o=0;o<out->d;o++)
for(int m=0;m<out->h;m++)
for(int n=0;n<out->w;n++)
blob_data(out,o,m,n) = blob_data(out,o,m,n)*scale[o] + scale_bias[o];
//free parameters
free(scale);
free(scale_bias);
}
//perform relu
if(p->relu==true)
for(int i=0;i<blob_size(out); i++)
out->data[i] = fmax(0.0f, out->data[i]);
//return output
return out;
}
|
9c676bd66bdf3191b5d0e1aff4989b390cb9bf7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_detection.cu
* \brief MultiBoxDetection op
* \author Joshua Zhang
*/
#include "./multibox_detection-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOX_DETECTION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ void Clip(DType *value, const DType lower, const DType upper) {
if ((*value) < lower) *value = lower;
if ((*value) > upper) *value = upper;
}
template<typename DType>
__device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) {
DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0]));
DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1]));
DType i = w * h;
DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i;
(*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u);
}
template<typename DType>
__global__
__launch_bounds__(cuda::kMaxThreadsPerBlock)
void DetectionForwardKernel(DType *out, const DType *cls_prob,
const DType *loc_pred, const DType *anchors,
DType *temp_space, const int num_classes,
const int num_anchors, const float threshold,
const bool clip, const float vx,
const float vy, const float vw,
const float vh, const float nms_threshold,
const bool force_suppress, const int nms_topk) {
const int nbatch = blockIdx.x; // each block for each batch
int index = threadIdx.x;
__shared__ int valid_count;
out += nbatch * num_anchors * 6;
cls_prob += nbatch * num_anchors * num_classes;
loc_pred += nbatch * num_anchors * 4;
if (index == 0) {
valid_count = 0;
}
__syncthreads();
// apply prediction to anchors
for (int i = index; i < num_anchors; i += blockDim.x) {
DType score = -1;
int id = 0;
for (int j = 1; j < num_classes; ++j) {
DType temp = cls_prob[j * num_anchors + i];
if (temp > score) {
score = temp;
id = j;
}
}
if (id > 0 && score < threshold) {
id = 0;
}
if (id > 0) {
// valid class
int pos = atomicAdd(&valid_count, 1);
out[pos * 6] = id - 1; // restore original class id
out[pos * 6 + 1] = (id == 0 ? DType(-1) : score);
int offset = i * 4;
DType al = anchors[offset];
DType at = anchors[offset + 1];
DType ar = anchors[offset + 2];
DType ab = anchors[offset + 3];
DType aw = ar - al;
DType ah = ab - at;
DType ax = (al + ar) / 2.f;
DType ay = (at + ab) / 2.f;
DType ox = loc_pred[offset] * vx * aw + ax;
DType oy = loc_pred[offset + 1] * vy * ah + ay;
DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2;
DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2;
DType xmin = ox - ow;
DType ymin = oy - oh;
DType xmax = ox + ow;
DType ymax = oy + oh;
if (clip) {
Clip(&xmin, DType(0), DType(1));
Clip(&ymin, DType(0), DType(1));
Clip(&xmax, DType(0), DType(1));
Clip(&ymax, DType(0), DType(1));
}
out[pos * 6 + 2] = xmin;
out[pos * 6 + 3] = ymin;
out[pos * 6 + 4] = xmax;
out[pos * 6 + 5] = ymax;
}
}
__syncthreads();
if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return;
// if (index == 0) printf("%d\n", valid_count);
// sort in descending order according to scores
const int size = valid_count;
temp_space += nbatch * num_anchors * 6;
DType *src = out;
DType *dst = temp_space;
for (int width = 2; width < (size << 1); width <<= 1) {
int slices = (size - 1) / (blockDim.x * width) + 1;
int start = width * index * slices;
for (int slice = 0; slice < slices; ++slice) {
if (start >= size) break;
int middle = start + (width >> 1);
if (middle > size) middle = size;
int end = start + width;
if (end > size) end = size;
int i = start;
int j = middle;
for (int k = start; k < end; ++k) {
DType score_i = i < size ? src[i * 6 + 1] : DType(-1);
DType score_j = j < size ? src[j * 6 + 1] : DType(-1);
if (i < middle && (j >= end || score_i > score_j)) {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[i * 6 + n];
}
++i;
} else {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[j * 6 + n];
}
++j;
}
}
start += width;
}
__syncthreads();
src = src == out? temp_space : out;
dst = dst == out? temp_space : out;
}
__syncthreads();
if (src == temp_space) {
// copy from temp to out
for (int i = index; i < size * 6; i += blockDim.x) {
out[i] = temp_space[i];
}
__syncthreads();
}
// keep top k detections
int ntop = size;
if (nms_topk > 0 && nms_topk < ntop) {
ntop = nms_topk;
for (int i = ntop + index; i < size; i += blockDim.x) {
out[i * 6] = -1;
}
__syncthreads();
}
// apply NMS
for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) {
DType compare_id = out[compare_pos * 6];
if (compare_id < 0) continue; // not a valid positive detection, skip
DType *compare_loc_ptr = out + compare_pos * 6 + 2;
for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) {
DType class_id = out[i * 6];
if (class_id < 0) continue;
if (force_suppress || (class_id == compare_id)) {
DType iou;
CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou);
if (iou >= nms_threshold) {
out[i * 6] = -1;
}
}
}
__syncthreads();
}
}
} // namespace cuda
template<typename DType>
inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out,
const Tensor<gpu, 3, DType> &cls_prob,
const Tensor<gpu, 2, DType> &loc_pred,
const Tensor<gpu, 2, DType> &anchors,
const Tensor<gpu, 3, DType> &temp_space,
const float threshold,
const bool clip,
const mxnet::Tuple<float> &variances,
const float nms_threshold,
const bool force_suppress,
const int nms_topk) {
CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4";
const int num_classes = cls_prob.size(1);
const int num_anchors = cls_prob.size(2);
const int num_batches = cls_prob.size(0);
const int num_threads = cuda::kMaxThreadsPerBlock;
int num_blocks = num_batches;
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward");
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_,
cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_,
num_classes, num_anchors, threshold, clip,
variances[0], variances[1], variances[2], variances[3],
nms_threshold, force_suppress, nms_topk);
MULTIBOX_DETECTION_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) {
Operator *op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxDetectionOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 9c676bd66bdf3191b5d0e1aff4989b390cb9bf7a.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_detection.cu
* \brief MultiBoxDetection op
* \author Joshua Zhang
*/
#include "./multibox_detection-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOX_DETECTION_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__device__ void Clip(DType *value, const DType lower, const DType upper) {
if ((*value) < lower) *value = lower;
if ((*value) > upper) *value = upper;
}
template<typename DType>
__device__ void CalculateOverlap(const DType *a, const DType *b, DType *iou) {
DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0]));
DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1]));
DType i = w * h;
DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i;
(*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u);
}
template<typename DType>
__global__
__launch_bounds__(cuda::kMaxThreadsPerBlock)
void DetectionForwardKernel(DType *out, const DType *cls_prob,
const DType *loc_pred, const DType *anchors,
DType *temp_space, const int num_classes,
const int num_anchors, const float threshold,
const bool clip, const float vx,
const float vy, const float vw,
const float vh, const float nms_threshold,
const bool force_suppress, const int nms_topk) {
const int nbatch = blockIdx.x; // each block for each batch
int index = threadIdx.x;
__shared__ int valid_count;
out += nbatch * num_anchors * 6;
cls_prob += nbatch * num_anchors * num_classes;
loc_pred += nbatch * num_anchors * 4;
if (index == 0) {
valid_count = 0;
}
__syncthreads();
// apply prediction to anchors
for (int i = index; i < num_anchors; i += blockDim.x) {
DType score = -1;
int id = 0;
for (int j = 1; j < num_classes; ++j) {
DType temp = cls_prob[j * num_anchors + i];
if (temp > score) {
score = temp;
id = j;
}
}
if (id > 0 && score < threshold) {
id = 0;
}
if (id > 0) {
// valid class
int pos = atomicAdd(&valid_count, 1);
out[pos * 6] = id - 1; // restore original class id
out[pos * 6 + 1] = (id == 0 ? DType(-1) : score);
int offset = i * 4;
DType al = anchors[offset];
DType at = anchors[offset + 1];
DType ar = anchors[offset + 2];
DType ab = anchors[offset + 3];
DType aw = ar - al;
DType ah = ab - at;
DType ax = (al + ar) / 2.f;
DType ay = (at + ab) / 2.f;
DType ox = loc_pred[offset] * vx * aw + ax;
DType oy = loc_pred[offset + 1] * vy * ah + ay;
DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2;
DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2;
DType xmin = ox - ow;
DType ymin = oy - oh;
DType xmax = ox + ow;
DType ymax = oy + oh;
if (clip) {
Clip(&xmin, DType(0), DType(1));
Clip(&ymin, DType(0), DType(1));
Clip(&xmax, DType(0), DType(1));
Clip(&ymax, DType(0), DType(1));
}
out[pos * 6 + 2] = xmin;
out[pos * 6 + 3] = ymin;
out[pos * 6 + 4] = xmax;
out[pos * 6 + 5] = ymax;
}
}
__syncthreads();
if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return;
// if (index == 0) printf("%d\n", valid_count);
// sort in descending order according to scores
const int size = valid_count;
temp_space += nbatch * num_anchors * 6;
DType *src = out;
DType *dst = temp_space;
for (int width = 2; width < (size << 1); width <<= 1) {
int slices = (size - 1) / (blockDim.x * width) + 1;
int start = width * index * slices;
for (int slice = 0; slice < slices; ++slice) {
if (start >= size) break;
int middle = start + (width >> 1);
if (middle > size) middle = size;
int end = start + width;
if (end > size) end = size;
int i = start;
int j = middle;
for (int k = start; k < end; ++k) {
DType score_i = i < size ? src[i * 6 + 1] : DType(-1);
DType score_j = j < size ? src[j * 6 + 1] : DType(-1);
if (i < middle && (j >= end || score_i > score_j)) {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[i * 6 + n];
}
++i;
} else {
for (int n = 0; n < 6; ++n) {
dst[k * 6 + n] = src[j * 6 + n];
}
++j;
}
}
start += width;
}
__syncthreads();
src = src == out? temp_space : out;
dst = dst == out? temp_space : out;
}
__syncthreads();
if (src == temp_space) {
// copy from temp to out
for (int i = index; i < size * 6; i += blockDim.x) {
out[i] = temp_space[i];
}
__syncthreads();
}
// keep top k detections
int ntop = size;
if (nms_topk > 0 && nms_topk < ntop) {
ntop = nms_topk;
for (int i = ntop + index; i < size; i += blockDim.x) {
out[i * 6] = -1;
}
__syncthreads();
}
// apply NMS
for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) {
DType compare_id = out[compare_pos * 6];
if (compare_id < 0) continue; // not a valid positive detection, skip
DType *compare_loc_ptr = out + compare_pos * 6 + 2;
for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) {
DType class_id = out[i * 6];
if (class_id < 0) continue;
if (force_suppress || (class_id == compare_id)) {
DType iou;
CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou);
if (iou >= nms_threshold) {
out[i * 6] = -1;
}
}
}
__syncthreads();
}
}
} // namespace cuda
template<typename DType>
inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType> &out,
const Tensor<gpu, 3, DType> &cls_prob,
const Tensor<gpu, 2, DType> &loc_pred,
const Tensor<gpu, 2, DType> &anchors,
const Tensor<gpu, 3, DType> &temp_space,
const float threshold,
const bool clip,
const mxnet::Tuple<float> &variances,
const float nms_threshold,
const bool force_suppress,
const int nms_topk) {
CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4";
const int num_classes = cls_prob.size(1);
const int num_anchors = cls_prob.size(2);
const int num_batches = cls_prob.size(0);
const int num_threads = cuda::kMaxThreadsPerBlock;
int num_blocks = num_batches;
cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward");
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_,
cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_,
num_classes, num_anchors, threshold, clip,
variances[0], variances[1], variances[2], variances[3],
nms_threshold, force_suppress, nms_topk);
MULTIBOX_DETECTION_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator *CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) {
Operator *op = nullptr;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxDetectionOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
e77ceef3cdaaffecccae7559f6b53cfdc580adb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <dvcxx/backend/cuda/api.hpp>
#include <dvcxx/backend/cuda/exceptions.hpp>
#include <iostream>
namespace dvcxx {
namespace cuda {
namespace wrappers {
void memcpy( void* dest, const void* src, size_t len ) {
CUDA_THROW( hipMemcpy( dest, src, len, hipMemcpyDefault ) );
}
void memcpy2d( void* dest, size_t dpitch, const void* src, size_t spitch,
size_t width, size_t height ) {
CUDA_THROW( hipMemcpy2D( dest, dpitch, src, spitch, width, height,
hipMemcpyDefault ) );
}
void* malloc( size_t len ) {
void* ptr;
CUDA_THROW( hipMalloc( &ptr, len ) );
std::cout << "CUDA MALLOC " << len << ", " << ptr << std::endl;
return ptr;
}
void* malloc_pinned( size_t len ) {
void* ptr;
CUDA_THROW( hipHostMalloc( &ptr, len ) );
//std::cout << "CUDA MALLOC HOST" << len << ", " << ptr << std::endl;
return ptr;
}
void* malloc_unified( size_t len ) {
void* ptr;
CUDA_THROW( hipMallocManaged( &ptr, len ) );
std::cout << "CUDA MALLOC UNIFIED " << len << ", " << ptr << std::endl;
return ptr;
}
void free( void* ptr ) {
std::cout << "CUDA FREE " << ptr << std::endl;
CUDA_THROW( hipFree( ptr ) );
}
void free_pinned( void* ptr ) {
//std::cout << "CUDA FREE Host" << ptr << std::endl;
CUDA_THROW( hipHostFree( ptr ) );
}
void memset( void* data, int val, size_t len ) {
CUDA_THROW( hipMemset( data, val, len ) );
}
void device_sync() {
CUDA_THROW( hipDeviceSynchronize() );
}
} // namespace wrappers
} // namespace cuda
} // namespace dvcxx
| e77ceef3cdaaffecccae7559f6b53cfdc580adb1.cu | #include <dvcxx/backend/cuda/api.hpp>
#include <dvcxx/backend/cuda/exceptions.hpp>
#include <iostream>
namespace dvcxx {
namespace cuda {
namespace wrappers {
void memcpy( void* dest, const void* src, size_t len ) {
CUDA_THROW( cudaMemcpy( dest, src, len, cudaMemcpyDefault ) );
}
void memcpy2d( void* dest, size_t dpitch, const void* src, size_t spitch,
size_t width, size_t height ) {
CUDA_THROW( cudaMemcpy2D( dest, dpitch, src, spitch, width, height,
cudaMemcpyDefault ) );
}
void* malloc( size_t len ) {
void* ptr;
CUDA_THROW( cudaMalloc( &ptr, len ) );
std::cout << "CUDA MALLOC " << len << ", " << ptr << std::endl;
return ptr;
}
void* malloc_pinned( size_t len ) {
void* ptr;
CUDA_THROW( cudaMallocHost( &ptr, len ) );
//std::cout << "CUDA MALLOC HOST" << len << ", " << ptr << std::endl;
return ptr;
}
void* malloc_unified( size_t len ) {
void* ptr;
CUDA_THROW( cudaMallocManaged( &ptr, len ) );
std::cout << "CUDA MALLOC UNIFIED " << len << ", " << ptr << std::endl;
return ptr;
}
void free( void* ptr ) {
std::cout << "CUDA FREE " << ptr << std::endl;
CUDA_THROW( cudaFree( ptr ) );
}
void free_pinned( void* ptr ) {
//std::cout << "CUDA FREE Host" << ptr << std::endl;
CUDA_THROW( cudaFreeHost( ptr ) );
}
void memset( void* data, int val, size_t len ) {
CUDA_THROW( cudaMemset( data, val, len ) );
}
void device_sync() {
CUDA_THROW( cudaDeviceSynchronize() );
}
} // namespace wrappers
} // namespace cuda
} // namespace dvcxx
|
f75207eb1ce9a9dcdf6948c49582a33d821b7ecf.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
> File Name: 07cultime.cu
> Author: dong xu
> Mail: [email protected]
> Created Time: 2016-04-08 (Friday) 10:18:09
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
//A function declared with __global__ tells the compiler that this code is called from the CPU and executed on the GPU
__global__ void mul(int *dev_a,const int NUM)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int dis=blockDim.x * gridDim.x;
while(idx<NUM)
{
dev_a[idx]=dev_a[idx]%23*dev_a[idx]*5%9;
idx+=dis;
}
}
int main(void)
{
const int thread_pre_block = 64; //number of threads per block
const int block_pre_grid = 8; //number of blocks in the grid
const int NUM = 45056;
//allocate host memory and initialize it
int host_a[NUM];
for(int i=0;i<NUM;i++)
host_a[i]=i;
//define the error status, defaulting to hipSuccess(0)
hipError_t err = hipSuccess;
//allocate GPU memory
int *dev_a;
err=hipMalloc((void **)&dev_a, sizeof(int)*NUM);
if(err!=hipSuccess)
{
perror("the hipMalloc on GPU is failed");
return 1;
}
//copy the data to be computed to the GPU with hipMemcpy
hipMemcpy(dev_a,host_a,sizeof(host_a),hipMemcpyHostToDevice);
dim3 threads = dim3(thread_pre_block);
dim3 blocks = dim3(block_pre_grid);
//use events to measure the elapsed time
float time_elapsed=0;
hipEvent_t start,stop;
hipEventCreate(&start); //create the events
hipEventCreate(&stop);
hipEventRecord( start,0); //record the current time
hipLaunchKernelGGL(( mul), dim3(blocks), dim3(threads), 0, 0, dev_a,NUM);
hipEventRecord( stop,0); //record the current time
hipEventSynchronize(start); //Waits for an event to complete.
hipEventSynchronize(stop); //Waits for an event to complete (the work issued before the Record)
hipEventElapsedTime(&time_elapsed,start,stop); //compute the elapsed time
hipMemcpy(&host_a,dev_a,sizeof(host_a),hipMemcpyDeviceToHost); //copy the result back to the CPU
hipEventDestroy(start); //destroy the event
hipEventDestroy(stop);
hipFree(dev_a);//free GPU memory
printf("Execution time: %f(ms)\n",time_elapsed);
return 0 ;
}
| f75207eb1ce9a9dcdf6948c49582a33d821b7ecf.cu | /*************************************************************************
> File Name: 07cultime.cu
> Author: dong xu
> Mail: [email protected]
> Created Time: 2016-04-08 (Friday) 10:18:09
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
//A function declared with __global__ tells the compiler that this code is called from the CPU and executed on the GPU
__global__ void mul(int *dev_a,const int NUM)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int dis=blockDim.x * gridDim.x;
while(idx<NUM)
{
dev_a[idx]=dev_a[idx]%23*dev_a[idx]*5%9;
idx+=dis;
}
}
int main(void)
{
const int thread_pre_block = 64; //number of threads per block
const int block_pre_grid = 8; //number of blocks in the grid
const int NUM = 45056;
//allocate host memory and initialize it
int host_a[NUM];
for(int i=0;i<NUM;i++)
host_a[i]=i;
//define the cudaError, defaulting to cudaSuccess(0)
cudaError_t err = cudaSuccess;
//allocate GPU memory
int *dev_a;
err=cudaMalloc((void **)&dev_a, sizeof(int)*NUM);
if(err!=cudaSuccess)
{
perror("the cudaMalloc on GPU is failed");
return 1;
}
//copy the data to be computed to the GPU with cudaMemcpy
cudaMemcpy(dev_a,host_a,sizeof(host_a),cudaMemcpyHostToDevice);
dim3 threads = dim3(thread_pre_block);
dim3 blocks = dim3(block_pre_grid);
//use events to measure the elapsed time
float time_elapsed=0;
cudaEvent_t start,stop;
cudaEventCreate(&start); //create the events
cudaEventCreate(&stop);
cudaEventRecord( start,0); //record the current time
mul<<<blocks, threads, 0, 0>>>(dev_a,NUM);
cudaEventRecord( stop,0); //record the current time
cudaEventSynchronize(start); //Waits for an event to complete.
cudaEventSynchronize(stop); //Waits for an event to complete (the work issued before the Record)
cudaEventElapsedTime(&time_elapsed,start,stop); //compute the elapsed time
cudaMemcpy(&host_a,dev_a,sizeof(host_a),cudaMemcpyDeviceToHost); //copy the result back to the CPU
cudaEventDestroy(start); //destroy the event
cudaEventDestroy(stop);
cudaFree(dev_a);//free GPU memory
printf("Execution time: %f(ms)\n",time_elapsed);
return 0 ;
}
|
f981d1b99e220a416704ffc60915057f410009c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <time.h>
#include <random>
#include <vector>
#include <fstream>
#include <omp.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "timer.h"
#include "cuda_error_check.h"
#include "alloc.h"
#include "reduce_block_1d.h"
#include "complex.h"
//#include "special_functions.cuh"
#include "update.h"
#include "staple.h"
#include "enum.h"
#include "tune.h"
#include "lattice_functions.h"
namespace U1{
__global__ void kernel_hotstart(double *lat, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
lat[id + parity * HalfVolume() + mu * Volume()] = Random<double>(localState, 0., 2.) * M_PI;
}
rng_state[ id ] = localState;
}
void HotStart(Array<double> *dev_lat, CudaRNG *rng_state){
// kernel number of threads per block and number of blocks
int threads = 128;
int blocks = (HalfVolume() + threads - 1) / threads;
hipLaunchKernelGGL(( kernel_hotstart), dim3(blocks),dim3(threads), 0, 0, dev_lat->getPtr(), rng_state->getPtr());
}
void metropolis(double *lat){
std::uniform_real_distribution<double> rand02(0., 2.);
std::uniform_real_distribution<double> rand01(0,1);
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
#pragma omp parallel for
for(int id = 0; id < HalfVolume(); ++id){
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
double r = std::sqrt( stapleRe*stapleRe + stapleIm*stapleIm );
double t2 = atan2(stapleIm, stapleRe);
double new_phase = M_PI * rand02(generator[omp_get_thread_num()]);
double b = rand01(generator[omp_get_thread_num()]);
double S1 = cos(phase_old + t2);
double S2 = cos(new_phase + t2);
double dS = exp(Beta()*r*(S2-S1));
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
PARAMS::accept_ratio += 1.;
}
}
}
}
void overrelaxation(double *lat){
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
#pragma omp parallel for
for(int id = 0; id < HalfVolume(); ++id){
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
}
}
__global__ void kernel_metropolis_old(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
double r = sqrt( stapleRe*stapleRe + stapleIm*stapleIm );
double t2 = atan2(stapleIm, stapleRe);
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
double S1 = cos(phase_old + t2);
double S2 = cos(new_phase + t2);
double dS = exp(Beta()*r*(S2-S1));
//complexd st(stapleRe, stapleIm);
//if(id==0) printf("%.12e\t%.12e \n", dS, exp(Beta()*(st*exp_ir(new_phase)).real())/exp(Beta()*(st*exp_ir(phase_old)).real()));
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
rng_state[ id ] = localState;
}
__global__ void kernel_metropolis_test(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
/*complexd staple = Staple(lat, id, parity, mu);
double S1 = exp(Beta()*(1.0-staple*exp_ir(phase_old)).real());
double new_phase = Random<double>(localState) * 2. * M_PI;
double S2 = exp(Beta()*(1.0-staple*exp_ir(new_phase)).real());
double dS = S2/S1;
double b = Random<double>(localState);*/
complexd stapleSS, stapleST;
Staple(lat, id, parity, mu, stapleSS, stapleST);
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
double SS1 = (Beta() / Aniso())*((stapleSS*exp_ir(phase_old)).real()) + (Beta() * Aniso())*( (stapleST*exp_ir(phase_old)).real());
double SS2 = (Beta() / Aniso())*( (stapleSS*exp_ir(new_phase)).real()) + (Beta() * Aniso())*( (stapleST*exp_ir(new_phase)).real());
double S1 = exp(SS1);
double S2 = exp(SS2);
double dS = S2/S1;
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
rng_state[ id ] = localState;
}
__global__ void kernel_metropolis(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
rng_state[ id ] = localState;
double dS = MetropolisFunc(lat, id, parity, mu, new_phase);
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
}
__global__ void kernel_overrelaxation_very_old(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
double stapleRe = 0., stapleIm = 0.;
staple_old(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
__global__ void kernel_overrelaxation_old(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
__global__ void kernel_overrelaxation(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
lat[id + parity * HalfVolume() + mu * Volume()] = OvrFunc(lat, id, parity, mu);
}
void UpdateLattice1(Array<double> *dev_lat, CudaRNG *rng_state, int metrop, int ovrn){
int threads = 128;
int blocks = (HalfVolume() + threads - 1) / threads;
// metropolis algorithm
for(int m = 0; m < metrop; ++m)
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); ++mu)
hipLaunchKernelGGL(( kernel_metropolis), dim3(blocks),dim3(threads), 0, 0, dev_lat->getPtr(), parity, mu, rng_state->getPtr());
// overrelaxation algorithm
for(int ovr = 0; ovr < ovrn; ++ovr)
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); ++mu)
hipLaunchKernelGGL(( kernel_overrelaxation), dim3(blocks),dim3(threads), 0, 0, dev_lat->getPtr(), parity, mu);
}
using namespace U1;
class Metropolis: Tunable{
private:
Array<double>* lat;
CudaRNG *rng_state;
int metrop;
int parity;
int mu;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( kernel_metropolis), dim3(tp.grid),dim3(tp.block), 0, stream, lat->getPtr(), parity, mu, rng_state->getPtr());
}
public:
Metropolis(Array<double>* lat, CudaRNG *rng_state, int metrop) : lat(lat), rng_state(rng_state), metrop(metrop){
size = HalfVolume();
timesec = 0.0;
}
~Metropolis(){};
void Run(const hipStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
for(int m = 0; m < metrop; ++m)
for(parity = 0; parity < 2; ++parity)
for(mu = 0; mu < Dirs(); ++mu)
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync();
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
}
void Run(){ return Run(0);}
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "Metropolis: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() {
lat->Backup();
rng_state->Backup();
}
void postTune() {
lat->Restore();
rng_state->Restore();
}
};
class OverRelaxation: Tunable{
private:
Array<double>* lat;
int ovrn;
int parity;
int mu;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( kernel_overrelaxation), dim3(tp.grid),dim3(tp.block), 0, stream, lat->getPtr(), parity, mu);
}
public:
OverRelaxation(Array<double>* lat, int ovrn) : lat(lat), ovrn(ovrn){
size = HalfVolume();
timesec = 0.0;
}
~OverRelaxation(){};
void Run(const hipStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
for(int m = 0; m < ovrn; ++m)
for(parity = 0; parity < 2; ++parity)
for(mu = 0; mu < Dirs(); ++mu)
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync( );
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
}
void Run(){ return Run(0);}
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "OverRelaxation: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() {
lat->Backup();
}
void postTune() {
lat->Restore();
}
};
void UpdateLattice(Array<double> *dev_lat, CudaRNG *rng_state, int metrop, int ovrn){
// metropolis algorithm
Metropolis mtp(dev_lat, rng_state, metrop);
mtp.Run();
// overrelaxation algorithm
OverRelaxation ovr(dev_lat, ovrn);
ovr.Run();
}
}
| f981d1b99e220a416704ffc60915057f410009c6.cu | #include <iostream>
#include <math.h>
#include <time.h>
#include <random>
#include <vector>
#include <fstream>
#include <omp.h>
#include <cuda.h>
#include <curand_kernel.h>
#include "timer.h"
#include "cuda_error_check.h"
#include "alloc.h"
#include "reduce_block_1d.h"
#include "complex.h"
//#include "special_functions.cuh"
#include "update.h"
#include "staple.h"
#include "enum.h"
#include "tune.h"
#include "lattice_functions.h"
namespace U1{
__global__ void kernel_hotstart(double *lat, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
lat[id + parity * HalfVolume() + mu * Volume()] = Random<double>(localState, 0., 2.) * M_PI;
}
rng_state[ id ] = localState;
}
void HotStart(Array<double> *dev_lat, CudaRNG *rng_state){
// kernel number of threads per block and number of blocks
int threads = 128;
int blocks = (HalfVolume() + threads - 1) / threads;
kernel_hotstart<<<blocks,threads>>>(dev_lat->getPtr(), rng_state->getPtr());
}
void metropolis(double *lat){
std::uniform_real_distribution<double> rand02(0., 2.);
std::uniform_real_distribution<double> rand01(0,1);
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
#pragma omp parallel for
for(int id = 0; id < HalfVolume(); ++id){
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
double r = std::sqrt( stapleRe*stapleRe + stapleIm*stapleIm );
double t2 = atan2(stapleIm, stapleRe);
double new_phase = M_PI * rand02(generator[omp_get_thread_num()]);
double b = rand01(generator[omp_get_thread_num()]);
double S1 = cos(phase_old + t2);
double S2 = cos(new_phase + t2);
double dS = exp(Beta()*r*(S2-S1));
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
PARAMS::accept_ratio += 1.;
}
}
}
}
void overrelaxation(double *lat){
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); mu++){
#pragma omp parallel for
for(int id = 0; id < HalfVolume(); ++id){
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
}
}
__global__ void kernel_metropolis_old(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
double r = sqrt( stapleRe*stapleRe + stapleIm*stapleIm );
double t2 = atan2(stapleIm, stapleRe);
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
double S1 = cos(phase_old + t2);
double S2 = cos(new_phase + t2);
double dS = exp(Beta()*r*(S2-S1));
//complexd st(stapleRe, stapleIm);
//if(id==0) printf("%.12e\t%.12e \n", dS, exp(Beta()*(st*exp_ir(new_phase)).real())/exp(Beta()*(st*exp_ir(phase_old)).real()));
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
rng_state[ id ] = localState;
}
__global__ void kernel_metropolis_test(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double phase_old = lat[id + parity * HalfVolume() + mu * Volume()];
int idmu1 = indexEO_neg(id, parity, mu, 1);
/*complexd staple = Staple(lat, id, parity, mu);
double S1 = exp(Beta()*(1.0-staple*exp_ir(phase_old)).real());
double new_phase = Random<double>(localState) * 2. * M_PI;
double S2 = exp(Beta()*(1.0-staple*exp_ir(new_phase)).real());
double dS = S2/S1;
double b = Random<double>(localState);*/
complexd stapleSS, stapleST;
Staple(lat, id, parity, mu, stapleSS, stapleST);
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
double SS1 = (Beta() / Aniso())*((stapleSS*exp_ir(phase_old)).real()) + (Beta() * Aniso())*( (stapleST*exp_ir(phase_old)).real());
double SS2 = (Beta() / Aniso())*( (stapleSS*exp_ir(new_phase)).real()) + (Beta() * Aniso())*( (stapleST*exp_ir(new_phase)).real());
double S1 = exp(SS1);
double S2 = exp(SS2);
double dS = S2/S1;
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
rng_state[ id ] = localState;
}
__global__ void kernel_metropolis(double *lat, int parity, int mu, cuRNGState *rng_state){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
cuRNGState localState = rng_state[ id ];
double new_phase = Random<double>(localState) * 2. * M_PI;
double b = Random<double>(localState);
rng_state[ id ] = localState;
double dS = MetropolisFunc(lat, id, parity, mu, new_phase);
if(dS > b){
lat[id + parity * HalfVolume() + mu * Volume()] = new_phase;
}
}
__global__ void kernel_overrelaxation_very_old(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
double stapleRe = 0., stapleIm = 0.;
staple_old(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
__global__ void kernel_overrelaxation_old(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
double stapleRe = 0., stapleIm = 0.;
staple(lat, id, parity, mu, stapleRe, stapleIm);
int pos = id + parity * HalfVolume() + mu * Volume();
double phase_old = lat[pos];
double t2 = atan2(stapleIm, stapleRe);
double new_phase = fmod(6.* M_PI - phase_old - 2. * t2, 2.* M_PI);
lat[pos] = new_phase;
}
__global__ void kernel_overrelaxation(double *lat, int parity, int mu){
size_t id = threadIdx.x + blockDim.x * blockIdx.x;
if( id >= HalfVolume() ) return ;
lat[id + parity * HalfVolume() + mu * Volume()] = OvrFunc(lat, id, parity, mu);
}
void UpdateLattice1(Array<double> *dev_lat, CudaRNG *rng_state, int metrop, int ovrn){
int threads = 128;
int blocks = (HalfVolume() + threads - 1) / threads;
// metropolis algorithm
for(int m = 0; m < metrop; ++m)
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); ++mu)
kernel_metropolis<<<blocks,threads>>>(dev_lat->getPtr(), parity, mu, rng_state->getPtr());
// overrelaxation algorithm
for(int ovr = 0; ovr < ovrn; ++ovr)
for(int parity = 0; parity < 2; ++parity)
for(int mu = 0; mu < Dirs(); ++mu)
kernel_overrelaxation<<<blocks,threads>>>(dev_lat->getPtr(), parity, mu);
}
using namespace U1;
class Metropolis: Tunable{
private:
Array<double>* lat;
CudaRNG *rng_state;
int metrop;
int parity;
int mu;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_metropolis<<<tp.grid,tp.block, 0, stream>>>(lat->getPtr(), parity, mu, rng_state->getPtr());
}
public:
Metropolis(Array<double>* lat, CudaRNG *rng_state, int metrop) : lat(lat), rng_state(rng_state), metrop(metrop){
size = HalfVolume();
timesec = 0.0;
}
~Metropolis(){};
void Run(const cudaStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
for(int m = 0; m < metrop; ++m)
for(parity = 0; parity < 2; ++parity)
for(mu = 0; mu < Dirs(); ++mu)
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync();
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
}
void Run(){ return Run(0);}
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "Metropolis: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() {
lat->Backup();
rng_state->Backup();
}
void postTune() {
lat->Restore();
rng_state->Restore();
}
};
class OverRelaxation: Tunable{
private:
Array<double>* lat;
int ovrn;
int parity;
int mu;
int size;
double timesec;
#ifdef TIMMINGS
Timer time;
#endif
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_overrelaxation<<<tp.grid,tp.block, 0, stream>>>(lat->getPtr(), parity, mu);
}
public:
OverRelaxation(Array<double>* lat, int ovrn) : lat(lat), ovrn(ovrn){
size = HalfVolume();
timesec = 0.0;
}
~OverRelaxation(){};
void Run(const cudaStream_t &stream){
#ifdef TIMMINGS
time.start();
#endif
for(int m = 0; m < ovrn; ++m)
for(parity = 0; parity < 2; ++parity)
for(mu = 0; mu < Dirs(); ++mu)
apply(stream);
cudaDevSync();
cudaCheckError("Kernel execution failed");
#ifdef TIMMINGS
cudaDevSync( );
time.stop();
timesec = time.getElapsedTimeInSec();
#endif
}
void Run(){ return Run(0);}
double flops(){ return ((double)flop() * 1.0e-9) / timesec;}
double bandwidth(){ return (double)bytes() / (timesec * (double)(1 << 30));}
long long flop() const { return 0;}
long long bytes() const{ return 0;}
double time(){ return timesec;}
void stat(){ cout << "OverRelaxation: " << time() << " s\t" << bandwidth() << " GB/s\t" << flops() << " GFlops" << endl;}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << PARAMS::Grid[0] << "x";
vol << PARAMS::Grid[1] << "x";
vol << PARAMS::Grid[2] << "x";
vol << PARAMS::Grid[3];
aux << "threads=" << size;
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune() {
lat->Backup();
}
void postTune() {
lat->Restore();
}
};
void UpdateLattice(Array<double> *dev_lat, CudaRNG *rng_state, int metrop, int ovrn){
// metropolis algorithm
Metropolis mtp(dev_lat, rng_state, metrop);
mtp.Run();
// overrelaxation algorithm
OverRelaxation ovr(dev_lat, ovrn);
ovr.Run();
}
}
|
5a45083fee74626674373d22c8dc2ba9496d7683.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CATCH_CONFIG_MAIN
#include "catch2/catch.hpp"
#include "common/cuda/Interp2D.cuh"
#include "common/cuda/cudaMemoryUtil.h"
#include <array>
#include <chrono>
#include <cmath>
#include <fstream>
#include <iostream>
__global__ void
gEvaluate(quad::Interp2D f, double x, double y, double* result)
{
*result = f(x, y);
}
double
Evaluate(quad::Interp2D f, double x, double y)
{
double* result = quad::cuda_malloc_managed<double>(1);
hipLaunchKernelGGL(( gEvaluate), dim3(1), dim3(1), 0, 0, f, x, y, result);
hipDeviceSynchronize();
double hResult = *result;
hipFree(result);
return hResult;
}
__global__ void
gClamp(quad::Interp2D f, double x, double y, double* result)
{
*result = f.clamp(x, y);
}
double
clamp(quad::Interp2D f, double x, double y)
{
double* result = quad::cuda_malloc_managed<double>(1);
hipLaunchKernelGGL(( gClamp), dim3(1), dim3(1), 0, 0, f, x, y, result);
hipDeviceSynchronize();
double hResult = *result;
hipFree(result);
return hResult;
}
void
test_clamp_interface()
{
constexpr std::size_t nx = 3; // rows
constexpr std::size_t ny = 2; // cols
std::array<double, nx> xs = {1., 2., 3.};
std::array<double, ny> ys = {4., 5.};
std::array<double, ny * nx> zs;
auto fxy = [](double x, double y) { return 3 * x * y + 2 * x + 4 * y; };
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
}
}
quad::Interp2D f(xs, ys, zs);
SECTION("interpolation works")
{
double x = 2.5;
double y = 4.5;
double true_result = 56.75;
double interpResult = Evaluate(f, x, y);
CHECK(interpResult == true_result);
}
SECTION("extrapolation gets clamped")
{
double clampRes = clamp(f, 0., 4.5);
double interpResult = clamp(f, 1., 4.5);
CHECK(clampRes == interpResult); // to the left
clampRes = clamp(f, 4., 4.5);
interpResult = clamp(f, 3., 4.5);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 2., 3.);
interpResult = clamp(f, 2., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 2., 5.5);
interpResult = clamp(f, 2., 5.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 0., 0.);
interpResult = clamp(f, 1., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 4., 3.);
interpResult = clamp(f, 3., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 0., 6.);
interpResult = clamp(f, 1., 5.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 4., 6.);
interpResult = clamp(f, 3., 5.);
CHECK(clampRes == interpResult);
}
}
void
test_interpolation_at_knots()
{
constexpr std::size_t nx = 3;
constexpr std::size_t ny = 2;
std::array<double, nx> const xs = {1., 2., 3.};
std::array<double, ny> const ys = {4., 5.};
auto fxy = [](double x, double y) { return 3 * x * y + 2 * x + 4 * y; };
std::array<double, ny * nx> zs;
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
}
}
quad::Interp2D f(xs, ys, zs);
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
CHECK(zs[j * nx + i] == fxy(x, y));
double interpResult = Evaluate(f, x, y);
CHECK(zs[j * nx + i] == interpResult);
}
}
}
void
test_on_bilinear()
{
constexpr std::size_t nx = 3;
constexpr std::size_t ny = 4;
std::array<double, nx> const xs = {1., 2., 3.};
std::array<double, ny> const ys = {1., 2., 3., 4.};
std::array<double, ny * nx> zs;
auto fxy = [](double x, double y) { return 2 * x + 3 * y - 5; };
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
CHECK(zs[j * nx + i] == fxy(x, y));
}
}
quad::Interp2D f(xs, ys, zs);
using IntegType = quad::Interp2D;
double interpResult = Evaluate(f, 2.5, 1.5);
CHECK(interpResult == 4.5);
}
TEST_CASE("clamp interface works")
{
test_clamp_interface();
}
TEST_CASE("Interp2D exact at knots")
{
test_interpolation_at_knots();
}
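// test_on_bilinear() is defined above but never registered with Catch2;
// assuming that was an oversight, run it here as well.
TEST_CASE("Interp2D reproduces a bilinear function")
{
  test_on_bilinear();
}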
| 5a45083fee74626674373d22c8dc2ba9496d7683.cu | #define CATCH_CONFIG_MAIN
#include "catch2/catch.hpp"
#include "common/cuda/Interp2D.cuh"
#include "common/cuda/cudaMemoryUtil.h"
#include <array>
#include <chrono>
#include <cmath>
#include <fstream>
#include <iostream>
__global__ void
gEvaluate(quad::Interp2D f, double x, double y, double* result)
{
*result = f(x, y);
}
double
Evaluate(quad::Interp2D f, double x, double y)
{
double* result = quad::cuda_malloc_managed<double>(1);
gEvaluate<<<1, 1>>>(f, x, y, result);
cudaDeviceSynchronize();
double hResult = *result;
cudaFree(result);
return hResult;
}
__global__ void
gClamp(quad::Interp2D f, double x, double y, double* result)
{
*result = f.clamp(x, y);
}
double
clamp(quad::Interp2D f, double x, double y)
{
double* result = quad::cuda_malloc_managed<double>(1);
  gClamp<<<1, 1>>>(f, x, y, result);
cudaDeviceSynchronize();
double hResult = *result;
cudaFree(result);
return hResult;
}
void
test_clamp_interface()
{
constexpr std::size_t nx = 3; // rows
constexpr std::size_t ny = 2; // cols
std::array<double, nx> xs = {1., 2., 3.};
std::array<double, ny> ys = {4., 5.};
std::array<double, ny * nx> zs;
auto fxy = [](double x, double y) { return 3 * x * y + 2 * x + 4 * y; };
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
}
}
quad::Interp2D f(xs, ys, zs);
SECTION("interpolation works")
{
double x = 2.5;
double y = 4.5;
double true_result = 56.75;
double interpResult = Evaluate(f, x, y);
CHECK(interpResult == true_result);
}
SECTION("extrapolation gets clamped")
{
double clampRes = clamp(f, 0., 4.5);
double interpResult = clamp(f, 1., 4.5);
CHECK(clampRes == interpResult); // to the left
clampRes = clamp(f, 4., 4.5);
interpResult = clamp(f, 3., 4.5);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 2., 3.);
interpResult = clamp(f, 2., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 2., 5.5);
interpResult = clamp(f, 2., 5.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 0., 0.);
interpResult = clamp(f, 1., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 4., 3.);
interpResult = clamp(f, 3., 4.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 0., 6.);
interpResult = clamp(f, 1., 5.);
CHECK(clampRes == interpResult);
clampRes = clamp(f, 4., 6.);
interpResult = clamp(f, 3., 5.);
CHECK(clampRes == interpResult);
}
}
void
test_interpolation_at_knots()
{
constexpr std::size_t nx = 3;
constexpr std::size_t ny = 2;
std::array<double, nx> const xs = {1., 2., 3.};
std::array<double, ny> const ys = {4., 5.};
auto fxy = [](double x, double y) { return 3 * x * y + 2 * x + 4 * y; };
std::array<double, ny * nx> zs;
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
}
}
quad::Interp2D f(xs, ys, zs);
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
CHECK(zs[j * nx + i] == fxy(x, y));
double interpResult = Evaluate(f, x, y);
CHECK(zs[j * nx + i] == interpResult);
}
}
}
void
test_on_bilinear()
{
constexpr std::size_t nx = 3;
constexpr std::size_t ny = 4;
std::array<double, nx> const xs = {1., 2., 3.};
std::array<double, ny> const ys = {1., 2., 3., 4.};
std::array<double, ny * nx> zs;
auto fxy = [](double x, double y) { return 2 * x + 3 * y - 5; };
for (std::size_t i = 0; i != nx; ++i) {
double x = xs[i];
for (std::size_t j = 0; j != ny; ++j) {
double y = ys[j];
zs[j * nx + i] = fxy(x, y);
CHECK(zs[j * nx + i] == fxy(x, y));
}
}
quad::Interp2D f(xs, ys, zs);
using IntegType = quad::Interp2D;
double interpResult = Evaluate(f, 2.5, 1.5);
CHECK(interpResult == 4.5);
}
TEST_CASE("clamp interface works")
{
test_clamp_interface();
}
TEST_CASE("Interp2D exact at knots")
{
test_interpolation_at_knots();
}
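// Register test_on_bilinear(), which is defined above but otherwise never
// executed (treated here as an accidental omission).
TEST_CASE("Interp2D reproduces a bilinear function")
{
  test_on_bilinear();
}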
|
af2ccace2be62bb3f1c7f4e02704424f7753de7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef CROSS_ENTROPY_LOSS_LAYER_CUH_
#define CROSS_ENTROPY_LOSS_LAYER_CUH_
#include <assert.h>
#include <math.h>
#include "basics/layer.hpp"
#include "basics/tensor.cu"
#include "basics/session.hpp"
// TODO: optimize the backward() CUDA kernel (BackwardGPU below currently uses one thread per sample)
#define BLOCKDIM 32
namespace CrossEntropyGPUKernels {
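// Summary of the kernels below (describing the code as written): ForwardGPU
// reduces the whole batch with a single thread, accumulating -log(p[label])
// per sample and averaging over the batch; BackwardGPU launches one thread
// per sample and writes a gradient only at the labelled class index.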
template <class Dtype>
__global__ void ForwardGPU(Tensor<Dtype>* bottom_0, Tensor<Dtype>* bottom_1, Tensor<Dtype>* top) {
assert(bottom_0->GetDims()[0] == bottom_1->GetDims()[0]);
assert(bottom_0->GetDims()[1] == 1);
assert(bottom_0->GetDims()[2] == 1);
assert(bottom_1->GetDims()[1] == 1);
assert(bottom_1->GetDims()[2] == 1);
assert(bottom_1->GetDims()[3] == 1);
assert(top->GetDims()[0] == 1);
assert(top->GetDims()[1] == 1);
assert(top->GetDims()[2] == 1);
assert(top->GetDims()[3] == 1);
size_t batch_size = bottom_0->GetDims()[0];
Dtype loss = 0;
for (size_t i = 0; i < batch_size; ++i) {
Dtype label = bottom_1->at(i,0,0,0);
Dtype p = bottom_0->at(i,0,0,label);
loss -= log(p);
}
top->at(0,0,0,0) = loss / batch_size;
}
template <class Dtype>
__global__ void BackwardGPU (Tensor<Dtype>* top,
Tensor<Dtype>* top_diff,
Tensor<Dtype>* bottom_0,
Tensor<Dtype>* bottom_1,
Tensor<Dtype>* bottom_diff_0) {
int batch_idx = threadIdx.x;
for (int j = 0; j < bottom_0->GetDims()[3]; ++j) {
bottom_diff_0->at(batch_idx,0,0,j) = 0;
}
Dtype label = bottom_1->at(batch_idx,0,0,0);
Dtype p = bottom_0->at(batch_idx,0,0,label);
bottom_diff_0->at(batch_idx,0,0,label) = top_diff->at(0,0,0,0)/(p+0.000001);
}
}
template <class Dtype>
class CrossEntropyLoss: public Layer<Dtype> {
public:
CrossEntropyLoss() {}
~CrossEntropyLoss() {}
void Forward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void Backward(const std::vector<Tensor<Dtype>*>& , const std::vector<Tensor<Dtype>*>&,
const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void GetTopsDims(const std::vector<size_t*> &, const std::vector<size_t*> &);
private:
};
template <class Dtype>
void CrossEntropyLoss<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) {
assert(bottoms.size() == 2); // Should have only two bottom tensors
assert(tops.size() == 1); // Should have only one top tensor
if (Session::GetSession()->gpu) {
hipLaunchKernelGGL(( CrossEntropyGPUKernels::ForwardGPU), dim3(1), dim3(1), 0, 0, bottoms[0], bottoms[1], tops[0]);
} else {
assert(bottoms[0]->GetDims()[0] == bottoms[1]->GetDims()[0]);
assert(bottoms[0]->GetDims()[1] == 1);
assert(bottoms[0]->GetDims()[2] == 1);
assert(bottoms[1]->GetDims()[1] == 1);
assert(bottoms[1]->GetDims()[2] == 1);
assert(bottoms[1]->GetDims()[3] == 1);
assert(tops[0]->GetDims()[0] == 1);
assert(tops[0]->GetDims()[1] == 1);
assert(tops[0]->GetDims()[2] == 1);
assert(tops[0]->GetDims()[3] == 1);
size_t batch_size = bottoms[0]->GetDims()[0];
Dtype loss = 0;
for (size_t i = 0; i < batch_size; ++i) {
Dtype label = bottoms[1]->at(i,0,0,0);
Dtype p = bottoms[0]->at(i,0,0,label);
loss -= log(p);
}
tops[0]->at(0,0,0,0) = loss / batch_size;
}
}
template <class Dtype>
void CrossEntropyLoss<Dtype>::Backward (const std::vector<Tensor<Dtype>*>& tops,
const std::vector<Tensor<Dtype>*>& tops_diff,
const std::vector<Tensor<Dtype>*>& bottoms,
const std::vector<Tensor<Dtype>*>& bottoms_diff) {
assert(tops.size() == 1);
assert(tops_diff.size() == 1);
assert(bottoms.size() == 2);
assert(bottoms_diff.size() == 2);
Tensor<Dtype>* top = tops[0];
Tensor<Dtype>* top_diff = tops_diff[0];
Tensor<Dtype>* bottom_0 = bottoms[0];
Tensor<Dtype>* bottom_1 = bottoms[1];
Tensor<Dtype>* bottom_diff_0 = bottoms_diff[0];
// Not backpropagate to labels
Session* S = Session::GetSession();
int batch_size = S->batch_size;
if (S->gpu) {
hipLaunchKernelGGL(( CrossEntropyGPUKernels::BackwardGPU<Dtype>), dim3(1),dim3(batch_size), 0, 0, top,top_diff,bottom_0,bottom_1,bottom_diff_0);
} else {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < bottom_0->GetDims()[3]; ++j) {
bottom_diff_0->at(i,0,0,j) = 0;
}
Dtype label = bottom_1->at(i,0,0,0);
Dtype p = bottom_0->at(i,0,0,label);
bottom_diff_0->at(i,0,0,label) = top_diff->at(0,0,0,0)/(p+0.000001);
}
}
}
template <class Dtype>
void CrossEntropyLoss<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) {
assert(bottoms_dims.size() == 2);
assert(tops_dims.size() == 1);
tops_dims[0][0] = 1;
tops_dims[0][1] = 1;
tops_dims[0][2] = 1;
tops_dims[0][3] = 1;
}
#endif // CROSS_ENTROPY_LOSS_LAYER_CUH_
| af2ccace2be62bb3f1c7f4e02704424f7753de7e.cu |
#ifndef CROSS_ENTROPY_LOSS_LAYER_CUH_
#define CROSS_ENTROPY_LOSS_LAYER_CUH_
#include <assert.h>
#include <math.h>
#include "basics/layer.hpp"
#include "basics/tensor.cu"
#include "basics/session.hpp"
// TODO: optimize the backward() CUDA kernel (BackwardGPU below currently uses one thread per sample)
#define BLOCKDIM 32
namespace CrossEntropyGPUKernels {
template <class Dtype>
__global__ void ForwardGPU(Tensor<Dtype>* bottom_0, Tensor<Dtype>* bottom_1, Tensor<Dtype>* top) {
assert(bottom_0->GetDims()[0] == bottom_1->GetDims()[0]);
assert(bottom_0->GetDims()[1] == 1);
assert(bottom_0->GetDims()[2] == 1);
assert(bottom_1->GetDims()[1] == 1);
assert(bottom_1->GetDims()[2] == 1);
assert(bottom_1->GetDims()[3] == 1);
assert(top->GetDims()[0] == 1);
assert(top->GetDims()[1] == 1);
assert(top->GetDims()[2] == 1);
assert(top->GetDims()[3] == 1);
size_t batch_size = bottom_0->GetDims()[0];
Dtype loss = 0;
for (size_t i = 0; i < batch_size; ++i) {
Dtype label = bottom_1->at(i,0,0,0);
Dtype p = bottom_0->at(i,0,0,label);
loss -= log(p);
}
top->at(0,0,0,0) = loss / batch_size;
}
template <class Dtype>
__global__ void BackwardGPU (Tensor<Dtype>* top,
Tensor<Dtype>* top_diff,
Tensor<Dtype>* bottom_0,
Tensor<Dtype>* bottom_1,
Tensor<Dtype>* bottom_diff_0) {
int batch_idx = threadIdx.x;
for (int j = 0; j < bottom_0->GetDims()[3]; ++j) {
bottom_diff_0->at(batch_idx,0,0,j) = 0;
}
Dtype label = bottom_1->at(batch_idx,0,0,0);
Dtype p = bottom_0->at(batch_idx,0,0,label);
bottom_diff_0->at(batch_idx,0,0,label) = top_diff->at(0,0,0,0)/(p+0.000001);
}
}
template <class Dtype>
class CrossEntropyLoss: public Layer<Dtype> {
public:
CrossEntropyLoss() {}
~CrossEntropyLoss() {}
void Forward(const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void Backward(const std::vector<Tensor<Dtype>*>& , const std::vector<Tensor<Dtype>*>&,
const std::vector<Tensor<Dtype>*>&, const std::vector<Tensor<Dtype>*>&);
void GetTopsDims(const std::vector<size_t*> &, const std::vector<size_t*> &);
private:
};
template <class Dtype>
void CrossEntropyLoss<Dtype>::Forward(const std::vector<Tensor<Dtype>*> &bottoms, const std::vector<Tensor<Dtype>*> &tops) {
assert(bottoms.size() == 2); // Should have only two bottom tensors
assert(tops.size() == 1); // Should have only one top tensor
if (Session::GetSession()->gpu) {
CrossEntropyGPUKernels::ForwardGPU<<<1, 1>>>(bottoms[0], bottoms[1], tops[0]);
} else {
assert(bottoms[0]->GetDims()[0] == bottoms[1]->GetDims()[0]);
assert(bottoms[0]->GetDims()[1] == 1);
assert(bottoms[0]->GetDims()[2] == 1);
assert(bottoms[1]->GetDims()[1] == 1);
assert(bottoms[1]->GetDims()[2] == 1);
assert(bottoms[1]->GetDims()[3] == 1);
assert(tops[0]->GetDims()[0] == 1);
assert(tops[0]->GetDims()[1] == 1);
assert(tops[0]->GetDims()[2] == 1);
assert(tops[0]->GetDims()[3] == 1);
size_t batch_size = bottoms[0]->GetDims()[0];
Dtype loss = 0;
for (size_t i = 0; i < batch_size; ++i) {
Dtype label = bottoms[1]->at(i,0,0,0);
Dtype p = bottoms[0]->at(i,0,0,label);
loss -= log(p);
}
tops[0]->at(0,0,0,0) = loss / batch_size;
}
}
template <class Dtype>
void CrossEntropyLoss<Dtype>::Backward (const std::vector<Tensor<Dtype>*>& tops,
const std::vector<Tensor<Dtype>*>& tops_diff,
const std::vector<Tensor<Dtype>*>& bottoms,
const std::vector<Tensor<Dtype>*>& bottoms_diff) {
assert(tops.size() == 1);
assert(tops_diff.size() == 1);
assert(bottoms.size() == 2);
assert(bottoms_diff.size() == 2);
Tensor<Dtype>* top = tops[0];
Tensor<Dtype>* top_diff = tops_diff[0];
Tensor<Dtype>* bottom_0 = bottoms[0];
Tensor<Dtype>* bottom_1 = bottoms[1];
Tensor<Dtype>* bottom_diff_0 = bottoms_diff[0];
// Not backpropagate to labels
Session* S = Session::GetSession();
int batch_size = S->batch_size;
if (S->gpu) {
CrossEntropyGPUKernels::BackwardGPU<Dtype><<<1,batch_size>>>(top,top_diff,bottom_0,bottom_1,bottom_diff_0);
} else {
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < bottom_0->GetDims()[3]; ++j) {
bottom_diff_0->at(i,0,0,j) = 0;
}
Dtype label = bottom_1->at(i,0,0,0);
Dtype p = bottom_0->at(i,0,0,label);
bottom_diff_0->at(i,0,0,label) = top_diff->at(0,0,0,0)/(p+0.000001);
}
}
}
template <class Dtype>
void CrossEntropyLoss<Dtype>::GetTopsDims(const std::vector<size_t*> &bottoms_dims, const std::vector<size_t*> &tops_dims) {
assert(bottoms_dims.size() == 2);
assert(tops_dims.size() == 1);
tops_dims[0][0] = 1;
tops_dims[0][1] = 1;
tops_dims[0][2] = 1;
tops_dims[0][3] = 1;
}
#endif // CROSS_ENTROPY_LOSS_LAYER_CUH_
|
8a2842c0a30f0af6d14291de1d0ffcce7e314793.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
void test(double* input, int n, int p) {
double* device_input;
hipMalloc((void **)&device_input, sizeof(double) * n * p);
hipMemcpy(device_input, input, sizeof(double) * n * p, hipMemcpyHostToDevice);
hipFree(device_input);
}
| 8a2842c0a30f0af6d14291de1d0ffcce7e314793.cu | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "CycleTimer.h"
void test(double* input, int n, int p) {
double* device_input;
cudaMalloc((void **)&device_input, sizeof(double) * n * p);
cudaMemcpy(device_input, input, sizeof(double) * n * p, cudaMemcpyHostToDevice);
cudaFree(device_input);
}
|
27ba1f47372be5c4cce48ff7f27f641de4793037.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#include <limits.h>
#include <float.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include "scanImpl.cu"
#ifdef HAS_GMM
#include "gmm.h"
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
#define SHARED_SIZE_LIMIT 1024
__device__ static int gpu_strcmp(const char *s1, const char *s2, int len){
int res = 0;
for(int i=0;i < len;i++){
if(s1[i]<s2[i]){
res = -1;
break;
}else if(s1[i]>s2[i]){
res = 1;
break;
}
}
return res;
}
/* use one GPU thread to count the number of unique keys */
__global__ static void count_unique_keys_int(int *key, int tupleNum, int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(key[i+1] != key[i])
res ++;
}
*result = res;
}
__global__ static void count_unique_keys_float(float *key, int tupleNum, int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(key[i+1] != key[i])
res ++;
}
*result = res;
}
__global__ static void count_unique_keys_string(char *key, int tupleNum, int keySize,int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize) != 0)
res ++;
}
*result = res;
}
/*
 * Count how many times each key occurs, using a single GPU thread.
*/
__global__ static void count_key_num_int(int *key, int tupleNum, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(key[i] != key[i+1]){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(key[i] != key[i+1]){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__global__ static void count_key_num_float(float *key, int tupleNum, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(key[i] != key[i+1]){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(key[i] != key[i+1]){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__global__ static void count_key_num_string(char *key, int tupleNum, int keySize, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize)!=0){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize)!=0){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__device__ static inline void ComparatorInt(
int &keyA,int &valA,int &keyB,int &valB,int dir)
{
int t;
if ((keyA > keyB) == dir)
{
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
}
}
__device__ static inline void ComparatorFloat(
float &keyA,int &valA,float &keyB,int &valB,int dir)
{
float t1;
int t2;
if ((keyA > keyB) == dir)
{
t1 = keyA;
keyA = keyB;
keyB = t1;
t2 = valA;
valA = valB;
valB = t2;
}
}
__device__ static inline void Comparator(
char * keyA,
int &valA,
char * keyB,
int &valB,
int keySize,
int dir
)
{
int t;
char buf[32];
if ((gpu_strcmp(keyA,keyB,keySize) == 1) == dir)
{
memcpy(buf, keyA, keySize);
memcpy(keyA, keyB, keySize);
memcpy(keyB, buf, keySize);
t = valA;
valA = valB;
valB = t;
}
}
#define NTHREAD (SHARED_SIZE_LIMIT/2)
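/*
 * Single-block bitonic sort on string keys staged in shared memory. Each
 * thread handles two tuples (the kernel is launched with newNum/2 threads),
 * so one block sorts at most SHARED_SIZE_LIMIT entries; sorted keys go to
 * result and the permutation of original indices goes to pos. Keys are
 * assumed to fit the 32-byte-per-key shared buffer.
 */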
__global__ static void sort_key_string(char * key, int tupleNum, int keySize, char *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ char bufKey[SHARED_SIZE_LIMIT * 32];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
memcpy(bufKey + lid*keySize, key + gid*keySize, keySize);
bufVal[lid] = gid;
memcpy(bufKey + (lid+blockDim.x)*keySize, key +(gid+blockDim.x)*keySize, keySize);
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
bufKey+pos*keySize, bufVal[pos + 0],
bufKey+(pos+stride)*keySize, bufVal[pos + stride],
keySize,
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
bufKey+pos*keySize, bufVal[pos + 0],
bufKey+(pos+stride)*keySize, bufVal[pos + stride],
keySize,
dir
);
}
}
__syncthreads();
memcpy(result + gid*keySize, bufKey + lid*keySize, keySize);
((int *)pos)[gid] = bufVal[lid];
memcpy(result + (gid+blockDim.x)*keySize, bufKey + (lid+blockDim.x)*keySize,keySize);
((int *)pos)[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
 * Sorting a small number of integers with a single-block bitonic sort in shared memory.
*/
__global__ static void sort_key_int(int * key, int tupleNum, int *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ int bufKey[SHARED_SIZE_LIMIT];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
bufKey[lid] = key[gid];
bufVal[lid] = gid;
bufKey[lid + blockDim.x] = key[gid + blockDim.x];
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorInt(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorInt(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
dir
);
}
}
__syncthreads();
result[gid] = bufKey[lid];
pos[gid] = bufVal[lid];
result[gid + blockDim.x] = bufKey[lid + blockDim.x];
pos[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
 * Sorting a small number of floats with a single-block bitonic sort in shared memory.
*/
__global__ static void sort_key_float(float * key, int tupleNum, float *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ float bufKey[SHARED_SIZE_LIMIT];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
bufKey[lid] = key[gid];
bufVal[lid] = gid;
bufKey[lid + blockDim.x] = key[gid + blockDim.x];
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorFloat(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorFloat(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
dir
);
}
}
__syncthreads();
result[gid] = bufKey[lid];
pos[gid] = bufVal[lid];
result[gid + blockDim.x] = bufKey[lid + blockDim.x];
pos[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
* Naive sort. One thread per block.
*/
__global__ static void sec_sort_key_int(int *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
int min = key[i];
int pos = i;
for(int j=i+1;j<end;j++){
if(min > key[j]){
min = key[j];
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void sec_sort_key_float(float *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
float min = key[i];
int pos = i;
for(int j=i+1;j<end;j++){
if(min > key[j]){
min = key[j];
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void sec_sort_key_string(char *key, int keySize, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
char min[128];
memcpy(min,key + i*keySize, keySize);
int pos = i;
for(int j=i+1;j<end;j++){
if(gpu_strcmp(min, key+j*keySize,keySize)>0){
memcpy(min,key + j*keySize, keySize);
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void set_key_string(char *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = CHAR_MAX;
}
__global__ static void set_key_int(int *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = INT_MAX;
}
__global__ static void set_key_float(float *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = FLT_MAX;
}
/*
 * Gather elements from @col into @result following the permutation in @keyPos.
*/
__global__ static void gather_col_int(int * keyPos, int* col, int newNum, int tupleNum, int*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
result[i] = col[pos];
}
}
__global__ static void gather_col_float(int * keyPos, float* col, int newNum, int tupleNum, float*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
result[i] = col[pos];
}
}
__global__ static void gather_col_string(int * keyPos, char* col, int newNum, int tupleNum, int keySize,char*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
memcpy(result + i*keySize, col + pos*keySize, keySize);
}
}
/* Materialize the final result: gather every column through the sorted permutation */
__global__ static void gather_result(int * keyPos, char ** col, int newNum, int tupleNum, int *size, int colNum, char **result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=0;j<colNum;j++){
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
memcpy(result[j] + i*size[j], col[j] +pos*size[j], size[j]);
}
}
}
/*
* orderBy: sort the input data by the order by columns
*
* Prerequisite:
* input data are not compressed
*
* Input:
* odNode: the groupby node which contains the input data and groupby information
* pp: records the statistics such as kernel execution time
*
* Return:
* a new table node
*/
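/*
 * Steps performed below (describing this implementation, not a general
 * contract): copy the input columns to the GPU, pad the key column to the
 * next power of two with MAX sentinels, bitonic-sort the primary key in a
 * single thread block to obtain a permutation, optionally re-sort runs of
 * equal primary keys by the second order-by column, then gather whole tuples
 * through the permutation and copy the sorted table back to the host.
 */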
struct tableNode * orderBy(struct orderByNode * odNode, struct statistic *pp){
extern char *col_buf;
struct timeval t;
struct tableNode * res = NULL;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME,&start);
assert(odNode->table->tupleNum < SHARED_SIZE_LIMIT);
res = (struct tableNode *)malloc(sizeof(struct tableNode));
CHECK_POINTER(res);
res->tupleNum = odNode->table->tupleNum;
res->totalAttr = odNode->table->totalAttr;
res->tupleSize = odNode->table->tupleSize;
res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrType);
res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrSize);
res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrTotalSize);
res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataPos);
res->dataFormat = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataFormat);
res->content = (char **) malloc(sizeof(char *) * res->totalAttr);
CHECK_POINTER(res->content);
int gpuTupleNum = odNode->table->tupleNum;
char * gpuKey, **column, ** gpuContent;
char * gpuSortedKey;
int *gpuSize, *gpuPos;
column = (char**) malloc(sizeof(char*) *res->totalAttr);
CHECK_POINTER(column);
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuContent, sizeof(char *) * res->totalAttr));
for(int i=0;i<res->totalAttr;i++){
res->attrType[i] = odNode->table->attrType[i];
res->attrSize[i] = odNode->table->attrSize[i];
res->attrTotalSize[i] = odNode->table->attrTotalSize[i];
res->dataPos[i] = MEM;
res->dataFormat[i] = UNCOMPRESSED;
res->content[i] = (char *) malloc( res->attrSize[i] * res->tupleNum);
CHECK_POINTER(res->content[i]);
int attrSize = res->attrSize[i];
if(odNode->table->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&column[i], attrSize *res->tupleNum));
gettimeofday(&t, NULL);
printf("[gvm] %lf intercepting diskIO\n", t.tv_sec + t.tv_usec / 1000000.0);
memcpy(col_buf, odNode->table->content[i], attrSize*res->tupleNum);
gettimeofday(&t, NULL);
printf("[gvm] %lf intercepted diskIO\n", t.tv_sec + t.tv_usec / 1000000.0);
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(column[i], col_buf, attrSize*res->tupleNum, hipMemcpyHostToDevice));
}else if (odNode->table->dataPos[i] == GPU){
column[i] = odNode->table->content[i];
}
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &column[i], sizeof(char *), hipMemcpyHostToDevice));
}
int newNum = 1;
while(newNum<gpuTupleNum){
newNum *=2;
}
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuPos, sizeof(int)*newNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuSize, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuSize, res->attrSize, sizeof(int) * res->totalAttr, hipMemcpyHostToDevice););
char ** gpuResult;
char ** result;
result = (char**)malloc(sizeof(char *) * res->totalAttr);
CHECK_POINTER(result);
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuResult, sizeof(char*)*res->totalAttr));
for(int i=0;i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&result[i], res->attrSize[i]* gpuTupleNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuResult[i], &result[i], sizeof(char*), hipMemcpyHostToDevice));
}
/* Sort by the first orderby column first */
int dir;
if(odNode->orderBySeq[0] == ASC)
dir = 1;
else
dir = 0;
int index = odNode->orderByIndex[0];
int type = odNode->table->attrType[index];
if(type == INT){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, sizeof(int) * newNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, sizeof(int) * newNum));
hipLaunchKernelGGL(( set_key_int), dim3(8),dim3(128), 0, 0, (int*)gpuKey,newNum);
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], sizeof(int)*gpuTupleNum,hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( sort_key_int), dim3(1), dim3(newNum/2), 0, 0, (int*)gpuKey, newNum, (int*)gpuSortedKey, gpuPos, dir);
}else if (type == FLOAT){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, sizeof(float) * newNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, sizeof(float) * newNum));
hipLaunchKernelGGL(( set_key_float), dim3(8),dim3(128), 0, 0, (float*)gpuKey,newNum);
        CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], sizeof(float)*gpuTupleNum,hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( sort_key_float), dim3(1), dim3(newNum/2), 0, 0, (float*)gpuKey, newNum, (float*)gpuSortedKey, gpuPos, dir);
}else if (type == STRING){
int keySize = odNode->table->attrSize[index];
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuKey, keySize * newNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuSortedKey, keySize * newNum));
hipLaunchKernelGGL(( set_key_string), dim3(8),dim3(128), 0, 0, gpuKey,newNum*keySize);
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuKey, column[index], keySize*gpuTupleNum,hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( sort_key_string), dim3(1), dim3(newNum/2), 0, 0, gpuKey, newNum, keySize,gpuSortedKey, gpuPos, dir);
}
/* Currently we only support no more than 2 orderBy columns */
if (odNode->orderByNum == 2){
int keySize = odNode->table->attrSize[index];
int secIndex = odNode->orderByIndex[1];
int keySize2 = odNode->table->attrSize[secIndex];
int secType = odNode->table->attrType[secIndex];
int * keyNum , *keyCount, *keyPsum;
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&keyNum, sizeof(int)));
if(type == INT){
hipLaunchKernelGGL(( count_unique_keys_int), dim3(1),dim3(1), 0, 0, (int *)gpuSortedKey, gpuTupleNum,keyNum);
}else if (type == FLOAT){
hipLaunchKernelGGL(( count_unique_keys_float), dim3(1),dim3(1), 0, 0, (float *)gpuSortedKey, gpuTupleNum, keyNum);
}else if (type == STRING){
hipLaunchKernelGGL(( count_unique_keys_string), dim3(1),dim3(1), 0, 0, gpuKey, gpuTupleNum,keySize,keyNum);
}
int cpuKeyNum;
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&cpuKeyNum, keyNum, sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&keyCount, sizeof(int)* cpuKeyNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&keyPsum, sizeof(int)* cpuKeyNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemset(keyPsum, 0, sizeof(int) * cpuKeyNum));
if(type == INT){
hipLaunchKernelGGL(( count_key_num_int), dim3(1),dim3(1), 0, 0, (int*)gpuKey,gpuTupleNum,keyCount);
}else if (type == FLOAT){
hipLaunchKernelGGL(( count_key_num_float), dim3(1),dim3(1), 0, 0, (float*)gpuKey,gpuTupleNum,keyCount);
}else if (type == STRING){
hipLaunchKernelGGL(( count_key_num_string), dim3(1),dim3(1), 0, 0, gpuKey,gpuTupleNum,keySize,keyCount);
}
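        /* prefix sum of the per-key counts: keyPsum[k] becomes the starting offset of the k-th group of equal primary keys */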
scanImpl(keyCount, cpuKeyNum, keyPsum, pp);
int * gpuPos2;
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuPos2, sizeof(int)* newNum));
char * gpuKey2;
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuKey2, keySize2 * newNum));
if(secType == INT){
hipLaunchKernelGGL(( gather_col_int), dim3(8),dim3(128), 0, 0, gpuPos,(int*)column[secIndex],newNum, gpuTupleNum, (int*)gpuKey2);
hipLaunchKernelGGL(( sec_sort_key_int), dim3(cpuKeyNum),dim3(1), 0, 0, (int*)gpuKey2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}else if (secType == FLOAT){
hipLaunchKernelGGL(( gather_col_float), dim3(8),dim3(128), 0, 0, gpuPos,(float*)column[secIndex],newNum, gpuTupleNum, (float*)gpuKey2);
hipLaunchKernelGGL(( sec_sort_key_float), dim3(cpuKeyNum),dim3(1), 0, 0, (float*)gpuKey2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}else if (secType == STRING){
hipLaunchKernelGGL(( gather_col_string), dim3(8),dim3(128), 0, 0, gpuPos,column[secIndex],newNum, gpuTupleNum, keySize2,gpuKey2);
hipLaunchKernelGGL(( sec_sort_key_string), dim3(cpuKeyNum),dim3(1), 0, 0, gpuKey2, keySize2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}
hipLaunchKernelGGL(( gather_result), dim3(8),dim3(128), 0, 0, gpuPos2, gpuContent, newNum, gpuTupleNum, gpuSize,res->totalAttr,gpuResult);
CUDA_SAFE_CALL_NO_SYNC(hipFree(keyCount));
CUDA_SAFE_CALL_NO_SYNC(hipFree(keyNum));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuPos2));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuKey2));
}else{
hipLaunchKernelGGL(( gather_result), dim3(8),dim3(128), 0, 0, gpuPos, gpuContent, newNum, gpuTupleNum, gpuSize,res->totalAttr,gpuResult);
}
for(int i=0; i<res->totalAttr;i++){
int size = res->attrSize[i] * gpuTupleNum;
memset(res->content[i],0, size);
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(res->content[i], result[i],size, hipMemcpyDeviceToHost));
}
for(int i=0;i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(hipFree(column[i]));
CUDA_SAFE_CALL_NO_SYNC(hipFree(result[i]));
}
free(column);
free(result);
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuKey));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuSortedKey));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuSize));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuPos));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("OrderBy Time: %lf\n", timeE/(1000*1000));
return res;
}
| 27ba1f47372be5c4cce48ff7f27f641de4793037.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#include <limits.h>
#include <float.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include "scanImpl.cu"
#ifdef HAS_GMM
#include "gmm.h"
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
#define SHARED_SIZE_LIMIT 1024
__device__ static int gpu_strcmp(const char *s1, const char *s2, int len){
int res = 0;
for(int i=0;i < len;i++){
if(s1[i]<s2[i]){
res = -1;
break;
}else if(s1[i]>s2[i]){
res = 1;
break;
}
}
return res;
}
/* use one GPU thread to count the number of unique keys */
__global__ static void count_unique_keys_int(int *key, int tupleNum, int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(key[i+1] != key[i])
res ++;
}
*result = res;
}
__global__ static void count_unique_keys_float(float *key, int tupleNum, int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(key[i+1] != key[i])
res ++;
}
*result = res;
}
__global__ static void count_unique_keys_string(char *key, int tupleNum, int keySize,int * result){
int i = 0;
int res = 1;
for(i=0;i<tupleNum -1;i++){
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize) != 0)
res ++;
}
*result = res;
}
/*
 * Count how many times each key occurs, using a single GPU thread.
*/
__global__ static void count_key_num_int(int *key, int tupleNum, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(key[i] != key[i+1]){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(key[i] != key[i+1]){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__global__ static void count_key_num_float(float *key, int tupleNum, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(key[i] != key[i+1]){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(key[i] != key[i+1]){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__global__ static void count_key_num_string(char *key, int tupleNum, int keySize, int * count){
int pos = 0, i = 0;
int lcount = 1;
for(i = 0;i <tupleNum -1; i ++){
if(i == tupleNum -2){
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize)!=0){
count[pos] = lcount;
count[pos+1] = 1;
}else{
count[pos] = lcount +1;
}
}else{
if(gpu_strcmp(key+i*keySize, key+(i+1)*keySize,keySize)!=0){
count[pos] = lcount;
lcount = 1;
pos ++;
}else{
lcount ++;
}
}
}
}
__device__ static inline void ComparatorInt(
int &keyA,int &valA,int &keyB,int &valB,int dir)
{
int t;
if ((keyA > keyB) == dir)
{
t = keyA;
keyA = keyB;
keyB = t;
t = valA;
valA = valB;
valB = t;
}
}
__device__ static inline void ComparatorFloat(
float &keyA,int &valA,float &keyB,int &valB,int dir)
{
float t1;
int t2;
if ((keyA > keyB) == dir)
{
t1 = keyA;
keyA = keyB;
keyB = t1;
t2 = valA;
valA = valB;
valB = t2;
}
}
__device__ static inline void Comparator(
char * keyA,
int &valA,
char * keyB,
int &valB,
int keySize,
int dir
)
{
int t;
char buf[32];
if ((gpu_strcmp(keyA,keyB,keySize) == 1) == dir)
{
memcpy(buf, keyA, keySize);
memcpy(keyA, keyB, keySize);
memcpy(keyB, buf, keySize);
t = valA;
valA = valB;
valB = t;
}
}
#define NTHREAD (SHARED_SIZE_LIMIT/2)
__global__ static void sort_key_string(char * key, int tupleNum, int keySize, char *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ char bufKey[SHARED_SIZE_LIMIT * 32];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
memcpy(bufKey + lid*keySize, key + gid*keySize, keySize);
bufVal[lid] = gid;
memcpy(bufKey + (lid+blockDim.x)*keySize, key +(gid+blockDim.x)*keySize, keySize);
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
bufKey+pos*keySize, bufVal[pos + 0],
bufKey+(pos+stride)*keySize, bufVal[pos + stride],
keySize,
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
bufKey+pos*keySize, bufVal[pos + 0],
bufKey+(pos+stride)*keySize, bufVal[pos + stride],
keySize,
dir
);
}
}
__syncthreads();
memcpy(result + gid*keySize, bufKey + lid*keySize, keySize);
((int *)pos)[gid] = bufVal[lid];
memcpy(result + (gid+blockDim.x)*keySize, bufKey + (lid+blockDim.x)*keySize,keySize);
((int *)pos)[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
 * Sorting a small number of integers with a single-block bitonic sort in shared memory.
*/
__global__ static void sort_key_int(int * key, int tupleNum, int *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ int bufKey[SHARED_SIZE_LIMIT];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
bufKey[lid] = key[gid];
bufVal[lid] = gid;
bufKey[lid + blockDim.x] = key[gid + blockDim.x];
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorInt(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorInt(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
dir
);
}
}
__syncthreads();
result[gid] = bufKey[lid];
pos[gid] = bufVal[lid];
result[gid + blockDim.x] = bufKey[lid + blockDim.x];
pos[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
* Sorting small number of floats.
*/
__global__ static void sort_key_float(float * key, int tupleNum, float *result, int *pos,int dir){
int lid = threadIdx.x;
int bid = blockIdx.x;
__shared__ float bufKey[SHARED_SIZE_LIMIT];
__shared__ int bufVal[SHARED_SIZE_LIMIT];
int gid = bid * SHARED_SIZE_LIMIT + lid;
bufKey[lid] = key[gid];
bufVal[lid] = gid;
bufKey[lid + blockDim.x] = key[gid + blockDim.x];
bufVal[lid+blockDim.x] = gid+ blockDim.x;
__syncthreads();
for (int size = 2; size < tupleNum && size < SHARED_SIZE_LIMIT; size <<= 1){
int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorFloat(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
ddd
);
}
}
{
for (int stride = blockDim.x ; stride > 0; stride >>= 1)
{
__syncthreads();
int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
ComparatorFloat(
bufKey[pos + 0], bufVal[pos + 0],
bufKey[pos + stride], bufVal[pos + stride],
dir
);
}
}
__syncthreads();
result[gid] = bufKey[lid];
pos[gid] = bufVal[lid];
result[gid + blockDim.x] = bufKey[lid + blockDim.x];
pos[gid+blockDim.x] = bufVal[lid+blockDim.x];
}
/*
* Naive sort. One thread per block.
*/
__global__ static void sec_sort_key_int(int *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
int min = key[i];
int pos = i;
for(int j=i+1;j<end;j++){
if(min > key[j]){
min = key[j];
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void sec_sort_key_float(float *key, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
float min = key[i];
int pos = i;
for(int j=i+1;j<end;j++){
if(min > key[j]){
min = key[j];
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void sec_sort_key_string(char *key, int keySize, int *psum, int *count ,int tupleNum, int *inputPos, int* outputPos){
int tid = blockIdx.x;
int start = psum[tid];
int end = start + count[tid] - 1;
for(int i=start; i< end-1; i++){
char min[128];
memcpy(min,key + i*keySize, keySize);
int pos = i;
for(int j=i+1;j<end;j++){
if(gpu_strcmp(min, key+j*keySize,keySize)>0){
memcpy(min,key + j*keySize, keySize);
pos = j;
}
}
outputPos[i] = inputPos[pos];
}
}
__global__ static void set_key_string(char *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = CHAR_MAX;
}
__global__ static void set_key_int(int *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = INT_MAX;
}
__global__ static void set_key_float(float *key, int tupleNum){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=tid;i<tupleNum;i+=stride)
key[i] = FLT_MAX;
}
/*
 * Gather elements from @col into @result following the permutation in @keyPos.
*/
__global__ static void gather_col_int(int * keyPos, int* col, int newNum, int tupleNum, int*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
result[i] = col[pos];
}
}
__global__ static void gather_col_float(int * keyPos, float* col, int newNum, int tupleNum, float*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
result[i] = col[pos];
}
}
__global__ static void gather_col_string(int * keyPos, char* col, int newNum, int tupleNum, int keySize,char*result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
memcpy(result + i*keySize, col + pos*keySize, keySize);
}
}
/* Materialize the final result: gather every column through the sorted permutation */
__global__ static void gather_result(int * keyPos, char ** col, int newNum, int tupleNum, int *size, int colNum, char **result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=0;j<colNum;j++){
for(int i=index;i<newNum;i+=stride){
int pos = keyPos[i];
if(pos<tupleNum)
memcpy(result[j] + i*size[j], col[j] +pos*size[j], size[j]);
}
}
}
/*
* orderBy: sort the input data by the order by columns
*
* Prerequisite:
* input data are not compressed
*
* Input:
* odNode: the groupby node which contains the input data and groupby information
* pp: records the statistics such as kernel execution time
*
* Return:
* a new table node
*/
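/*
 * Usage sketch (hypothetical driver code; only the fields referenced in this
 * file are shown, and how orderByNode is actually built is defined elsewhere
 * in the engine):
 *
 *   struct orderByNode od;
 *   od.table = inputTable;        // uncompressed tableNode
 *   od.orderByNum = 1;            // at most 2 order-by columns are handled
 *   od.orderByIndex[0] = 0;       // sort on attribute 0
 *   od.orderBySeq[0] = ASC;       // any other value sorts descending
 *   struct tableNode *sorted = orderBy(&od, &stats);
 */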
struct tableNode * orderBy(struct orderByNode * odNode, struct statistic *pp){
extern char *col_buf;
struct timeval t;
struct tableNode * res = NULL;
struct timespec start, end;
clock_gettime(CLOCK_REALTIME,&start);
assert(odNode->table->tupleNum < SHARED_SIZE_LIMIT);
res = (struct tableNode *)malloc(sizeof(struct tableNode));
CHECK_POINTER(res);
res->tupleNum = odNode->table->tupleNum;
res->totalAttr = odNode->table->totalAttr;
res->tupleSize = odNode->table->tupleSize;
res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrType);
res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrSize);
res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrTotalSize);
res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataPos);
res->dataFormat = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataFormat);
res->content = (char **) malloc(sizeof(char *) * res->totalAttr);
CHECK_POINTER(res->content);
int gpuTupleNum = odNode->table->tupleNum;
char * gpuKey, **column, ** gpuContent;
char * gpuSortedKey;
int *gpuSize, *gpuPos;
column = (char**) malloc(sizeof(char*) *res->totalAttr);
CHECK_POINTER(column);
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuContent, sizeof(char *) * res->totalAttr));
for(int i=0;i<res->totalAttr;i++){
res->attrType[i] = odNode->table->attrType[i];
res->attrSize[i] = odNode->table->attrSize[i];
res->attrTotalSize[i] = odNode->table->attrTotalSize[i];
res->dataPos[i] = MEM;
res->dataFormat[i] = UNCOMPRESSED;
res->content[i] = (char *) malloc( res->attrSize[i] * res->tupleNum);
CHECK_POINTER(res->content[i]);
int attrSize = res->attrSize[i];
if(odNode->table->dataPos[i] == MEM){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&column[i], attrSize *res->tupleNum));
gettimeofday(&t, NULL);
printf("[gvm] %lf intercepting diskIO\n", t.tv_sec + t.tv_usec / 1000000.0);
memcpy(col_buf, odNode->table->content[i], attrSize*res->tupleNum);
gettimeofday(&t, NULL);
printf("[gvm] %lf intercepted diskIO\n", t.tv_sec + t.tv_usec / 1000000.0);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(column[i], col_buf, attrSize*res->tupleNum, cudaMemcpyHostToDevice));
}else if (odNode->table->dataPos[i] == GPU){
column[i] = odNode->table->content[i];
}
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &column[i], sizeof(char *), cudaMemcpyHostToDevice));
}
int newNum = 1;
while(newNum<gpuTupleNum){
newNum *=2;
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuPos, sizeof(int)*newNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuSize, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuSize, res->attrSize, sizeof(int) * res->totalAttr, cudaMemcpyHostToDevice););
char ** gpuResult;
char ** result;
result = (char**)malloc(sizeof(char *) * res->totalAttr);
CHECK_POINTER(result);
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuResult, sizeof(char*)*res->totalAttr));
for(int i=0;i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&result[i], res->attrSize[i]* gpuTupleNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuResult[i], &result[i], sizeof(char*), cudaMemcpyHostToDevice));
}
/* Sort by the first orderby column first */
int dir;
if(odNode->orderBySeq[0] == ASC)
dir = 1;
else
dir = 0;
int index = odNode->orderByIndex[0];
int type = odNode->table->attrType[index];
if(type == INT){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, sizeof(int) * newNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, sizeof(int) * newNum));
set_key_int<<<8,128>>>((int*)gpuKey,newNum);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], sizeof(int)*gpuTupleNum,cudaMemcpyDeviceToDevice));
sort_key_int<<<1, newNum/2>>>((int*)gpuKey, newNum, (int*)gpuSortedKey, gpuPos, dir);
}else if (type == FLOAT){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, sizeof(float) * newNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, sizeof(float) * newNum));
set_key_float<<<8,128>>>((float*)gpuKey,newNum);
        CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], sizeof(float)*gpuTupleNum,cudaMemcpyDeviceToDevice));
sort_key_float<<<1, newNum/2>>>((float*)gpuKey, newNum, (float*)gpuSortedKey, gpuPos, dir);
}else if (type == STRING){
int keySize = odNode->table->attrSize[index];
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuKey, keySize * newNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuSortedKey, keySize * newNum));
set_key_string<<<8,128>>>(gpuKey,newNum*keySize);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuKey, column[index], keySize*gpuTupleNum,cudaMemcpyDeviceToDevice));
sort_key_string<<<1, newNum/2>>>(gpuKey, newNum, keySize,gpuSortedKey, gpuPos, dir);
}
/* Currently we only support no more than 2 orderBy columns */
if (odNode->orderByNum == 2){
int keySize = odNode->table->attrSize[index];
int secIndex = odNode->orderByIndex[1];
int keySize2 = odNode->table->attrSize[secIndex];
int secType = odNode->table->attrType[secIndex];
int * keyNum , *keyCount, *keyPsum;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&keyNum, sizeof(int)));
if(type == INT){
count_unique_keys_int<<<1,1>>>((int *)gpuSortedKey, gpuTupleNum,keyNum);
}else if (type == FLOAT){
count_unique_keys_float<<<1,1>>>((float *)gpuSortedKey, gpuTupleNum, keyNum);
}else if (type == STRING){
count_unique_keys_string<<<1,1>>>(gpuKey, gpuTupleNum,keySize,keyNum);
}
int cpuKeyNum;
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&cpuKeyNum, keyNum, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&keyCount, sizeof(int)* cpuKeyNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&keyPsum, sizeof(int)* cpuKeyNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(keyPsum, 0, sizeof(int) * cpuKeyNum));
if(type == INT){
count_key_num_int<<<1,1>>>((int*)gpuKey,gpuTupleNum,keyCount);
}else if (type == FLOAT){
count_key_num_float<<<1,1>>>((float*)gpuKey,gpuTupleNum,keyCount);
}else if (type == STRING){
count_key_num_string<<<1,1>>>(gpuKey,gpuTupleNum,keySize,keyCount);
}
scanImpl(keyCount, cpuKeyNum, keyPsum, pp);
int * gpuPos2;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuPos2, sizeof(int)* newNum));
char * gpuKey2;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuKey2, keySize2 * newNum));
if(secType == INT){
gather_col_int<<<8,128>>>(gpuPos,(int*)column[secIndex],newNum, gpuTupleNum, (int*)gpuKey2);
sec_sort_key_int<<<cpuKeyNum,1>>>((int*)gpuKey2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}else if (secType == FLOAT){
gather_col_float<<<8,128>>>(gpuPos,(float*)column[secIndex],newNum, gpuTupleNum, (float*)gpuKey2);
sec_sort_key_float<<<cpuKeyNum,1>>>((float*)gpuKey2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}else if (secType == STRING){
gather_col_string<<<8,128>>>(gpuPos,column[secIndex],newNum, gpuTupleNum, keySize2,gpuKey2);
sec_sort_key_string<<<cpuKeyNum,1>>>(gpuKey2, keySize2, keyPsum, keyCount , gpuTupleNum, gpuPos, gpuPos2);
}
gather_result<<<8,128>>>(gpuPos2, gpuContent, newNum, gpuTupleNum, gpuSize,res->totalAttr,gpuResult);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(keyCount));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(keyNum));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuPos2));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuKey2));
}else{
gather_result<<<8,128>>>(gpuPos, gpuContent, newNum, gpuTupleNum, gpuSize,res->totalAttr,gpuResult);
}
for(int i=0; i<res->totalAttr;i++){
int size = res->attrSize[i] * gpuTupleNum;
memset(res->content[i],0, size);
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[i], result[i],size, cudaMemcpyDeviceToHost));
}
for(int i=0;i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(cudaFree(column[i]));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(result[i]));
}
free(column);
free(result);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuKey));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuSortedKey));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuSize));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuPos));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("OrderBy Time: %lf\n", timeE/(1000*1000));
return res;
}
|
15930d05eb8ee8f9b199421fd32339088787c637.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
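// Element-wise x^(3/2) over an sd-by-fd matrix stored column-major: entry
// (i,j) of a and b lives at offset + i + j*ld, and b(i,j) = pow(a(i,j), 1.5).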
__global__ void ge_pow3o2 (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], REAL3o2);
}
} | 15930d05eb8ee8f9b199421fd32339088787c637.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_pow3o2 (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
b[offset_b + gid_0 + gid_1 * ld_b] = CAST(pow)(a[offset_a + gid_0 + gid_1 * ld_a], REAL3o2);
}
} |
85deb5b8a1ef0b2513f9e1f6e2f1f307b665f059.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 2048
#define REP 128
//#define PRINTNEED
#define TIMETESTEVENT
#include <hip/hip_runtime.h>
#include "repeat.h"
__global__ void test_register_latency(double *time,DATATYPE *out,int its)
{
int p=3;
int q=1;
int r,x=2,y=5,z=7;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(r=p;p=q;q=x;x=y;y=z;z=r;)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[0] =r;
time[0] = time_tmp;
}
__constant__ DATATYPE d_const_array[ARRAYLEN];
__global__ void test_const_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=d_const_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[1] =p;
time[1] = time_tmp;
}
__global__ void test_shared_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
__shared__ DATATYPE shared_array[ARRAYLEN];
int i;
for (i=0;i<ARRAYLEN;i++)
{
shared_array[i]=array[i];
}
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=shared_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[2] =p;
time[2] = time_tmp;
}
__global__ void test_local_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
DATATYPE local_array[ARRAYLEN];
int i;
for (i=0;i<ARRAYLEN;i++)
{
local_array[i]=array[i];
}
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=local_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[3] =p;
time[3] = time_tmp;
}
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[4] =p;
time[4] = time_tmp;
}
texture <int,1,hipReadModeElementType> texref;
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[5] =p;
time[5] = time_tmp;
}
void call_test_latency(int step,int its,double *h_time)
{
DATATYPE *h_array;
h_array=(DATATYPE*)malloc(sizeof(DATATYPE)*ARRAYLEN);
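// Fill the array as a pointer-chasing ring: following p = array[p] visits elements `step` apart
// (mod ARRAYLEN), so every load depends on the previous one and larger steps reduce cache reuse.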
for (int i=0;i<ARRAYLEN;i++)
{
h_array[i]=(i+step)%ARRAYLEN;
}
DATATYPE *d_array;
hipMalloc((void**)&d_array,sizeof(DATATYPE)*ARRAYLEN);
hipMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_const_array,h_array,sizeof(DATATYPE)*ARRAYLEN);
/*texture*/
hipBindTexture(NULL,texref,d_array,sizeof(DATATYPE)*ARRAYLEN);
double *d_time;
hipMalloc((void**)&d_time,sizeof(double)*6);
DATATYPE *d_out,*h_out;
h_out=(DATATYPE *)malloc(sizeof(DATATYPE)*6);
hipMalloc((void**)&d_out,sizeof(DATATYPE)*6);
hipLaunchKernelGGL(( test_register_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its);
hipLaunchKernelGGL(( test_const_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its);
hipLaunchKernelGGL(( test_shared_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its,d_array);
hipLaunchKernelGGL(( test_local_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its,d_array);
hipLaunchKernelGGL(( test_global_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its,d_array);
hipLaunchKernelGGL(( test_texture_latency) , dim3(1),dim3(1), 0, 0, d_time,d_out,its);
hipMemcpy(h_out,d_out,sizeof(DATATYPE)*6,hipMemcpyDeviceToHost);
hipMemcpy(h_time,d_time,sizeof(double)*6,hipMemcpyDeviceToHost);
printf("%d:\t%f\t%f\t%f\t%f\t%f\t%f\n",step,h_time[0],h_time[1],h_time[2],h_time[3],h_time[4],h_time[5]);
// printf("out=%d\t%d\t%d\t%d\t%d\n",h_out[0],h_out[1],h_out[2],h_out[3],h_out[4]);
hipUnbindTexture(texref);
hipFree(d_array);
hipFree(d_time);
hipFree(d_out);
free(h_array);
free(h_out);
}
int main()
{
double *h_time;
h_time = (double *) malloc(sizeof(double) * 6 * 1024);
printf("step:register\t constant\t shared\t local\t global\t texture\n");
for (int i = 1; i <= 1024; i++) {
call_test_latency(i, 30, &h_time[(i - 1) * 6]);
}
printf("average:\t");
for (int i = 0; i < 6; i++) {
double average = 0.0;
for (int j = 0; j < 1024; j++) {
average += h_time[j * 6 + i];
}
average /= 1024.0;
printf("%f\t", average);
}
printf("\n");
return 0;
} | 85deb5b8a1ef0b2513f9e1f6e2f1f307b665f059.cu | #include <stdlib.h>
#include <stdio.h>
#define DATATYPE int
#define ARRAYLEN 2048
#define REP 128
//#define PRINTNEED
#define TIMETESTEVENT
#include <cuda_runtime.h>
#include "repeat.h"
__global__ void test_register_latency(double *time,DATATYPE *out,int its)
{
int p=3;
int q=1;
int r,x=2,y=5,z=7;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(r=p;p=q;q=x;x=y;y=z;z=r;)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[0] =r;
time[0] = time_tmp;
}
__constant__ DATATYPE d_const_array[ARRAYLEN];
__global__ void test_const_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=d_const_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[1] =p;
time[1] = time_tmp;
}
__global__ void test_shared_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
__shared__ DATATYPE shared_array[ARRAYLEN];
int i;
for (i=0;i<ARRAYLEN;i++)
{
shared_array[i]=array[i];
}
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=shared_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[2] =p;
time[2] = time_tmp;
}
__global__ void test_local_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
DATATYPE local_array[ARRAYLEN];
int i;
for (i=0;i<ARRAYLEN;i++)
{
local_array[i]=array[i];
}
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=local_array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[3] =p;
time[3] = time_tmp;
}
__global__ void test_global_latency(double *time,DATATYPE *out,int its,DATATYPE *array)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=array[p];)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[4] =p;
time[4] = time_tmp;
}
texture <int,1,cudaReadModeElementType> texref;
__global__ void test_texture_latency(double *time,DATATYPE *out,int its)
{
int p=0;
double time_tmp=0.0;
unsigned int start_time=0, stop_time=0;
for (int i=0;i<its;i++)
{
__syncthreads();
start_time = clock();
repeat128(p=tex1Dfetch(texref,p);)
stop_time = clock();
time_tmp+=(stop_time-start_time);
}
time_tmp=time_tmp/128.0/its;
out[5] =p;
time[5] = time_tmp;
}
void call_test_latency(int step,int its,double *h_time)
{
DATATYPE *h_array;
h_array=(DATATYPE*)malloc(sizeof(DATATYPE)*ARRAYLEN);
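// Fill the array as a pointer-chasing ring: following p = array[p] visits elements `step` apart
// (mod ARRAYLEN), so every load depends on the previous one and larger steps reduce cache reuse.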
for (int i=0;i<ARRAYLEN;i++)
{
h_array[i]=(i+step)%ARRAYLEN;
}
DATATYPE *d_array;
cudaMalloc((void**)&d_array,sizeof(DATATYPE)*ARRAYLEN);
cudaMemcpy(d_array,h_array,sizeof(DATATYPE)*ARRAYLEN,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_const_array,h_array,sizeof(DATATYPE)*ARRAYLEN);
/*texture*/
cudaBindTexture(NULL,texref,d_array,sizeof(DATATYPE)*ARRAYLEN);
double *d_time;
cudaMalloc((void**)&d_time,sizeof(double)*6);
DATATYPE *d_out,*h_out;
h_out=(DATATYPE *)malloc(sizeof(DATATYPE)*6);
cudaMalloc((void**)&d_out,sizeof(DATATYPE)*6);
test_register_latency <<<1,1>>>(d_time,d_out,its);
test_const_latency <<<1,1>>>(d_time,d_out,its);
test_shared_latency <<<1,1>>>(d_time,d_out,its,d_array);
test_local_latency <<<1,1>>>(d_time,d_out,its,d_array);
test_global_latency <<<1,1>>>(d_time,d_out,its,d_array);
test_texture_latency <<<1,1>>>(d_time,d_out,its);
cudaMemcpy(h_out,d_out,sizeof(DATATYPE)*6,cudaMemcpyDeviceToHost);
cudaMemcpy(h_time,d_time,sizeof(double)*6,cudaMemcpyDeviceToHost);
printf("%d:\t%f\t%f\t%f\t%f\t%f\t%f\n",step,h_time[0],h_time[1],h_time[2],h_time[3],h_time[4],h_time[5]);
// printf("out=%d\t%d\t%d\t%d\t%d\n",h_out[0],h_out[1],h_out[2],h_out[3],h_out[4]);
cudaUnbindTexture(texref);
cudaFree(d_array);
cudaFree(d_time);
cudaFree(d_out);
free(h_array);
free(h_out);
}
int main()
{
double *h_time;
h_time = (double *) malloc(sizeof(double) * 6 * 1024);
printf("step:register\t constant\t shared\t local\t global\t texture\n");
for (int i = 1; i <= 1024; i++) {
call_test_latency(i, 30, &h_time[(i - 1) * 6]);
}
printf("average:\t");
for (int i = 0; i < 6; i++) {
double average = 0.0;
for (int j = 0; j < 1024; j++) {
average += h_time[j * 6 + i];
}
average /= 1024.0;
printf("%f\t", average);
}
printf("\n");
return 0;
} |
51cc0d7d46cf26a7eef8b9aa7ffcf03e13739e00.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "solution2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *f = NULL;
hipMalloc(&f, XSIZE*YSIZE);
float lambda = 1;
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
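// iXSIZE/iYSIZE have been rounded up to multiples of the block dimensions so the grid below
// covers the problem size exactly.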
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(solution2, dim3(gridBlock), dim3(threadBlock), 0, 0, f, lambda, nx, ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(solution2, dim3(gridBlock), dim3(threadBlock), 0, 0, f, lambda, nx, ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(solution2, dim3(gridBlock), dim3(threadBlock), 0, 0, f, lambda, nx, ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 51cc0d7d46cf26a7eef8b9aa7ffcf03e13739e00.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "solution2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *f = NULL;
cudaMalloc(&f, XSIZE*YSIZE);
float lambda = 1;
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
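// iXSIZE/iYSIZE have been rounded up to multiples of the block dimensions so the grid below
// covers the problem size exactly.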
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
solution2<<<gridBlock,threadBlock>>>(f,lambda,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
solution2<<<gridBlock,threadBlock>>>(f,lambda,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
solution2<<<gridBlock,threadBlock>>>(f,lambda,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
16a7af02c081f00591e2b65d978dcb775a103099.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transpose_read_row_write_column(int * mat, int * transpose, int nx, int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
transpose[ix * ny + iy] = mat[iy * nx + ix];
}
} | 16a7af02c081f00591e2b65d978dcb775a103099.cu | #include "includes.h"
__global__ void transpose_read_row_write_column(int * mat, int * transpose, int nx, int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
transpose[ix * ny + iy] = mat[iy * nx + ix];
}
} |
511d5e2be0604812c163d8c7b4cbdce3b83625b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <omp.h>
#include <sys/time.h>
#include <omp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <ctype.h>
#include <cstring>
#include "tbb/parallel_sort.h"
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
using namespace std;
const double pi=3.141592653589793238462643383279502884197;
int ref_line[20];
char ref_file[20][16];
int sam_line[20];
char sam_file[20][16];
const int GPU_N = 2;
const int GBSize = 1024 * 1024 * 1024;
const int block_size = 512;
const int TILE_SIZE = 1024;
struct NODE
{
double ra,dec;
int pix;
};
const int cntSize = 805306368;
double diffTime(timeval start,timeval end)
{
return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001;
}
bool cmp(NODE a,NODE b)
{
return a.pix < b.pix;
}
void readFile(char *file,int N, NODE nn[])
{
FILE *fd = fopen(file,"r");
if(fd == NULL)
printf("Read %s error!\n",file);
for(int i = 0; i < N; ++i)
fscanf(fd,"%d%lf%lf",&nn[i].pix,&nn[i].ra,&nn[i].dec);
fclose(fd);
}
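// binary_search returns the index of the first element whose pix is strictly greater than key
// (an upper bound over the pix-sorted array), or -1 if every element's pix is <= key.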
__host__ __device__
int binary_search(int key, NODE *node, int N)
{
int st = 0;
int ed = N - 1;
while(st < ed)
{
int mid = st + ((ed - st) >> 1);
if(node[mid].pix <= key)
st = mid + 1;
else
ed = mid;
}
if(node[ed].pix > key)
return ed;
return -1;
}
__host__ __device__ double radians(double degree)
{
return degree * pi / 180.0;
}
__host__ __device__
bool matched(double ra1,double dec1,double ra2,double dec2,double radius)
{
double z1 = sin(radians(dec1));
double x1 = cos(radians(dec1)) * cos(radians(ra1));
double y1 = cos(radians(dec1)) * sin(radians(ra1));
double z2 = sin(radians(dec2));
double x2 = cos(radians(dec2)) * cos(radians(ra2));
double y2 = cos(radians(dec2)) * sin(radians(ra2));
double distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
double dist2 = 4 * pow(sin(radians(0.0056 / 2)),2);
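// dist2 is the squared chord length for a 0.0056-degree angular separation between unit vectors;
// note that the `radius` parameter passed to this function is not used in the comparison.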
if(distance <= dist2)
return true;
return false;
}
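// kernel_singleCM: each thread handles one sample point. The block locates the slice of the
// pix-sorted reference array covering its samples, stages it through shared memory in TILE_SIZE
// chunks, and records up to 5 matching reference indices per sample plus a match count.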
__global__
void kernel_singleCM(NODE *ref_node, int ref_N, NODE *sam_node, int sam_N, int *sam_match,int *sam_matchedCnt,int ref_offset,int sam_offset)
{
__shared__ int s_ref_pix[TILE_SIZE];
__shared__ double s_ref_ra[TILE_SIZE];
__shared__ double s_ref_dec[TILE_SIZE];
__shared__ int start_pix,end_pix;
__shared__ int start_ref_pos,end_ref_pos;
__shared__ int block_sam_N,block_ref_N;
__shared__ int iteration;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < sam_N)
sam_matchedCnt[tid] = 0;
if(threadIdx.x == 0)
{
if(blockIdx.x == gridDim.x - 1) // the last block
block_sam_N = sam_N - blockIdx.x * blockDim.x;
else
block_sam_N = blockDim.x;
start_pix = sam_node[tid].pix;
end_pix = sam_node[tid + block_sam_N - 1].pix;
if(start_pix == 0)
start_ref_pos = 0;
else
start_ref_pos = binary_search(start_pix - 1,ref_node,ref_N);
end_ref_pos = binary_search(end_pix,ref_node,ref_N);
if(end_ref_pos == -1)
end_ref_pos = ref_N - 1;
else
end_ref_pos--;
block_ref_N = end_ref_pos - start_ref_pos + 1;
iteration = ceil(block_ref_N * 1.0 / TILE_SIZE);
}
__syncthreads();
if(start_ref_pos == -1 || end_ref_pos < start_ref_pos)
return;
int pix,cnt = 0;
double sam_ra,sam_dec;
if(tid < sam_N)
{
pix = sam_node[tid].pix;
sam_ra = sam_node[tid].ra;
sam_dec = sam_node[tid].dec;
cnt = 0;
}
__syncthreads();
for(int ite = 0; ite < iteration; ++ite)
{
__syncthreads();
for(int k = 0; k < TILE_SIZE / blockDim.x; ++k)
{
int ref_pos = start_ref_pos + ite * TILE_SIZE + blockDim.x * k + threadIdx.x;
int s_ref_pos = blockDim.x * k + threadIdx.x;
if(ref_pos <= end_ref_pos)
{
s_ref_pix[s_ref_pos] = ref_node[ref_pos].pix;
s_ref_ra[s_ref_pos] = ref_node[ref_pos].ra;
s_ref_dec[s_ref_pos] = ref_node[ref_pos].dec;
}
else
s_ref_pix[s_ref_pos] = -1;
}
__syncthreads();
if(tid >= sam_N)
continue;
for(int j = 0; j < TILE_SIZE; ++j)
{
if(s_ref_pix[j] == -1 || s_ref_pix[j] > pix)
break;
if(s_ref_pix[j] < pix)
continue;
if(s_ref_pix[j] == pix && matched(sam_ra,sam_dec,s_ref_ra[j],s_ref_dec[j],0.0056))
{
cnt++;
if(cnt <= 5)
sam_match[tid * 5 + cnt - 1] = ref_offset + start_ref_pos + ite * TILE_SIZE + j;
}
}
}
if(tid < sam_N)
sam_matchedCnt[tid] = cnt;
}
void singleCM(NODE h_ref_node[], int ref_N, NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[])
{
//the maximum number of sample points that can be matched each time by each card
int part_sam_N = 25000000;
int part_ref_N = 8 * part_sam_N;
NODE *d_ref_node[GPU_N];
NODE *d_sam_node[GPU_N];
int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N];
int chunk_N = (int)ceil(sam_N * 1.0 / part_sam_N);
int chunk_id = 0;
omp_set_num_threads(GPU_N);
#pragma omp parallel
{
int i = omp_get_thread_num() % GPU_N;
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipDeviceReset());
size_t free_mem,total_mem;
checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem));
printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
checkCudaErrors(hipMalloc(&d_ref_node[i],sizeof(NODE) * part_ref_N));
checkCudaErrors(hipMalloc(&d_sam_node[i],sizeof(NODE) * part_sam_N));
checkCudaErrors(hipMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5));
checkCudaErrors(hipMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N));
checkCudaErrors(hipMemGetInfo(&free_mem,&total_mem));
printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
while(chunk_id < chunk_N)
// each GPU thread keeps claiming chunks of sample points until all chunks have been processed
{
#pragma omp atomic
chunk_id++;
int cur_sam_N;
if(chunk_id == chunk_N) // the last round
cur_sam_N = sam_N - (chunk_id - 1) * part_sam_N;
else
cur_sam_N = part_sam_N;
int start_sam_pos = (chunk_id - 1) * part_sam_N;
int end_sam_pos = start_sam_pos + cur_sam_N - 1;
int start_pix = h_sam_node[start_sam_pos].pix;
int end_pix = h_sam_node[end_sam_pos].pix;
int start_ref_pos;
if(start_pix == 0)
start_ref_pos = 0;
else
start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N);
// start_ref_pos = get_start(start_pix,h_ref_node,ref_N);
if(start_ref_pos == -1)
continue;
int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1;
if(end_ref_pos == -2)
end_ref_pos = ref_N - 1;
int cur_ref_N = end_ref_pos - start_ref_pos + 1;
dim3 block(block_size);
dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x)));
if(cur_ref_N == 0)
continue;
printf("\n\nCard %d chunk-%d\n",i,chunk_id - 1);
printf("block.x %d grid.x %d\n",block.x,grid.x);
printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N);
printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N);
checkCudaErrors(hipMemset(d_sam_matchedCnt[i],0,sizeof(int) * part_sam_N));
checkCudaErrors(hipMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(NODE),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(NODE), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_singleCM), dim3(grid),dim3(block), 0, 0, d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos);
checkCudaErrors(hipMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),hipMemcpyDeviceToHost));
}
}
unsigned long long sum = 0;
int cnt[1000];
memset(cnt,0,sizeof(cnt));
for(int i = sam_N - 1; i >= 0; --i)
{
sum += h_sam_matchedCnt[i];
/*
cout << i << " " << h_sam_matchedCnt[i] << endl;
cout << h_sam_node[i].ra << " " << h_sam_node[i].dec << endl;
cout << "\n----------------\n" << endl;
for(int j = i * 5; j < i * 5 + min(5,h_sam_matchedCnt[i]); ++j)
{
int pos = h_sam_match[j];
cout << h_ref_node[pos].ra << " " << h_ref_node[pos].dec << endl;
}
cout << "\n--------------------\n" << endl;
*/
}
cout << "sum " << sum << endl;
cout << "ave " << sum * 1.0 / sam_N << endl;
}
int main(int argc, char *argv[])
{
struct timeval start,end;
int ref_N = atoi(argv[3]);
int sam_N = atoi(argv[4]);
// const int ref_N = 200006373;
time_t rawtime;
FILE *fd = fopen(argv[1],"r");
for(int i = 0; i < 20; ++i)
fscanf(fd,"%d%s",&ref_line[i],ref_file[i]);
fclose(fd);
fd = fopen(argv[2],"r");
for(int i = 0; i < 20; ++i)
fscanf(fd,"%d%s",&sam_line[i],sam_file[i]);
fclose(fd);
NODE *ref_node,*sam_node;
int *sam_matchedCnt;
int *sam_match;
ref_node = (NODE *)malloc(sizeof(NODE) * ref_N);
sam_node = (NODE *)malloc(sizeof(NODE) * sam_N);
sam_matchedCnt = (int *)malloc(sizeof(int) * sam_N);
sam_match = (int *)malloc(sizeof(int) * sam_N * 5);
time(&rawtime);
printf("before read ref file : %s\n",ctime(&rawtime));
omp_set_num_threads(20);
#pragma omp parallel
{
int i = omp_get_thread_num() % 20;
int offset = i * ref_line[0];
readFile(ref_file[i],ref_line[i],ref_node + offset);
}
time(&rawtime);
printf("after read ref file : %s\n",ctime(&rawtime));
#pragma omp parallel
{
int i = omp_get_thread_num() % 20;
int offset = i * sam_line[0];
readFile(sam_file[i],sam_line[i],sam_node + offset);
}
time(&rawtime);
printf("after read sam file : %s\n",ctime(&rawtime));
gettimeofday(&start,NULL);
tbb::parallel_sort(ref_node,ref_node + ref_N,cmp);
tbb::parallel_sort(sam_node,sam_node + sam_N,cmp);
gettimeofday(&end,NULL);
printf("sort time %.3f \n",diffTime(start,end) * 0.001);
time(&rawtime);
printf("after sort : %s\n",ctime(&rawtime));
gettimeofday(&start,NULL);
singleCM(ref_node,ref_N,sam_node,sam_N,sam_match,sam_matchedCnt);
gettimeofday(&end,NULL);
printf("single CM %.3f s\n",diffTime(start,end) * 0.001);
printf("single CM %.3f min\n",diffTime(start,end) * 0.001 / 60);
time(&rawtime);
printf("singleCM : %s\n",ctime(&rawtime));
int *ref_match = (int*)malloc(sizeof(int) * ref_N * 5);
int *ref_matchedCnt = (int*)malloc(sizeof(int) * ref_N);
gettimeofday(&start,NULL);
singleCM(sam_node,sam_N,ref_node,ref_N,ref_match,ref_matchedCnt);
gettimeofday(&end,NULL);
printf("singe CM-2 %.3f s\n",diffTime(start,end) * 0.001);
free(sam_node);
free(ref_node);
free(ref_match);
free(sam_match);
free(ref_matchedCnt);
free(sam_matchedCnt);
return 0;
}
| 511d5e2be0604812c163d8c7b4cbdce3b83625b5.cu | #include <iostream>
#include <cstdio>
#include <cstdlib>
#include <omp.h>
#include <sys/time.h>
#include <omp.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <ctype.h>
#include <cstring>
#include "tbb/parallel_sort.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
using namespace std;
const double pi=3.141592653589793238462643383279502884197;
int ref_line[20];
char ref_file[20][16];
int sam_line[20];
char sam_file[20][16];
const int GPU_N = 2;
const int GBSize = 1024 * 1024 * 1024;
const int block_size = 512;
const int TILE_SIZE = 1024;
struct NODE
{
double ra,dec;
int pix;
};
const int cntSize = 805306368;
double diffTime(timeval start,timeval end)
{
return (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec) * 0.001;
}
bool cmp(NODE a,NODE b)
{
return a.pix < b.pix;
}
void readFile(char *file,int N, NODE nn[])
{
FILE *fd = fopen(file,"r");
if(fd == NULL)
printf("Read %s error!\n",file);
for(int i = 0; i < N; ++i)
fscanf(fd,"%d%lf%lf",&nn[i].pix,&nn[i].ra,&nn[i].dec);
fclose(fd);
}
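// binary_search returns the index of the first element whose pix is strictly greater than key
// (an upper bound over the pix-sorted array), or -1 if every element's pix is <= key.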
__host__ __device__
int binary_search(int key, NODE *node, int N)
{
int st = 0;
int ed = N - 1;
while(st < ed)
{
int mid = st + ((ed - st) >> 1);
if(node[mid].pix <= key)
st = mid + 1;
else
ed = mid;
}
if(node[ed].pix > key)
return ed;
return -1;
}
__host__ __device__ double radians(double degree)
{
return degree * pi / 180.0;
}
__host__ __device__
bool matched(double ra1,double dec1,double ra2,double dec2,double radius)
{
double z1 = sin(radians(dec1));
double x1 = cos(radians(dec1)) * cos(radians(ra1));
double y1 = cos(radians(dec1)) * sin(radians(ra1));
double z2 = sin(radians(dec2));
double x2 = cos(radians(dec2)) * cos(radians(ra2));
double y2 = cos(radians(dec2)) * sin(radians(ra2));
double distance = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2) + (z1 - z2) * (z1 - z2);
double dist2 = 4 * pow(sin(radians(0.0056 / 2)),2);
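// dist2 is the squared chord length for a 0.0056-degree angular separation between unit vectors;
// note that the `radius` parameter passed to this function is not used in the comparison.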
if(distance <= dist2)
return true;
return false;
}
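// kernel_singleCM: each thread handles one sample point. The block locates the slice of the
// pix-sorted reference array covering its samples, stages it through shared memory in TILE_SIZE
// chunks, and records up to 5 matching reference indices per sample plus a match count.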
__global__
void kernel_singleCM(NODE *ref_node, int ref_N, NODE *sam_node, int sam_N, int *sam_match,int *sam_matchedCnt,int ref_offset,int sam_offset)
{
__shared__ int s_ref_pix[TILE_SIZE];
__shared__ double s_ref_ra[TILE_SIZE];
__shared__ double s_ref_dec[TILE_SIZE];
__shared__ int start_pix,end_pix;
__shared__ int start_ref_pos,end_ref_pos;
__shared__ int block_sam_N,block_ref_N;
__shared__ int iteration;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < sam_N)
sam_matchedCnt[tid] = 0;
if(threadIdx.x == 0)
{
if(blockIdx.x == gridDim.x - 1) // the last block
block_sam_N = sam_N - blockIdx.x * blockDim.x;
else
block_sam_N = blockDim.x;
start_pix = sam_node[tid].pix;
end_pix = sam_node[tid + block_sam_N - 1].pix;
if(start_pix == 0)
start_ref_pos = 0;
else
start_ref_pos = binary_search(start_pix - 1,ref_node,ref_N);
end_ref_pos = binary_search(end_pix,ref_node,ref_N);
if(end_ref_pos == -1)
end_ref_pos = ref_N - 1;
else
end_ref_pos--;
block_ref_N = end_ref_pos - start_ref_pos + 1;
iteration = ceil(block_ref_N * 1.0 / TILE_SIZE);
}
__syncthreads();
if(start_ref_pos == -1 || end_ref_pos < start_ref_pos)
return;
int pix,cnt = 0;
double sam_ra,sam_dec;
if(tid < sam_N)
{
pix = sam_node[tid].pix;
sam_ra = sam_node[tid].ra;
sam_dec = sam_node[tid].dec;
cnt = 0;
}
__syncthreads();
for(int ite = 0; ite < iteration; ++ite)
{
__syncthreads();
for(int k = 0; k < TILE_SIZE / blockDim.x; ++k)
{
int ref_pos = start_ref_pos + ite * TILE_SIZE + blockDim.x * k + threadIdx.x;
int s_ref_pos = blockDim.x * k + threadIdx.x;
if(ref_pos <= end_ref_pos)
{
s_ref_pix[s_ref_pos] = ref_node[ref_pos].pix;
s_ref_ra[s_ref_pos] = ref_node[ref_pos].ra;
s_ref_dec[s_ref_pos] = ref_node[ref_pos].dec;
}
else
s_ref_pix[s_ref_pos] = -1;
}
__syncthreads();
if(tid >= sam_N)
continue;
for(int j = 0; j < TILE_SIZE; ++j)
{
if(s_ref_pix[j] == -1 || s_ref_pix[j] > pix)
break;
if(s_ref_pix[j] < pix)
continue;
if(s_ref_pix[j] == pix && matched(sam_ra,sam_dec,s_ref_ra[j],s_ref_dec[j],0.0056))
{
cnt++;
if(cnt <= 5)
sam_match[tid * 5 + cnt - 1] = ref_offset + start_ref_pos + ite * TILE_SIZE + j;
}
}
}
if(tid < sam_N)
sam_matchedCnt[tid] = cnt;
}
void singleCM(NODE h_ref_node[], int ref_N, NODE h_sam_node[], int sam_N, int h_sam_match[],int h_sam_matchedCnt[])
{
//the maximum number of sample points that can be matched each time by each card
int part_sam_N = 25000000;
int part_ref_N = 8 * part_sam_N;
NODE *d_ref_node[GPU_N];
NODE *d_sam_node[GPU_N];
int *d_sam_match[GPU_N], *d_sam_matchedCnt[GPU_N];
int chunk_N = (int)ceil(sam_N * 1.0 / part_sam_N);
int chunk_id = 0;
omp_set_num_threads(GPU_N);
#pragma omp parallel
{
int i = omp_get_thread_num() % GPU_N;
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaDeviceReset());
size_t free_mem,total_mem;
checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem));
printf("Card %d before malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
checkCudaErrors(cudaMalloc(&d_ref_node[i],sizeof(NODE) * part_ref_N));
checkCudaErrors(cudaMalloc(&d_sam_node[i],sizeof(NODE) * part_sam_N));
checkCudaErrors(cudaMalloc(&d_sam_match[i],sizeof(int) * part_sam_N * 5));
checkCudaErrors(cudaMalloc(&d_sam_matchedCnt[i],sizeof(int) * part_sam_N));
checkCudaErrors(cudaMemGetInfo(&free_mem,&total_mem));
printf("Card %d after malloc %.2lf GB, total memory %.2lf GB\n",i,free_mem * 1.0 / GBSize,total_mem * 1.0 / GBSize);
while(chunk_id < chunk_N)
// each GPU thread keeps claiming chunks of sample points until all chunks have been processed
{
#pragma omp atomic
chunk_id++;
int cur_sam_N;
if(chunk_id == chunk_N) // the last round
cur_sam_N = sam_N - (chunk_id - 1) * part_sam_N;
else
cur_sam_N = part_sam_N;
int start_sam_pos = (chunk_id - 1) * part_sam_N;
int end_sam_pos = start_sam_pos + cur_sam_N - 1;
int start_pix = h_sam_node[start_sam_pos].pix;
int end_pix = h_sam_node[end_sam_pos].pix;
int start_ref_pos;
if(start_pix == 0)
start_ref_pos = 0;
else
start_ref_pos = binary_search(start_pix - 1,h_ref_node,ref_N);
// start_ref_pos = get_start(start_pix,h_ref_node,ref_N);
if(start_ref_pos == -1)
continue;
int end_ref_pos = binary_search(end_pix,h_ref_node,ref_N) - 1;
if(end_ref_pos == -2)
end_ref_pos = ref_N - 1;
int cur_ref_N = end_ref_pos - start_ref_pos + 1;
dim3 block(block_size);
dim3 grid(min(65536,(int)ceil(cur_sam_N * 1.0 / block.x)));
if(cur_ref_N == 0)
continue;
printf("\n\nCard %d chunk-%d\n",i,chunk_id - 1);
printf("block.x %d grid.x %d\n",block.x,grid.x);
printf("start_sam_pos %d start_sam_pix %d end_sam_pos %d end_sam_pix %d sam_N %d\n",start_sam_pos,start_pix,end_sam_pos,end_pix,cur_sam_N);
printf("start_ref_pos %d start_ref_pix %d end_ref_pos %d end_ref_pix %d ref_N %d\n",start_ref_pos,h_ref_node[start_ref_pos].pix,end_ref_pos,h_ref_node[end_ref_pos].pix,cur_ref_N);
checkCudaErrors(cudaMemset(d_sam_matchedCnt[i],0,sizeof(int) * part_sam_N));
checkCudaErrors(cudaMemcpy(d_sam_node[i],h_sam_node + start_sam_pos,cur_sam_N * sizeof(NODE),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_ref_node[i],h_ref_node + start_ref_pos,cur_ref_N * sizeof(NODE), cudaMemcpyHostToDevice));
kernel_singleCM<<<grid,block>>>(d_ref_node[i],cur_ref_N,d_sam_node[i],cur_sam_N,d_sam_match[i],d_sam_matchedCnt[i],start_ref_pos,start_sam_pos);
checkCudaErrors(cudaMemcpy(h_sam_matchedCnt + start_sam_pos,d_sam_matchedCnt[i],cur_sam_N * sizeof(int),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_sam_match + start_sam_pos * 5,d_sam_match[i],cur_sam_N * 5 * sizeof(int),cudaMemcpyDeviceToHost));
}
}
unsigned long long sum = 0;
int cnt[1000];
memset(cnt,0,sizeof(cnt));
for(int i = sam_N - 1; i >= 0; --i)
{
sum += h_sam_matchedCnt[i];
/*
cout << i << " " << h_sam_matchedCnt[i] << endl;
cout << h_sam_node[i].ra << " " << h_sam_node[i].dec << endl;
cout << "\n----------------\n" << endl;
for(int j = i * 5; j < i * 5 + min(5,h_sam_matchedCnt[i]); ++j)
{
int pos = h_sam_match[j];
cout << h_ref_node[pos].ra << " " << h_ref_node[pos].dec << endl;
}
cout << "\n--------------------\n" << endl;
*/
}
cout << "sum " << sum << endl;
cout << "ave " << sum * 1.0 / sam_N << endl;
}
int main(int argc, char *argv[])
{
struct timeval start,end;
int ref_N = atoi(argv[3]);
int sam_N = atoi(argv[4]);
// const int ref_N = 200006373;
time_t rawtime;
FILE *fd = fopen(argv[1],"r");
for(int i = 0; i < 20; ++i)
fscanf(fd,"%d%s",&ref_line[i],ref_file[i]);
fclose(fd);
fd = fopen(argv[2],"r");
for(int i = 0; i < 20; ++i)
fscanf(fd,"%d%s",&sam_line[i],sam_file[i]);
fclose(fd);
NODE *ref_node,*sam_node;
int *sam_matchedCnt;
int *sam_match;
ref_node = (NODE *)malloc(sizeof(NODE) * ref_N);
sam_node = (NODE *)malloc(sizeof(NODE) * sam_N);
sam_matchedCnt = (int *)malloc(sizeof(int) * sam_N);
sam_match = (int *)malloc(sizeof(int) * sam_N * 5);
time(&rawtime);
printf("before read ref file : %s\n",ctime(&rawtime));
omp_set_num_threads(20);
#pragma omp parallel
{
int i = omp_get_thread_num() % 20;
int offset = i * ref_line[0];
readFile(ref_file[i],ref_line[i],ref_node + offset);
}
time(&rawtime);
printf("after read ref file : %s\n",ctime(&rawtime));
#pragma omp parallel
{
int i = omp_get_thread_num() % 20;
int offset = i * sam_line[0];
readFile(sam_file[i],sam_line[i],sam_node + offset);
}
time(&rawtime);
printf("after read sam file : %s\n",ctime(&rawtime));
gettimeofday(&start,NULL);
tbb::parallel_sort(ref_node,ref_node + ref_N,cmp);
tbb::parallel_sort(sam_node,sam_node + sam_N,cmp);
gettimeofday(&end,NULL);
printf("sort time %.3f \n",diffTime(start,end) * 0.001);
time(&rawtime);
printf("after sort : %s\n",ctime(&rawtime));
gettimeofday(&start,NULL);
singleCM(ref_node,ref_N,sam_node,sam_N,sam_match,sam_matchedCnt);
gettimeofday(&end,NULL);
printf("single CM %.3f s\n",diffTime(start,end) * 0.001);
printf("single CM %.3f min\n",diffTime(start,end) * 0.001 / 60);
time(&rawtime);
printf("singleCM : %s\n",ctime(&rawtime));
int *ref_match = (int*)malloc(sizeof(int) * ref_N * 5);
int *ref_matchedCnt = (int*)malloc(sizeof(int) * ref_N);
gettimeofday(&start,NULL);
singleCM(sam_node,sam_N,ref_node,ref_N,ref_match,ref_matchedCnt);
gettimeofday(&end,NULL);
printf("singe CM-2 %.3f s\n",diffTime(start,end) * 0.001);
free(sam_node);
free(ref_node);
free(ref_match);
free(sam_match);
free(ref_matchedCnt);
free(sam_matchedCnt);
return 0;
}
|
e8c87f5e9b0333a64e75f94e574088a8dc9f94c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
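// Computing C^T = B^T * A^T in column-major order yields the row-major C = A * B that caffe
// expects, which is why B is passed first and the leading dimensions are swapped below.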
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<unsigned int>(const int N, const unsigned int alpha, unsigned int* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sqrt_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| e8c87f5e9b0333a64e75f94e574088a8dc9f94c9.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
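// Computing C^T = B^T * A^T in column-major order yields the row-major C = A * B that caffe
// expects, which is why B is passed first and the leading dimensions are swapped below.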
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template void caffe_gpu_set<unsigned int>(const int N, const unsigned int alpha, unsigned int* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void sqrt_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = sqrt(a[index]);
}
}
template <>
void caffe_gpu_sqrt<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_sqrt<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sqrt_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
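// The unsigned int overload below emits raw 32-bit cuRAND samples; the float and
// double specializations draw uniform (0, 1] samples, then rescale by (b - a) and
// shift by a.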
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
58685d944cffc7e8a365a7f296b35d90cf88e806.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include "test_quantile.h"
#include "../helpers.h"
#include "../../../src/collective/communicator-inl.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/quantile.cuh"
namespace xgboost {
namespace {
struct IsSorted {
XGBOOST_DEVICE bool operator()(common::SketchEntry const& a, common::SketchEntry const& b) const {
return a.value < b.value;
}
};
}
namespace common {
TEST(GPUQuantile, Basic) {
constexpr size_t kRows = 1000, kCols = 100, kBins = 256;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, kBins, kCols, kRows, 0);
dh::caching_device_vector<Entry> entries;
dh::device_vector<bst_row_t> cuts_ptr(kCols+1);
thrust::fill(cuts_ptr.begin(), cuts_ptr.end(), 0);
// Push empty
sketch.Push(dh::ToSpan(entries), dh::ToSpan(cuts_ptr), dh::ToSpan(cuts_ptr), 0);
ASSERT_EQ(sketch.Data().size(), 0);
}
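// Sketches a random (optionally sparse) matrix through the adapter path, bounds the
// sketch size by the column-size scan, and verifies that Unique() leaves every
// column of the sketch sorted by value.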
void TestSketchUnique(float sparsity) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [kRows, kCols, sparsity](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, sparsity}
.Seed(seed)
.Device(0)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
dh::caching_device_vector<size_t> column_sizes_scan;
HostDeviceVector<size_t> cut_sizes_scan;
auto batch = adapter.Value();
data::IsValidFunctor is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) { return batch.GetElement(idx); });
auto end = kCols * kRows;
detail::GetColumnSizesScan(0, kCols, n_cuts, IterSpan{batch_iter, end}, is_valid,
&cut_sizes_scan, &column_sizes_scan);
auto const& cut_sizes = cut_sizes_scan.HostVector();
ASSERT_LE(sketch.Data().size(), cut_sizes.back());
std::vector<size_t> h_columns_ptr(sketch.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, sketch.ColumnsPtr());
ASSERT_EQ(sketch.Data().size(), h_columns_ptr.back());
sketch.Unique();
std::vector<SketchEntry> h_data(sketch.Data().size());
thrust::copy(dh::tcbegin(sketch.Data()), dh::tcend(sketch.Data()), h_data.begin());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = common::Span<SketchEntry>(h_data).subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
});
}
TEST(GPUQuantile, Unique) {
TestSketchUnique(0);
TestSketchUnique(0.5);
}
// if with_error is true, the test tolerates floating point error
void TestQuantileElemRank(int32_t device, Span<SketchEntry const> in,
Span<bst_row_t const> d_columns_ptr, bool with_error = false) {
dh::safe_cuda(hipSetDevice(device));
std::vector<SketchEntry> h_in(in.size());
dh::CopyDeviceSpanToVector(&h_in, in);
std::vector<bst_row_t> h_columns_ptr(d_columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, d_columns_ptr);
for (size_t i = 1; i < d_columns_ptr.size(); ++i) {
auto column_id = i - 1;
auto beg = h_columns_ptr[column_id];
auto end = h_columns_ptr[i];
auto in_column = Span<SketchEntry>{h_in}.subspan(beg, end - beg);
for (size_t idx = 1; idx < in_column.size(); ++idx) {
float prev_rmin = in_column[idx - 1].rmin;
float prev_rmax = in_column[idx - 1].rmax;
float rmin_next = in_column[idx].RMinNext();
if (with_error) {
ASSERT_GE(in_column[idx].rmin + in_column[idx].rmin * kRtEps,
prev_rmin);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps, prev_rmax);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps,
rmin_next);
} else {
ASSERT_GE(in_column[idx].rmin, prev_rmin);
ASSERT_GE(in_column[idx].rmax, prev_rmax);
ASSERT_GE(in_column[idx].rmax, rmin_next);
}
}
}
}
TEST(GPUQuantile, Prune) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
    // LE because kRows * kCols entries are pushed into the sketch; after removing
    // duplicated entries we might not have that many inputs for prune.
ASSERT_LE(sketch.Data().size(), n_cuts * kCols);
sketch.Prune(n_bins);
ASSERT_LE(sketch.Data().size(), kRows * kCols);
// This is not necessarily true for all inputs without calling unique after
// prune.
ASSERT_TRUE(thrust::is_sorted(thrust::device, sketch.Data().data(),
sketch.Data().data() + sketch.Data().size(),
detail::SketchUnique{}));
TestQuantileElemRank(0, sketch.Data(), sketch.ColumnsPtr());
});
}
TEST(GPUQuantile, MergeEmpty) {
constexpr size_t kRows = 1000, kCols = 100;
size_t n_bins = 10;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
MetaInfo info;
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
std::vector<SketchEntry> entries_before(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_before, sketch_0.Data());
std::vector<bst_row_t> ptrs_before(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_before, sketch_0.ColumnsPtr());
thrust::device_vector<size_t> columns_ptr(kCols + 1);
// Merge an empty sketch
sketch_0.Merge(dh::ToSpan(columns_ptr), Span<SketchEntry>{});
std::vector<SketchEntry> entries_after(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_after, sketch_0.Data());
std::vector<bst_row_t> ptrs_after(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_after, sketch_0.ColumnsPtr());
CHECK_EQ(entries_before.size(), entries_after.size());
CHECK_EQ(ptrs_before.size(), ptrs_after.size());
for (size_t i = 0; i < entries_before.size(); ++i) {
CHECK_EQ(entries_before[i].value, entries_after[i].value);
CHECK_EQ(entries_before[i].rmin, entries_after[i].rmin);
CHECK_EQ(entries_before[i].rmax, entries_after[i].rmax);
CHECK_EQ(entries_before[i].wmin, entries_after[i].wmin);
}
for (size_t i = 0; i < ptrs_before.size(); ++i) {
CHECK_EQ(ptrs_before[i], ptrs_after[i]);
}
}
TEST(GPUQuantile, MergeBasic) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
SketchContainer sketch_1(ft, n_bins, kCols, kRows * kRows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
if (info.weights_.Size() != 0) {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), true);
sketch_0.FixError();
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), false);
} else {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
}
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
ASSERT_TRUE(
thrust::is_sorted(thrust::device, sketch_0.Data().data(),
sketch_0.Data().data() + sketch_0.Data().size(),
detail::SketchUnique{}));
});
}
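// Builds a second sketch from data whose even-indexed elements are zeroed (many
// duplicated values), merges it into the first, then checks the rank invariants and
// per-column sortedness after Unique().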
void TestMergeDuplicated(int32_t n_bins, size_t cols, size_t rows, float frac) {
MetaInfo info;
int32_t seed = 0;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, cols, rows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_0);
size_t f_rows = rows * frac;
SketchContainer sketch_1(ft, n_bins, cols, f_rows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{f_rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
auto data_1 = storage_1.DeviceSpan();
auto tuple_it = thrust::make_tuple(
thrust::make_counting_iterator<size_t>(0ul), data_1.data());
using Tuple = thrust::tuple<size_t, float>;
auto it = thrust::make_zip_iterator(tuple_it);
thrust::transform(thrust::device, it, it + data_1.size(), data_1.data(),
[=] __device__(Tuple const &tuple) {
auto i = thrust::get<0>(tuple);
if (thrust::get<0>(tuple) % 2 == 0) {
return 0.0f;
} else {
return thrust::get<1>(tuple);
}
});
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
columns_ptr = sketch_0.ColumnsPtr();
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
std::vector<SketchEntry> h_data(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&h_data, sketch_0.Data());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = Span<SketchEntry> {h_data}.subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
}
TEST(GPUQuantile, MergeDuplicated) {
size_t n_bins = 256;
constexpr size_t kRows = 1000, kCols = 100;
for (float frac = 0.5; frac < 2.5; frac += 0.5) {
TestMergeDuplicated(n_bins, kRows, kCols, frac);
}
}
TEST(GPUQuantile, MultiMerge) {
constexpr size_t kRows = 20, kCols = 1;
int32_t world = 2;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
// Set up single node version
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = ::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
});
}
namespace {
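// Emulates the distributed sketch on a single node by pruning and merging one
// sketch per rank, then compares it entry-by-entry against the AllReduce()'d
// distributed sketch built on this worker.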
void TestAllReduceBasic(int32_t n_gpus) {
auto const world = collective::GetWorldSize();
CHECK_EQ(world, n_gpus);
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
auto const device = collective::GetRank();
// Set up single node version;
HostDeviceVector<FeatureType> ft({}, device);
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, device);
size_t intermediate_num_cuts = ::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft({}, device);
containers.emplace_back(ft, n_bins, kCols, kRows, device);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
sketch_on_single_node.Unique();
TestQuantileElemRank(device, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr(), true);
// Set up distributed version. We rely on using rank as seed to generate
// the exact same copy of data.
auto rank = collective::GetRank();
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, device);
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
ASSERT_EQ(sketch_distributed.ColumnsPtr().size(),
sketch_on_single_node.ColumnsPtr().size());
ASSERT_EQ(sketch_distributed.Data().size(),
sketch_on_single_node.Data().size());
TestQuantileElemRank(device, sketch_distributed.Data(),
sketch_distributed.ColumnsPtr(), true);
std::vector<SketchEntry> single_node_data(
sketch_on_single_node.Data().size());
dh::CopyDeviceSpanToVector(&single_node_data, sketch_on_single_node.Data());
std::vector<SketchEntry> distributed_data(sketch_distributed.Data().size());
dh::CopyDeviceSpanToVector(&distributed_data, sketch_distributed.Data());
float Eps = 2e-4 * world;
for (size_t i = 0; i < single_node_data.size(); ++i) {
ASSERT_NEAR(single_node_data[i].value, distributed_data[i].value, Eps);
ASSERT_NEAR(single_node_data[i].rmax, distributed_data[i].rmax, Eps);
ASSERT_NEAR(single_node_data[i].rmin, distributed_data[i].rmin, Eps);
ASSERT_NEAR(single_node_data[i].wmin, distributed_data[i].wmin, Eps);
}
});
}
} // anonymous namespace
TEST(GPUQuantile, MGPUAllReduceBasic) {
auto const n_gpus = AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUAllReduceBasic test with # GPUs = " << n_gpus;
}
RunWithInMemoryCommunicator(n_gpus, TestAllReduceBasic, n_gpus);
}
namespace {
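// After AllReduce every worker should hold an identical sketch: the raw sketch
// bytes are gathered across ranks and each rank's slice is compared against the
// rank-0 baseline.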
void TestSameOnAllWorkers(std::int32_t n_gpus) {
auto world = collective::GetWorldSize();
CHECK_EQ(world, n_gpus);
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
auto const rank = collective::GetRank();
auto const device = rank;
HostDeviceVector<FeatureType> ft({}, device);
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, device);
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
TestQuantileElemRank(device, sketch_distributed.Data(), sketch_distributed.ColumnsPtr(), true);
// Test for all workers having the same sketch.
size_t n_data = sketch_distributed.Data().size();
collective::Allreduce<collective::Operation::kMax>(&n_data, 1);
ASSERT_EQ(n_data, sketch_distributed.Data().size());
size_t size_as_float =
sketch_distributed.Data().size_bytes() / sizeof(float);
auto local_data = Span<float const>{
reinterpret_cast<float const *>(sketch_distributed.Data().data()),
size_as_float};
dh::caching_device_vector<float> all_workers(size_as_float * world);
thrust::fill(all_workers.begin(), all_workers.end(), 0);
thrust::copy(thrust::device, local_data.data(),
local_data.data() + local_data.size(),
all_workers.begin() + local_data.size() * rank);
collective::AllReduce<collective::Operation::kSum>(device, all_workers.data().get(),
all_workers.size());
collective::Synchronize(device);
auto base_line = dh::ToSpan(all_workers).subspan(0, size_as_float);
std::vector<float> h_base_line(base_line.size());
dh::CopyDeviceSpanToVector(&h_base_line, base_line);
size_t offset = 0;
for (decltype(world) i = 0; i < world; ++i) {
auto comp = dh::ToSpan(all_workers).subspan(offset, size_as_float);
std::vector<float> h_comp(comp.size());
dh::CopyDeviceSpanToVector(&h_comp, comp);
ASSERT_EQ(comp.size(), base_line.size());
for (size_t j = 0; j < h_comp.size(); ++j) {
ASSERT_NEAR(h_base_line[j], h_comp[j], kRtEps);
}
offset += size_as_float;
}
});
}
} // anonymous namespace
TEST(GPUQuantile, MGPUSameOnAllWorkers) {
auto const n_gpus = AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUSameOnAllWorkers test with # GPUs = " << n_gpus;
}
RunWithInMemoryCommunicator(n_gpus, TestSameOnAllWorkers, n_gpus);
}
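// Pushes a single feature holding 50 copies of 0.3 followed by 50 copies of 0.5 and
// checks the rank (rmin/rmax) and weight (wmin) of the two resulting summary entries.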
TEST(GPUQuantile, Push) {
size_t constexpr kRows = 100;
std::vector<float> data(kRows);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::fill(data.begin() + (data.size() / 2), data.end(), 0.5f);
int32_t n_bins = 128;
bst_feature_t constexpr kCols = 1;
std::vector<Entry> entries(kRows);
for (bst_feature_t i = 0; i < entries.size(); ++i) {
Entry e{i, data[i]};
entries[i] = e;
}
dh::device_vector<Entry> d_entries(entries);
dh::device_vector<size_t> columns_ptr(2);
columns_ptr[0] = 0;
columns_ptr[1] = kRows;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr), dh::ToSpan(columns_ptr), kRows, {});
auto sketch_data = sketch.Data();
thrust::host_vector<SketchEntry> h_sketch_data(sketch_data.size());
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
ASSERT_EQ(h_sketch_data.size(), 2);
auto v_0 = h_sketch_data[0];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
TEST(GPUQuantile, MultiColPush) {
size_t constexpr kRows = 100, kCols = 4;
std::vector<float> data(kRows * kCols);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::vector<Entry> entries(kRows * kCols);
for (bst_feature_t c = 0; c < kCols; ++c) {
for (size_t r = 0; r < kRows; ++r) {
float v = (r >= kRows / 2) ? 0.7 : 0.4;
auto e = Entry{c, v};
entries[c * kRows + r] = e;
}
}
int32_t n_bins = 16;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
dh::device_vector<Entry> d_entries {entries};
dh::device_vector<size_t> columns_ptr(kCols + 1, 0);
for (size_t i = 1; i < kCols + 1; ++i) {
columns_ptr[i] = kRows;
}
thrust::inclusive_scan(thrust::device, columns_ptr.begin(), columns_ptr.end(),
columns_ptr.begin());
dh::device_vector<size_t> cuts_ptr(columns_ptr);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr),
dh::ToSpan(cuts_ptr), kRows * kCols, {});
auto sketch_data = sketch.Data();
ASSERT_EQ(sketch_data.size(), kCols * 2);
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
std::vector<SketchEntry> h_sketch_data(sketch_data.size());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
for (size_t i = 0; i < kCols; ++i) {
auto v_0 = h_sketch_data[i * 2];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[i * 2 + 1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
}
} // namespace common
} // namespace xgboost
| 58685d944cffc7e8a365a7f296b35d90cf88e806.cu | #include <gtest/gtest.h>
#include "test_quantile.h"
#include "../helpers.h"
#include "../../../src/collective/communicator-inl.cuh"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/common/quantile.cuh"
namespace xgboost {
namespace {
struct IsSorted {
XGBOOST_DEVICE bool operator()(common::SketchEntry const& a, common::SketchEntry const& b) const {
return a.value < b.value;
}
};
}
namespace common {
TEST(GPUQuantile, Basic) {
constexpr size_t kRows = 1000, kCols = 100, kBins = 256;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, kBins, kCols, kRows, 0);
dh::caching_device_vector<Entry> entries;
dh::device_vector<bst_row_t> cuts_ptr(kCols+1);
thrust::fill(cuts_ptr.begin(), cuts_ptr.end(), 0);
// Push empty
sketch.Push(dh::ToSpan(entries), dh::ToSpan(cuts_ptr), dh::ToSpan(cuts_ptr), 0);
ASSERT_EQ(sketch.Data().size(), 0);
}
void TestSketchUnique(float sparsity) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [kRows, kCols, sparsity](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, sparsity}
.Seed(seed)
.Device(0)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
dh::caching_device_vector<size_t> column_sizes_scan;
HostDeviceVector<size_t> cut_sizes_scan;
auto batch = adapter.Value();
data::IsValidFunctor is_valid(std::numeric_limits<float>::quiet_NaN());
auto batch_iter = dh::MakeTransformIterator<data::COOTuple>(
thrust::make_counting_iterator(0llu),
[=] __device__(size_t idx) { return batch.GetElement(idx); });
auto end = kCols * kRows;
detail::GetColumnSizesScan(0, kCols, n_cuts, IterSpan{batch_iter, end}, is_valid,
&cut_sizes_scan, &column_sizes_scan);
auto const& cut_sizes = cut_sizes_scan.HostVector();
ASSERT_LE(sketch.Data().size(), cut_sizes.back());
std::vector<size_t> h_columns_ptr(sketch.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, sketch.ColumnsPtr());
ASSERT_EQ(sketch.Data().size(), h_columns_ptr.back());
sketch.Unique();
std::vector<SketchEntry> h_data(sketch.Data().size());
thrust::copy(dh::tcbegin(sketch.Data()), dh::tcend(sketch.Data()), h_data.begin());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = common::Span<SketchEntry>(h_data).subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
});
}
TEST(GPUQuantile, Unique) {
TestSketchUnique(0);
TestSketchUnique(0.5);
}
// if with_error is true, the test tolerates floating point error
void TestQuantileElemRank(int32_t device, Span<SketchEntry const> in,
Span<bst_row_t const> d_columns_ptr, bool with_error = false) {
dh::safe_cuda(cudaSetDevice(device));
std::vector<SketchEntry> h_in(in.size());
dh::CopyDeviceSpanToVector(&h_in, in);
std::vector<bst_row_t> h_columns_ptr(d_columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, d_columns_ptr);
for (size_t i = 1; i < d_columns_ptr.size(); ++i) {
auto column_id = i - 1;
auto beg = h_columns_ptr[column_id];
auto end = h_columns_ptr[i];
auto in_column = Span<SketchEntry>{h_in}.subspan(beg, end - beg);
for (size_t idx = 1; idx < in_column.size(); ++idx) {
float prev_rmin = in_column[idx - 1].rmin;
float prev_rmax = in_column[idx - 1].rmax;
float rmin_next = in_column[idx].RMinNext();
if (with_error) {
ASSERT_GE(in_column[idx].rmin + in_column[idx].rmin * kRtEps,
prev_rmin);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps, prev_rmax);
ASSERT_GE(in_column[idx].rmax + in_column[idx].rmin * kRtEps,
rmin_next);
} else {
ASSERT_GE(in_column[idx].rmin, prev_rmin);
ASSERT_GE(in_column[idx].rmax, prev_rmax);
ASSERT_GE(in_column[idx].rmax, rmin_next);
}
}
}
}
TEST(GPUQuantile, Prune) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch);
auto n_cuts = detail::RequiredSampleCutsPerColumn(n_bins, kRows);
    // LE because kRows * kCols entries are pushed into the sketch; after removing
    // duplicated entries we might not have that many inputs for prune.
ASSERT_LE(sketch.Data().size(), n_cuts * kCols);
sketch.Prune(n_bins);
ASSERT_LE(sketch.Data().size(), kRows * kCols);
// This is not necessarily true for all inputs without calling unique after
// prune.
ASSERT_TRUE(thrust::is_sorted(thrust::device, sketch.Data().data(),
sketch.Data().data() + sketch.Data().size(),
detail::SketchUnique{}));
TestQuantileElemRank(0, sketch.Data(), sketch.ColumnsPtr());
});
}
TEST(GPUQuantile, MergeEmpty) {
constexpr size_t kRows = 1000, kCols = 100;
size_t n_bins = 10;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
MetaInfo info;
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
std::vector<SketchEntry> entries_before(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_before, sketch_0.Data());
std::vector<bst_row_t> ptrs_before(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_before, sketch_0.ColumnsPtr());
thrust::device_vector<size_t> columns_ptr(kCols + 1);
// Merge an empty sketch
sketch_0.Merge(dh::ToSpan(columns_ptr), Span<SketchEntry>{});
std::vector<SketchEntry> entries_after(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&entries_after, sketch_0.Data());
std::vector<bst_row_t> ptrs_after(sketch_0.ColumnsPtr().size());
dh::CopyDeviceSpanToVector(&ptrs_after, sketch_0.ColumnsPtr());
CHECK_EQ(entries_before.size(), entries_after.size());
CHECK_EQ(ptrs_before.size(), ptrs_after.size());
for (size_t i = 0; i < entries_before.size(); ++i) {
CHECK_EQ(entries_before[i].value, entries_after[i].value);
CHECK_EQ(entries_before[i].rmin, entries_after[i].rmin);
CHECK_EQ(entries_before[i].rmax, entries_after[i].rmax);
CHECK_EQ(entries_before[i].wmin, entries_after[i].wmin);
}
for (size_t i = 0; i < ptrs_before.size(); ++i) {
CHECK_EQ(ptrs_before[i], ptrs_after[i]);
}
}
TEST(GPUQuantile, MergeBasic) {
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const &info) {
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, kCols, kRows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_0);
SketchContainer sketch_1(ft, n_bins, kCols, kRows * kRows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(), &sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
if (info.weights_.Size() != 0) {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), true);
sketch_0.FixError();
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr(), false);
} else {
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
}
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
ASSERT_TRUE(
thrust::is_sorted(thrust::device, sketch_0.Data().data(),
sketch_0.Data().data() + sketch_0.Data().size(),
detail::SketchUnique{}));
});
}
void TestMergeDuplicated(int32_t n_bins, size_t cols, size_t rows, float frac) {
MetaInfo info;
int32_t seed = 0;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_0(ft, n_bins, cols, rows, 0);
HostDeviceVector<float> storage_0;
std::string interface_str_0 = RandomDataGenerator{rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_0);
data::CupyAdapter adapter_0(interface_str_0);
AdapterDeviceSketch(adapter_0.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_0);
size_t f_rows = rows * frac;
SketchContainer sketch_1(ft, n_bins, cols, f_rows, 0);
HostDeviceVector<float> storage_1;
std::string interface_str_1 = RandomDataGenerator{f_rows, cols, 0}
.Device(0)
.Seed(seed)
.GenerateArrayInterface(&storage_1);
auto data_1 = storage_1.DeviceSpan();
auto tuple_it = thrust::make_tuple(
thrust::make_counting_iterator<size_t>(0ul), data_1.data());
using Tuple = thrust::tuple<size_t, float>;
auto it = thrust::make_zip_iterator(tuple_it);
thrust::transform(thrust::device, it, it + data_1.size(), data_1.data(),
[=] __device__(Tuple const &tuple) {
auto i = thrust::get<0>(tuple);
if (thrust::get<0>(tuple) % 2 == 0) {
return 0.0f;
} else {
return thrust::get<1>(tuple);
}
});
data::CupyAdapter adapter_1(interface_str_1);
AdapterDeviceSketch(adapter_1.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_1);
size_t size_before_merge = sketch_0.Data().size();
sketch_0.Merge(sketch_1.ColumnsPtr(), sketch_1.Data());
TestQuantileElemRank(0, sketch_0.Data(), sketch_0.ColumnsPtr());
auto columns_ptr = sketch_0.ColumnsPtr();
std::vector<bst_row_t> h_columns_ptr(columns_ptr.size());
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
ASSERT_EQ(h_columns_ptr.back(), sketch_1.Data().size() + size_before_merge);
sketch_0.Unique();
columns_ptr = sketch_0.ColumnsPtr();
dh::CopyDeviceSpanToVector(&h_columns_ptr, columns_ptr);
std::vector<SketchEntry> h_data(sketch_0.Data().size());
dh::CopyDeviceSpanToVector(&h_data, sketch_0.Data());
for (size_t i = 1; i < h_columns_ptr.size(); ++i) {
auto begin = h_columns_ptr[i - 1];
auto column = Span<SketchEntry> {h_data}.subspan(begin, h_columns_ptr[i] - begin);
ASSERT_TRUE(std::is_sorted(column.begin(), column.end(), IsSorted{}));
}
}
TEST(GPUQuantile, MergeDuplicated) {
size_t n_bins = 256;
constexpr size_t kRows = 1000, kCols = 100;
for (float frac = 0.5; frac < 2.5; frac += 0.5) {
TestMergeDuplicated(n_bins, kRows, kCols, frac);
}
}
TEST(GPUQuantile, MultiMerge) {
constexpr size_t kRows = 20, kCols = 1;
int32_t world = 2;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
// Set up single node version
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, 0);
size_t intermediate_num_cuts = std::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage;
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(0)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft;
containers.emplace_back(ft, n_bins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
sketch_on_single_node.Unique();
TestQuantileElemRank(0, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr());
});
}
namespace {
void TestAllReduceBasic(int32_t n_gpus) {
auto const world = collective::GetWorldSize();
CHECK_EQ(world, n_gpus);
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins, MetaInfo const& info) {
auto const device = collective::GetRank();
// Set up single node version;
HostDeviceVector<FeatureType> ft({}, device);
SketchContainer sketch_on_single_node(ft, n_bins, kCols, kRows, device);
size_t intermediate_num_cuts = std::min(
kRows * world, static_cast<size_t>(n_bins * WQSketch::kFactor));
std::vector<SketchContainer> containers;
for (auto rank = 0; rank < world; ++rank) {
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
HostDeviceVector<FeatureType> ft({}, device);
containers.emplace_back(ft, n_bins, kCols, kRows, device);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&containers.back());
}
for (auto &sketch : containers) {
sketch.Prune(intermediate_num_cuts);
sketch_on_single_node.Merge(sketch.ColumnsPtr(), sketch.Data());
sketch_on_single_node.FixError();
}
sketch_on_single_node.Unique();
TestQuantileElemRank(device, sketch_on_single_node.Data(),
sketch_on_single_node.ColumnsPtr(), true);
// Set up distributed version. We rely on using rank as seed to generate
// the exact same copy of data.
auto rank = collective::GetRank();
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, device);
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
ASSERT_EQ(sketch_distributed.ColumnsPtr().size(),
sketch_on_single_node.ColumnsPtr().size());
ASSERT_EQ(sketch_distributed.Data().size(),
sketch_on_single_node.Data().size());
TestQuantileElemRank(device, sketch_distributed.Data(),
sketch_distributed.ColumnsPtr(), true);
std::vector<SketchEntry> single_node_data(
sketch_on_single_node.Data().size());
dh::CopyDeviceSpanToVector(&single_node_data, sketch_on_single_node.Data());
std::vector<SketchEntry> distributed_data(sketch_distributed.Data().size());
dh::CopyDeviceSpanToVector(&distributed_data, sketch_distributed.Data());
float Eps = 2e-4 * world;
for (size_t i = 0; i < single_node_data.size(); ++i) {
ASSERT_NEAR(single_node_data[i].value, distributed_data[i].value, Eps);
ASSERT_NEAR(single_node_data[i].rmax, distributed_data[i].rmax, Eps);
ASSERT_NEAR(single_node_data[i].rmin, distributed_data[i].rmin, Eps);
ASSERT_NEAR(single_node_data[i].wmin, distributed_data[i].wmin, Eps);
}
});
}
} // anonymous namespace
TEST(GPUQuantile, MGPUAllReduceBasic) {
auto const n_gpus = AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUAllReduceBasic test with # GPUs = " << n_gpus;
}
RunWithInMemoryCommunicator(n_gpus, TestAllReduceBasic, n_gpus);
}
namespace {
void TestSameOnAllWorkers(std::int32_t n_gpus) {
auto world = collective::GetWorldSize();
CHECK_EQ(world, n_gpus);
constexpr size_t kRows = 1000, kCols = 100;
RunWithSeedsAndBins(kRows, [=](int32_t seed, size_t n_bins,
MetaInfo const &info) {
auto const rank = collective::GetRank();
auto const device = rank;
HostDeviceVector<FeatureType> ft({}, device);
SketchContainer sketch_distributed(ft, n_bins, kCols, kRows, device);
HostDeviceVector<float> storage({}, device);
std::string interface_str = RandomDataGenerator{kRows, kCols, 0}
.Device(device)
.Seed(rank + seed)
.GenerateArrayInterface(&storage);
data::CupyAdapter adapter(interface_str);
AdapterDeviceSketch(adapter.Value(), n_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_distributed);
sketch_distributed.AllReduce();
sketch_distributed.Unique();
TestQuantileElemRank(device, sketch_distributed.Data(), sketch_distributed.ColumnsPtr(), true);
// Test for all workers having the same sketch.
size_t n_data = sketch_distributed.Data().size();
collective::Allreduce<collective::Operation::kMax>(&n_data, 1);
ASSERT_EQ(n_data, sketch_distributed.Data().size());
size_t size_as_float =
sketch_distributed.Data().size_bytes() / sizeof(float);
auto local_data = Span<float const>{
reinterpret_cast<float const *>(sketch_distributed.Data().data()),
size_as_float};
dh::caching_device_vector<float> all_workers(size_as_float * world);
thrust::fill(all_workers.begin(), all_workers.end(), 0);
thrust::copy(thrust::device, local_data.data(),
local_data.data() + local_data.size(),
all_workers.begin() + local_data.size() * rank);
collective::AllReduce<collective::Operation::kSum>(device, all_workers.data().get(),
all_workers.size());
collective::Synchronize(device);
auto base_line = dh::ToSpan(all_workers).subspan(0, size_as_float);
std::vector<float> h_base_line(base_line.size());
dh::CopyDeviceSpanToVector(&h_base_line, base_line);
size_t offset = 0;
for (decltype(world) i = 0; i < world; ++i) {
auto comp = dh::ToSpan(all_workers).subspan(offset, size_as_float);
std::vector<float> h_comp(comp.size());
dh::CopyDeviceSpanToVector(&h_comp, comp);
ASSERT_EQ(comp.size(), base_line.size());
for (size_t j = 0; j < h_comp.size(); ++j) {
ASSERT_NEAR(h_base_line[j], h_comp[j], kRtEps);
}
offset += size_as_float;
}
});
}
} // anonymous namespace
TEST(GPUQuantile, MGPUSameOnAllWorkers) {
auto const n_gpus = AllVisibleGPUs();
if (n_gpus <= 1) {
GTEST_SKIP() << "Skipping MGPUSameOnAllWorkers test with # GPUs = " << n_gpus;
}
RunWithInMemoryCommunicator(n_gpus, TestSameOnAllWorkers, n_gpus);
}
TEST(GPUQuantile, Push) {
size_t constexpr kRows = 100;
std::vector<float> data(kRows);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::fill(data.begin() + (data.size() / 2), data.end(), 0.5f);
int32_t n_bins = 128;
bst_feature_t constexpr kCols = 1;
std::vector<Entry> entries(kRows);
for (bst_feature_t i = 0; i < entries.size(); ++i) {
Entry e{i, data[i]};
entries[i] = e;
}
dh::device_vector<Entry> d_entries(entries);
dh::device_vector<size_t> columns_ptr(2);
columns_ptr[0] = 0;
columns_ptr[1] = kRows;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr), dh::ToSpan(columns_ptr), kRows, {});
auto sketch_data = sketch.Data();
thrust::host_vector<SketchEntry> h_sketch_data(sketch_data.size());
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
ASSERT_EQ(h_sketch_data.size(), 2);
auto v_0 = h_sketch_data[0];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
TEST(GPUQuantile, MultiColPush) {
size_t constexpr kRows = 100, kCols = 4;
std::vector<float> data(kRows * kCols);
std::fill(data.begin(), data.begin() + (data.size() / 2), 0.3f);
std::vector<Entry> entries(kRows * kCols);
for (bst_feature_t c = 0; c < kCols; ++c) {
for (size_t r = 0; r < kRows; ++r) {
float v = (r >= kRows / 2) ? 0.7 : 0.4;
auto e = Entry{c, v};
entries[c * kRows + r] = e;
}
}
int32_t n_bins = 16;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch(ft, n_bins, kCols, kRows, 0);
dh::device_vector<Entry> d_entries {entries};
dh::device_vector<size_t> columns_ptr(kCols + 1, 0);
for (size_t i = 1; i < kCols + 1; ++i) {
columns_ptr[i] = kRows;
}
thrust::inclusive_scan(thrust::device, columns_ptr.begin(), columns_ptr.end(),
columns_ptr.begin());
dh::device_vector<size_t> cuts_ptr(columns_ptr);
sketch.Push(dh::ToSpan(d_entries), dh::ToSpan(columns_ptr),
dh::ToSpan(cuts_ptr), kRows * kCols, {});
auto sketch_data = sketch.Data();
ASSERT_EQ(sketch_data.size(), kCols * 2);
auto ptr = thrust::device_ptr<SketchEntry const>(sketch_data.data());
std::vector<SketchEntry> h_sketch_data(sketch_data.size());
thrust::copy(ptr, ptr + sketch_data.size(), h_sketch_data.begin());
for (size_t i = 0; i < kCols; ++i) {
auto v_0 = h_sketch_data[i * 2];
ASSERT_EQ(v_0.rmin, 0);
ASSERT_EQ(v_0.wmin, kRows / 2.0f);
ASSERT_EQ(v_0.rmax, kRows / 2.0f);
auto v_1 = h_sketch_data[i * 2 + 1];
ASSERT_EQ(v_1.rmin, kRows / 2.0f);
ASSERT_EQ(v_1.wmin, kRows / 2.0f);
ASSERT_EQ(v_1.rmax, static_cast<float>(kRows));
}
}
} // namespace common
} // namespace xgboost
|
65635aa286fed878092e54412f3b21a0e65e4e74.hip | // !!! This is a file automatically generated by hipify!!!
/**
* gramschmidt.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define M 2048
#define N 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
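// CPU reference: classical Gram-Schmidt QR factorization (A = Q * R), processed
// one column k at a time.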
void gramschmidt(DATA_TYPE* A, DATA_TYPE* R, DATA_TYPE* Q)
{
int i,j,k;
DATA_TYPE nrm;
for (k = 0; k < N; k++)
{
nrm = 0;
for (i = 0; i < M; i++)
{
nrm += A[i*N + k] * A[i*N + k];
}
R[k*N + k] = sqrt(nrm);
for (i = 0; i < M; i++)
{
Q[i*N + k] = A[i*N + k] / R[k*N + k];
}
for (j = k + 1; j < N; j++)
{
R[k*N + j] = 0;
for (i = 0; i < M; i++)
{
R[k*N + j] += Q[i*N + k] * A[i*N + j];
}
for (i = 0; i < M; i++)
{
A[i*N + j] = A[i*N + j] - Q[i*N + k] * R[k*N + j];
}
}
}
}
void init_array(DATA_TYPE* A)
{
int i, j;
for (i = 0; i < M; i++)
{
for (j = 0; j < N; j++)
{
A[i*N + j] = ((DATA_TYPE) (i+1)*(j+1)) / (M+1);
}
}
}
void compareResults(DATA_TYPE* A, DATA_TYPE* A_outputFromGpu)
{
int i, j, fail;
fail = 0;
for (i=0; i < M; i++)
{
for (j=0; j < N; j++)
{
if (percentDiff(A[i*N + j], A_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
printf("i: %d j: %d \n1: %f\n 2: %f\n", i, j, A[i*N + j], A_outputFromGpu[i*N + j]);
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
return;
}
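// Kernel 1: a single thread computes the 2-norm of column k and stores it in R[k][k].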
__global__ void gramschmidt_kernel1(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid==0)
{
DATA_TYPE nrm = 0.0;
int i;
for (i = 0; i < M; i++)
{
nrm += a[i * N + k] * a[i * N + k];
}
r[k * N + k] = sqrt(nrm);
}
}
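// Kernel 2: one thread per row normalizes column k of A into Q.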
__global__ void gramschmidt_kernel2(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < M)
{
q[i * N + k] = a[i * N + k] / r[k * N + k];
}
}
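// Kernel 3: one thread per trailing column j > k computes R[k][j] = Q(:,k) . A(:,j)
// and subtracts the projection from A(:,j).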
__global__ void gramschmidt_kernel3(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((j > k) && (j < N))
{
r[k*N + j] = 0.0;
int i;
for (i = 0; i < M; i++)
{
r[k*N + j] += q[i*N + k] * a[i*N + j];
}
for (i = 0; i < M; i++)
{
a[i*N + j] -= q[i*N + k] * r[k*N + j];
}
}
}
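// Host driver: for each column k the three kernels are launched in sequence; kernel 1
// is serial, so its grid is 1x1.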
void gramschmidtCuda(DATA_TYPE* A, DATA_TYPE* R, DATA_TYPE* Q, DATA_TYPE* A_outputFromGpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 gridKernel1(1, 1);
dim3 gridKernel2((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1);
dim3 gridKernel3((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1);
DATA_TYPE *A_gpu;
DATA_TYPE *R_gpu;
DATA_TYPE *Q_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * M * N);
hipMalloc((void **)&R_gpu, sizeof(DATA_TYPE) * M * N);
hipMalloc((void **)&Q_gpu, sizeof(DATA_TYPE) * M * N);
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * M * N, hipMemcpyHostToDevice);
t_start = rtclock();
int k;
for (k = 0; k < N; k++)
{
hipLaunchKernelGGL(( gramschmidt_kernel1), dim3(gridKernel1),dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k);
hipDeviceSynchronize();
hipLaunchKernelGGL(( gramschmidt_kernel2), dim3(gridKernel2),dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k);
hipDeviceSynchronize();
hipLaunchKernelGGL(( gramschmidt_kernel3), dim3(gridKernel3),dim3(block), 0, 0, A_gpu, R_gpu, Q_gpu, k);
hipDeviceSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(A_outputFromGpu, A_gpu, sizeof(DATA_TYPE) * M * N, hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(R_gpu);
hipFree(Q_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* A_outputFromGpu;
DATA_TYPE* R;
DATA_TYPE* Q;
A = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
A_outputFromGpu = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
R = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
Q = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
init_array(A);
GPU_argv_init();
gramschmidtCuda(A, R, Q, A_outputFromGpu);
t_start = rtclock();
gramschmidt(A, R, Q);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(A, A_outputFromGpu);
free(A);
free(A_outputFromGpu);
free(R);
free(Q);
return 0;
}
| 65635aa286fed878092e54412f3b21a0e65e4e74.cu | /**
* gramschmidt.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define M 2048
#define N 2048
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void gramschmidt(DATA_TYPE* A, DATA_TYPE* R, DATA_TYPE* Q)
{
int i,j,k;
DATA_TYPE nrm;
for (k = 0; k < N; k++)
{
nrm = 0;
for (i = 0; i < M; i++)
{
nrm += A[i*N + k] * A[i*N + k];
}
R[k*N + k] = sqrt(nrm);
for (i = 0; i < M; i++)
{
Q[i*N + k] = A[i*N + k] / R[k*N + k];
}
for (j = k + 1; j < N; j++)
{
R[k*N + j] = 0;
for (i = 0; i < M; i++)
{
R[k*N + j] += Q[i*N + k] * A[i*N + j];
}
for (i = 0; i < M; i++)
{
A[i*N + j] = A[i*N + j] - Q[i*N + k] * R[k*N + j];
}
}
}
}
void init_array(DATA_TYPE* A)
{
int i, j;
for (i = 0; i < M; i++)
{
for (j = 0; j < N; j++)
{
A[i*N + j] = ((DATA_TYPE) (i+1)*(j+1)) / (M+1);
}
}
}
void compareResults(DATA_TYPE* A, DATA_TYPE* A_outputFromGpu)
{
int i, j, fail;
fail = 0;
for (i=0; i < M; i++)
{
for (j=0; j < N; j++)
{
if (percentDiff(A[i*N + j], A_outputFromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
printf("i: %d j: %d \n1: %f\n 2: %f\n", i, j, A[i*N + j], A_outputFromGpu[i*N + j]);
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
return;
}
__global__ void gramschmidt_kernel1(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid==0)
{
DATA_TYPE nrm = 0.0;
int i;
for (i = 0; i < M; i++)
{
nrm += a[i * N + k] * a[i * N + k];
}
r[k * N + k] = sqrt(nrm);
}
}
__global__ void gramschmidt_kernel2(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < M)
{
q[i * N + k] = a[i * N + k] / r[k * N + k];
}
}
__global__ void gramschmidt_kernel3(DATA_TYPE *a, DATA_TYPE *r, DATA_TYPE *q, int k)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((j > k) && (j < N))
{
r[k*N + j] = 0.0;
int i;
for (i = 0; i < M; i++)
{
r[k*N + j] += q[i*N + k] * a[i*N + j];
}
for (i = 0; i < M; i++)
{
a[i*N + j] -= q[i*N + k] * r[k*N + j];
}
}
}
void gramschmidtCuda(DATA_TYPE* A, DATA_TYPE* R, DATA_TYPE* Q, DATA_TYPE* A_outputFromGpu)
{
double t_start, t_end;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 gridKernel1(1, 1);
dim3 gridKernel2((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1);
dim3 gridKernel3((size_t)ceil(((float)N) / ((float)DIM_THREAD_BLOCK_X)), 1);
DATA_TYPE *A_gpu;
DATA_TYPE *R_gpu;
DATA_TYPE *Q_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * M * N);
cudaMalloc((void **)&R_gpu, sizeof(DATA_TYPE) * M * N);
cudaMalloc((void **)&Q_gpu, sizeof(DATA_TYPE) * M * N);
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * M * N, cudaMemcpyHostToDevice);
t_start = rtclock();
int k;
for (k = 0; k < N; k++)
{
gramschmidt_kernel1<<<gridKernel1,block>>>(A_gpu, R_gpu, Q_gpu, k);
cudaThreadSynchronize();
gramschmidt_kernel2<<<gridKernel2,block>>>(A_gpu, R_gpu, Q_gpu, k);
cudaThreadSynchronize();
gramschmidt_kernel3<<<gridKernel3,block>>>(A_gpu, R_gpu, Q_gpu, k);
cudaThreadSynchronize();
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(A_outputFromGpu, A_gpu, sizeof(DATA_TYPE) * M * N, cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(R_gpu);
cudaFree(Q_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
DATA_TYPE* A;
DATA_TYPE* A_outputFromGpu;
DATA_TYPE* R;
DATA_TYPE* Q;
A = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
A_outputFromGpu = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
R = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
Q = (DATA_TYPE*)malloc(M*N*sizeof(DATA_TYPE));
init_array(A);
GPU_argv_init();
gramschmidtCuda(A, R, Q, A_outputFromGpu);
t_start = rtclock();
gramschmidt(A, R, Q);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(A, A_outputFromGpu);
free(A);
free(A_outputFromGpu);
free(R);
free(Q);
return 0;
}
|
b802135e80716fe2b2b0441d8e1130bdd9a62e05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <math.h>
#include "caffe/layers/adacos_add_m_scale_layer.hpp"
namespace caffe {
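// Forward kernel: for each sample's ground-truth logit, apply the additive angular
// margin cos(theta + m) when cos(theta) exceeds the threshold, otherwise fall back to
// cos(theta) - m * sin(m) and raise the flag for the backward pass; the ground-truth
// position is zeroed in bi_data so it is excluded from the adaptive-scale statistic.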
template <typename Dtype>
__global__ void AdaCosinAddmForward(const int n, const int dim, const Dtype* label,
Dtype* top_data, Dtype threshold, Dtype bais, Dtype* flag, Dtype* bi_data) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
bi_data[index * dim + gt] = 0.f;
if(top_data[index * dim + gt] < 1.0f) {
Dtype theta = acos(top_data[index * dim + gt]);
if (top_data[index * dim + gt] > threshold) {
top_data[index * dim + gt] = cos(theta + bais);
}
else
{
top_data[index * dim + gt] = top_data[index * dim + gt] - bais * sin(bais);
flag[index * dim + gt] = 1.0f;
}
}
}
}
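// Backward kernel: where the margin branch was taken (flag == 0), scale the
// ground-truth gradient by d cos(theta + m) / d cos(theta)
//   = cos(m) + sin(m) * cos(theta) / sin(theta).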
template <typename Dtype>
__global__ void AdaCosinAddmBackward(const int n, const int dim, const Dtype* label,
Dtype* bottom_diff, const Dtype* cos_data, Dtype bais, const Dtype* flag) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
if(flag[index * dim + gt] == 0.0f) {
Dtype cos_theta = cos_data[index * dim + gt];
Dtype sin_theta = sqrt(1 - pow(cos_theta,2));
bottom_diff[index * dim + gt] *= (cos(bais) + sin(bais) * cos_theta / sin_theta);
}
}
}
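// Element-wise arccos, used by Forward_gpu to estimate the average angle.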
template <typename Dtype>
__global__ void ComputeAcos(const int n, Dtype* input_data) {
CUDA_KERNEL_LOOP(index, n) {
input_data[index] = (Dtype)acos(input_data[index]);
}
}
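// Forward pass: copy the cosine logits, apply the margin, estimate the average angle
// (clamped to pi/4) for cos_theta_med, update the adaptive scale s_d from the
// non-target logits, then scale the output by s_d.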
template <typename Dtype>
void AdaCosAddmScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* tpflag = top_flag.mutable_gpu_data();
Dtype* cos_t = cos_theta.mutable_gpu_data();
Dtype* bi_data = Bi_.mutable_gpu_data();
Dtype* mutable_bottom_data = bottom[0]->mutable_gpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, bottom_data, top_data);
caffe_copy(count, bottom_data, cos_t);
caffe_gpu_set(count, Dtype(0), tpflag);
caffe_gpu_set(count, Dtype(1.0), bi_data);
AdaCosinAddmForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, top_data, threshold, m_, tpflag, bi_data);
CUDA_POST_KERNEL_CHECK;
//compute cos_theta_med
ComputeAcos<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (num, mutable_bottom_data);
CUDA_POST_KERNEL_CHECK;
Dtype avg_theta;
caffe_gpu_asum(count, mutable_bottom_data, &avg_theta);
avg_theta = avg_theta / count;
avg_theta = ::min(double(avg_theta), M_PI / 4);
cos_theta_med = cos(avg_theta);
//compute s_d
caffe_gpu_mul(count, cos_t, bi_data, bi_data);
caffe_gpu_scal(count, s_d, bi_data);
caffe_gpu_exp(count, bi_data, bi_data);
caffe_gpu_asum(count, bi_data, &s_d);
s_d = log(s_d / num) / cos_theta_med;
//recovery bottom_data for debugging and visualization
caffe_copy(count, cos_t, mutable_bottom_data);
//scale
caffe_gpu_scal(count, s_d, top_data);
}
template <typename Dtype>
void AdaCosAddmScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* label_data = bottom[1]->gpu_data();
const Dtype* cos_t = cos_theta.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* tpflag = top_flag.gpu_data();
int count = bottom[0]->count();
int num = bottom[0]->num();
int dim = count / num;
caffe_copy(count, top_diff, bottom_diff);
caffe_gpu_scal(count, s_d, bottom_diff);
AdaCosinAddmBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, bottom_diff, cos_t, m_, tpflag);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AdaCosAddmScaleLayer);
} // namespace caffe
| b802135e80716fe2b2b0441d8e1130bdd9a62e05.cu | #include <algorithm>
#include <vector>
#include <math.h>
#include "caffe/layers/adacos_add_m_scale_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void AdaCosinAddmForward(const int n, const int dim, const Dtype* label,
Dtype* top_data, Dtype threshold, Dtype bais, Dtype* flag, Dtype* bi_data) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
bi_data[index * dim + gt] = 0.f;
if(top_data[index * dim + gt] < 1.0f) {
Dtype theta = acos(top_data[index * dim + gt]);
if (top_data[index * dim + gt] > threshold) {
top_data[index * dim + gt] = cos(theta + bais);
}
else
{
top_data[index * dim + gt] = top_data[index * dim + gt] - bais * sin(bais);
flag[index * dim + gt] = 1.0f;
}
}
}
}
template <typename Dtype>
__global__ void AdaCosinAddmBackward(const int n, const int dim, const Dtype* label,
Dtype* bottom_diff, const Dtype* cos_data, Dtype bais, const Dtype* flag) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
if(flag[index * dim + gt] == 0.0f) {
Dtype cos_theta = cos_data[index * dim + gt];
Dtype sin_theta = sqrt(1 - pow(cos_theta,2));
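// margin-branch gradient via the chain rule: d cos(theta + bais) / d cos(theta) = cos(bais) + sin(bais) * cos(theta) / sin(theta)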
bottom_diff[index * dim + gt] *= (cos(bais) + sin(bais) * cos_theta / sin_theta);
}
}
}
template <typename Dtype>
__global__ void ComputeAcos(const int n, Dtype* input_data) {
CUDA_KERNEL_LOOP(index, n) {
input_data[index] = (Dtype)acos(input_data[index]);
}
}
template <typename Dtype>
void AdaCosAddmScaleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* tpflag = top_flag.mutable_gpu_data();
Dtype* cos_t = cos_theta.mutable_gpu_data();
Dtype* bi_data = Bi_.mutable_gpu_data();
Dtype* mutable_bottom_data = bottom[0]->mutable_gpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
caffe_copy(count, bottom_data, top_data);
caffe_copy(count, bottom_data, cos_t);
caffe_gpu_set(count, Dtype(0), tpflag);
caffe_gpu_set(count, Dtype(1.0), bi_data);
AdaCosinAddmForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, top_data, threshold, m_, tpflag, bi_data);
CUDA_POST_KERNEL_CHECK;
//compute cos_theta_med
ComputeAcos<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (num, mutable_bottom_data);
CUDA_POST_KERNEL_CHECK;
Dtype avg_theta;
caffe_gpu_asum(count, mutable_bottom_data, &avg_theta);
avg_theta = avg_theta / count;
avg_theta = std::min(double(avg_theta), M_PI / 4);
cos_theta_med = cos(avg_theta);
//compute s_d
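// dynamic scale update: s_d = log( (1/num) * sum_j exp(s_d_old * cos_theta_j * B_j) ) / cos(theta_med)
// (B_j is 0 at the ground-truth logit and 1 elsewhere, so those terms contribute exp(0) = 1)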
caffe_gpu_mul(count, cos_t, bi_data, bi_data);
caffe_gpu_scal(count, s_d, bi_data);
caffe_gpu_exp(count, bi_data, bi_data);
caffe_gpu_asum(count, bi_data, &s_d);
s_d = log(s_d / num) / cos_theta_med;
//recovery bottom_data for debugging and visualization
caffe_copy(count, cos_t, mutable_bottom_data);
//scale
caffe_gpu_scal(count, s_d, top_data);
}
template <typename Dtype>
void AdaCosAddmScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* label_data = bottom[1]->gpu_data();
const Dtype* cos_t = cos_theta.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* tpflag = top_flag.gpu_data();
int count = bottom[0]->count();
int num = bottom[0]->num();
int dim = count / num;
caffe_copy(count, top_diff, bottom_diff);
caffe_gpu_scal(count, s_d, bottom_diff);
AdaCosinAddmBackward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, bottom_diff, cos_t, m_, tpflag);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(AdaCosAddmScaleLayer);
} // namespace caffe
|
b073cef4713e798c53d45369e3e2fcb608adfce0.hip | // !!! This is a file automatically generated by hipify!!!
/* MD5C.cpp is the cpp version of MD5C.c
** in order to utilize timing module in cpp,
** as well as better programming flexibility
*/
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
/* F, G, H and I are basic MD5 functions */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
Rotation is separate from addition to prevent recomputation. */
#define FF(a, b, c, d, x, s, ac) { \
(a) += F ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) { \
(a) += G ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) { \
(a) += H ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) { \
(a) += I ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__device__ inline void getHash(unsigned char *data, uint32_t length, uint32_t *a1, uint32_t *b1, uint32_t *c1, uint32_t *d1) {
// init with magic constants
const uint32_t a0 = 0x67452301;
const uint32_t b0 = 0xEFCDAB89;
const uint32_t c0 = 0x98BADCFE;
const uint32_t d0 = 0x10325476;
// Calculating the padding is kind of a mystery - temporary for now!
uint32_t padding[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
int i = 0;
for (i = 0; i < length; i++) {
padding[i / 4] |= data[i] << ((i % 4) * 8);
}
padding[i / 4] |= 0x80 << ((i % 4) * 8);
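// append the mandatory 0x80 terminator byte immediately after the message (MD5 padding rule)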
uint32_t bitlen = length * 8;
#define in0 (padding[0])
#define in1 (padding[1])
#define in2 (padding[2])
#define in3 (padding[3])
#define in4 (padding[4])
#define in5 (padding[5])
#define in6 (padding[6])
#define in7 (padding[7])
#define in8 (padding[8])
#define in9 (padding[9])
#define in10 (padding[10])
#define in11 (padding[11])
#define in12 (padding[12])
#define in13 (padding[13])
#define in14 (bitlen)
#define in15 (0)
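// words 14-15 of the single 512-bit block carry the 64-bit message length in bits, little-endian word order (high word 0 for short messages)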
uint32_t a = a0;
uint32_t b = b0;
uint32_t c = c0;
uint32_t d = d0;
/* Round 1 */
FF ( a, b, c, d, in0 , S11, 3614090360); /* 1 */
FF ( d, a, b, c, in1 , S12, 3905402710); /* 2 */
FF ( c, d, a, b, in2 , S13, 606105819); /* 3 */
FF ( b, c, d, a, in3 , S14, 3250441966); /* 4 */
FF ( a, b, c, d, in4 , S11, 4118548399); /* 5 */
FF ( d, a, b, c, in5 , S12, 1200080426); /* 6 */
FF ( c, d, a, b, in6 , S13, 2821735955); /* 7 */
FF ( b, c, d, a, in7 , S14, 4249261313); /* 8 */
FF ( a, b, c, d, in8 , S11, 1770035416); /* 9 */
FF ( d, a, b, c, in9 , S12, 2336552879); /* 10 */
FF ( c, d, a, b, in10, S13, 4294925233); /* 11 */
FF ( b, c, d, a, in11, S14, 2304563134); /* 12 */
FF ( a, b, c, d, in12, S11, 1804603682); /* 13 */
FF ( d, a, b, c, in13, S12, 4254626195); /* 14 */
FF ( c, d, a, b, in14, S13, 2792965006); /* 15 */
FF ( b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
GG ( a, b, c, d, in1 , S21, 4129170786); /* 17 */
GG ( d, a, b, c, in6 , S22, 3225465664); /* 18 */
GG ( c, d, a, b, in11, S23, 643717713); /* 19 */
GG ( b, c, d, a, in0 , S24, 3921069994); /* 20 */
GG ( a, b, c, d, in5 , S21, 3593408605); /* 21 */
GG ( d, a, b, c, in10, S22, 38016083); /* 22 */
GG ( c, d, a, b, in15, S23, 3634488961); /* 23 */
GG ( b, c, d, a, in4 , S24, 3889429448); /* 24 */
GG ( a, b, c, d, in9 , S21, 568446438); /* 25 */
GG ( d, a, b, c, in14, S22, 3275163606); /* 26 */
GG ( c, d, a, b, in3 , S23, 4107603335); /* 27 */
GG ( b, c, d, a, in8 , S24, 1163531501); /* 28 */
GG ( a, b, c, d, in13, S21, 2850285829); /* 29 */
GG ( d, a, b, c, in2 , S22, 4243563512); /* 30 */
GG ( c, d, a, b, in7 , S23, 1735328473); /* 31 */
GG ( b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
HH ( a, b, c, d, in5 , S31, 4294588738); /* 33 */
HH ( d, a, b, c, in8 , S32, 2272392833); /* 34 */
HH ( c, d, a, b, in11, S33, 1839030562); /* 35 */
HH ( b, c, d, a, in14, S34, 4259657740); /* 36 */
HH ( a, b, c, d, in1 , S31, 2763975236); /* 37 */
HH ( d, a, b, c, in4 , S32, 1272893353); /* 38 */
HH ( c, d, a, b, in7 , S33, 4139469664); /* 39 */
HH ( b, c, d, a, in10, S34, 3200236656); /* 40 */
HH ( a, b, c, d, in13, S31, 681279174); /* 41 */
HH ( d, a, b, c, in0 , S32, 3936430074); /* 42 */
HH ( c, d, a, b, in3 , S33, 3572445317); /* 43 */
HH ( b, c, d, a, in6 , S34, 76029189); /* 44 */
HH ( a, b, c, d, in9 , S31, 3654602809); /* 45 */
HH ( d, a, b, c, in12, S32, 3873151461); /* 46 */
HH ( c, d, a, b, in15, S33, 530742520); /* 47 */
HH ( b, c, d, a, in2 , S34, 3299628645); /* 48 */
/* Round 4 */
II ( a, b, c, d, in0 , S41, 4096336452); /* 49 */
II ( d, a, b, c, in7 , S42, 1126891415); /* 50 */
II ( c, d, a, b, in14, S43, 2878612391); /* 51 */
II ( b, c, d, a, in5 , S44, 4237533241); /* 52 */
II ( a, b, c, d, in12, S41, 1700485571); /* 53 */
II ( d, a, b, c, in3 , S42, 2399980690); /* 54 */
II ( c, d, a, b, in10, S43, 4293915773); /* 55 */
II ( b, c, d, a, in1 , S44, 2240044497); /* 56 */
II ( a, b, c, d, in8 , S41, 1873313359); /* 57 */
II ( d, a, b, c, in15, S42, 4264355552); /* 58 */
II ( c, d, a, b, in6 , S43, 2734768916); /* 59 */
II ( b, c, d, a, in13, S44, 1309151649); /* 60 */
II ( a, b, c, d, in4 , S41, 4149444226); /* 61 */
II ( d, a, b, c, in11, S42, 3174756917); /* 62 */
II ( c, d, a, b, in2 , S43, 718787259); /* 63 */
II ( b, c, d, a, in9 , S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
} | b073cef4713e798c53d45369e3e2fcb608adfce0.cu | /* MD5C.cpp is the cpp version of MD5C.c
** in order to utilize timing module in cpp,
** as well as better programming flexibility
*/
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#define S11 7
#define S12 12
#define S13 17
#define S14 22
#define S21 5
#define S22 9
#define S23 14
#define S24 20
#define S31 4
#define S32 11
#define S33 16
#define S34 23
#define S41 6
#define S42 10
#define S43 15
#define S44 21
/* F, G, H and I are basic MD5 functions */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4.
Rotation is separate from addition to prevent recomputation. */
#define FF(a, b, c, d, x, s, ac) { \
(a) += F ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) { \
(a) += G ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) { \
(a) += H ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) { \
(a) += I ((b), (c), (d)) + (x) + (uint32_t)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__device__ inline void getHash(unsigned char *data, uint32_t length, uint32_t *a1, uint32_t *b1, uint32_t *c1, uint32_t *d1) {
// init with magic constants
const uint32_t a0 = 0x67452301;
const uint32_t b0 = 0xEFCDAB89;
const uint32_t c0 = 0x98BADCFE;
const uint32_t d0 = 0x10325476;
// Calculating the padding is kind of a mystery - temporary for now!
uint32_t padding[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
int i = 0;
for (i = 0; i < length; i++) {
padding[i / 4] |= data[i] << ((i % 4) * 8);
}
padding[i / 4] |= 0x80 << ((i % 4) * 8);
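// append the mandatory 0x80 terminator byte immediately after the message (MD5 padding rule)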
uint32_t bitlen = length * 8;
#define in0 (padding[0])
#define in1 (padding[1])
#define in2 (padding[2])
#define in3 (padding[3])
#define in4 (padding[4])
#define in5 (padding[5])
#define in6 (padding[6])
#define in7 (padding[7])
#define in8 (padding[8])
#define in9 (padding[9])
#define in10 (padding[10])
#define in11 (padding[11])
#define in12 (padding[12])
#define in13 (padding[13])
#define in14 (bitlen)
#define in15 (0)
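// words 14-15 of the single 512-bit block carry the 64-bit message length in bits, little-endian word order (high word 0 for short messages)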
uint32_t a = a0;
uint32_t b = b0;
uint32_t c = c0;
uint32_t d = d0;
/* Round 1 */
FF ( a, b, c, d, in0 , S11, 3614090360); /* 1 */
FF ( d, a, b, c, in1 , S12, 3905402710); /* 2 */
FF ( c, d, a, b, in2 , S13, 606105819); /* 3 */
FF ( b, c, d, a, in3 , S14, 3250441966); /* 4 */
FF ( a, b, c, d, in4 , S11, 4118548399); /* 5 */
FF ( d, a, b, c, in5 , S12, 1200080426); /* 6 */
FF ( c, d, a, b, in6 , S13, 2821735955); /* 7 */
FF ( b, c, d, a, in7 , S14, 4249261313); /* 8 */
FF ( a, b, c, d, in8 , S11, 1770035416); /* 9 */
FF ( d, a, b, c, in9 , S12, 2336552879); /* 10 */
FF ( c, d, a, b, in10, S13, 4294925233); /* 11 */
FF ( b, c, d, a, in11, S14, 2304563134); /* 12 */
FF ( a, b, c, d, in12, S11, 1804603682); /* 13 */
FF ( d, a, b, c, in13, S12, 4254626195); /* 14 */
FF ( c, d, a, b, in14, S13, 2792965006); /* 15 */
FF ( b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
GG ( a, b, c, d, in1 , S21, 4129170786); /* 17 */
GG ( d, a, b, c, in6 , S22, 3225465664); /* 18 */
GG ( c, d, a, b, in11, S23, 643717713); /* 19 */
GG ( b, c, d, a, in0 , S24, 3921069994); /* 20 */
GG ( a, b, c, d, in5 , S21, 3593408605); /* 21 */
GG ( d, a, b, c, in10, S22, 38016083); /* 22 */
GG ( c, d, a, b, in15, S23, 3634488961); /* 23 */
GG ( b, c, d, a, in4 , S24, 3889429448); /* 24 */
GG ( a, b, c, d, in9 , S21, 568446438); /* 25 */
GG ( d, a, b, c, in14, S22, 3275163606); /* 26 */
GG ( c, d, a, b, in3 , S23, 4107603335); /* 27 */
GG ( b, c, d, a, in8 , S24, 1163531501); /* 28 */
GG ( a, b, c, d, in13, S21, 2850285829); /* 29 */
GG ( d, a, b, c, in2 , S22, 4243563512); /* 30 */
GG ( c, d, a, b, in7 , S23, 1735328473); /* 31 */
GG ( b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
HH ( a, b, c, d, in5 , S31, 4294588738); /* 33 */
HH ( d, a, b, c, in8 , S32, 2272392833); /* 34 */
HH ( c, d, a, b, in11, S33, 1839030562); /* 35 */
HH ( b, c, d, a, in14, S34, 4259657740); /* 36 */
HH ( a, b, c, d, in1 , S31, 2763975236); /* 37 */
HH ( d, a, b, c, in4 , S32, 1272893353); /* 38 */
HH ( c, d, a, b, in7 , S33, 4139469664); /* 39 */
HH ( b, c, d, a, in10, S34, 3200236656); /* 40 */
HH ( a, b, c, d, in13, S31, 681279174); /* 41 */
HH ( d, a, b, c, in0 , S32, 3936430074); /* 42 */
HH ( c, d, a, b, in3 , S33, 3572445317); /* 43 */
HH ( b, c, d, a, in6 , S34, 76029189); /* 44 */
HH ( a, b, c, d, in9 , S31, 3654602809); /* 45 */
HH ( d, a, b, c, in12, S32, 3873151461); /* 46 */
HH ( c, d, a, b, in15, S33, 530742520); /* 47 */
HH ( b, c, d, a, in2 , S34, 3299628645); /* 48 */
/* Round 4 */
II ( a, b, c, d, in0 , S41, 4096336452); /* 49 */
II ( d, a, b, c, in7 , S42, 1126891415); /* 50 */
II ( c, d, a, b, in14, S43, 2878612391); /* 51 */
II ( b, c, d, a, in5 , S44, 4237533241); /* 52 */
II ( a, b, c, d, in12, S41, 1700485571); /* 53 */
II ( d, a, b, c, in3 , S42, 2399980690); /* 54 */
II ( c, d, a, b, in10, S43, 4293915773); /* 55 */
II ( b, c, d, a, in1 , S44, 2240044497); /* 56 */
II ( a, b, c, d, in8 , S41, 1873313359); /* 57 */
II ( d, a, b, c, in15, S42, 4264355552); /* 58 */
II ( c, d, a, b, in6 , S43, 2734768916); /* 59 */
II ( b, c, d, a, in13, S44, 1309151649); /* 60 */
II ( a, b, c, d, in4 , S41, 4149444226); /* 61 */
II ( d, a, b, c, in11, S42, 3174756917); /* 62 */
II ( c, d, a, b, in2 , S43, 718787259); /* 63 */
II ( b, c, d, a, in9 , S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
} |
79f0a423c6c8eabae05a0c6de54bc97accac3963.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// kernels.cu
// diffusion2d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright © 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "diffusion2d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(hipError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != hipSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError()));
exit(-1);
}
#endif
return;
}
/***********************/
/* Runge Kutta Methods */
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int nx,
const unsigned int ny,
const REAL dt){
// local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
// Compute Runge-Kutta step
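// third-order SSP Runge-Kutta (Shu-Osher form):
// step 1: u1 = u0 + dt*L(u0); step 2: u2 = 3/4*u0 + 1/4*(u1 + dt*L(u1)); step 3: u = 1/3*u0 + 2/3*(u2 + dt*L(u2))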
// compute single index
unsigned int o=i+nx*j;
// update only internal cells
if (i>2 && i<nx-3 && j>2 && j<ny-3)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else do nothing!
}
__global__ void Compute_Laplace2d(
const REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny)
{
unsigned int i, j, o;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
o = i+(nx*j);
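// fourth-order central-difference Laplacian, stencil (-1, 16, -30, 16, -1) per direction; the 1/(12*h^2) factor is presumably folded into d_kx and d_ky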
if (i>2 && i<nx-3 && j>2 && j<ny-3)
Lu[o] = d_kx*(- u[o-2] +16*u[o-1] -30*u[o]+16*u[o+1] - u[o+2] )+
d_ky*(-u[o-nx-nx]+16*u[o-nx]-30*u[o]+16*u[o+nx]-u[o+nx+nx]);
else
Lu[o] = 0.0;
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky)
{
checkCuda(hipMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, hipMemcpyHostToDevice));
checkCuda(hipMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, hipMemcpyHostToDevice));
}
extern "C" void Call_Lu2d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int nx, unsigned int ny, REAL *u, REAL *Lu)
{
hipLaunchKernelGGL(( Compute_Laplace2d), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,Lu,nx,ny);
}
extern "C" void Call_RK2d(dim3 numBlocks, dim3 threadsPerBlock, hipStream_t aStream,
unsigned int step, unsigned int nx, unsigned int ny, REAL dt, REAL *u, REAL *uo, REAL *Lu)
{
hipLaunchKernelGGL(( Compute_RK), dim3(numBlocks),dim3(threadsPerBlock),0,aStream, u,uo,Lu,step,nx,ny,dt);
}
| 79f0a423c6c8eabae05a0c6de54bc97accac3963.cu | //
// kernels.cu
// diffusion2d-GPU
//
// Created by Manuel Diaz on 7/26/16.
// Copyright © 2016 Manuel Diaz. All rights reserved.
//
extern "C" {
#include "diffusion2d.h"
}
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
__constant__ REAL d_kx;
__constant__ REAL d_ky;
/*********************************************/
/* A method for checking error in CUDA calls */
/*********************************************/
inline void __checkCuda(cudaError_t error, const char *file, const int line)
{
#if defined(DISPL)
if (error != cudaSuccess)
{
printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError()));
exit(-1);
}
#endif
return;
}
/***********************/
/* Runge Kutta Methods */
/***********************/
__global__ void Compute_RK(
REAL * __restrict__ u,
const REAL * __restrict__ uo,
const REAL * __restrict__ Lu,
const unsigned int step,
const unsigned int nx,
const unsigned int ny,
const REAL dt){
// local threads indexes
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
// Compute Runge-Kutta step
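// third-order SSP Runge-Kutta (Shu-Osher form):
// step 1: u1 = u0 + dt*L(u0); step 2: u2 = 3/4*u0 + 1/4*(u1 + dt*L(u1)); step 3: u = 1/3*u0 + 2/3*(u2 + dt*L(u2))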
// compute single index
unsigned int o=i+nx*j;
// update only internal cells
if (i>2 && i<nx-3 && j>2 && j<ny-3)
{
switch (step) {
case 1: // step 1
u[o] = uo[o]+dt*(Lu[o]);
break;
case 2: // step 2
u[o] = 0.75*uo[o]+0.25*(u[o]+dt*(Lu[o]));
break;
case 3: // step 3
u[o] = (uo[o]+2*(u[o]+dt*(Lu[o])))/3;
break;
}
}
// else do nothing!
}
__global__ void Compute_Laplace2d(
const REAL * __restrict__ u,
REAL * __restrict__ Lu,
const unsigned int nx,
const unsigned int ny)
{
unsigned int i, j, o;
i = threadIdx.x + blockIdx.x * blockDim.x;
j = threadIdx.y + blockIdx.y * blockDim.y;
o = i+(nx*j);
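// fourth-order central-difference Laplacian, stencil (-1, 16, -30, 16, -1) per direction; the 1/(12*h^2) factor is presumably folded into d_kx and d_ky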
if (i>2 && i<nx-3 && j>2 && j<ny-3)
Lu[o] = d_kx*(- u[o-2] +16*u[o-1] -30*u[o]+16*u[o+1] - u[o+2] )+
d_ky*(-u[o-nx-nx]+16*u[o-nx]-30*u[o]+16*u[o+nx]-u[o+nx+nx]);
else
Lu[o] = 0.0;
}
/*********************/
/* Function Wrappers */
/*********************/
extern "C" void CopyToConstantMemory(const REAL kx, const REAL ky)
{
checkCuda(cudaMemcpyToSymbol(d_kx, &kx, sizeof(REAL), 0, cudaMemcpyHostToDevice));
checkCuda(cudaMemcpyToSymbol(d_ky, &ky, sizeof(REAL), 0, cudaMemcpyHostToDevice));
}
extern "C" void Call_Lu2d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int nx, unsigned int ny, REAL *u, REAL *Lu)
{
Compute_Laplace2d<<<numBlocks,threadsPerBlock,0,aStream>>>(u,Lu,nx,ny);
}
extern "C" void Call_RK2d(dim3 numBlocks, dim3 threadsPerBlock, cudaStream_t aStream,
unsigned int step, unsigned int nx, unsigned int ny, REAL dt, REAL *u, REAL *uo, REAL *Lu)
{
Compute_RK<<<numBlocks,threadsPerBlock,0,aStream>>>(u,uo,Lu,step,nx,ny,dt);
}
|
d8dc8a281b5dfe00ca7f29dee3bd2bdd0b466da5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
#include "mvs/gpu_mat_prng.h"
namespace colmap {
namespace mvs {
namespace {
__global__ void InitRandomStateKernel(GpuMat<hiprandState_t> output) {
const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
const size_t uniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
const size_t id = uniqueBlockIndex * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x + threadIdx.x;
// Each thread gets same seed, a different sequence number, no offset.
if (col < output.GetWidth() && row < output.GetHeight()) {
hiprand_init(id, 0, 0, &output.GetRef(row, col));
}
}
} // namespace
GpuMatPRNG::GpuMatPRNG(const int width, const int height)
: GpuMat(width, height) {
hipLaunchKernelGGL(( InitRandomStateKernel), dim3(gridSize_), dim3(blockSize_), 0, 0, *this);
}
} // namespace mvs
} // namespace colmap
| d8dc8a281b5dfe00ca7f29dee3bd2bdd0b466da5.cu | // Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
#include "mvs/gpu_mat_prng.h"
namespace colmap {
namespace mvs {
namespace {
__global__ void InitRandomStateKernel(GpuMat<curandState> output) {
const size_t row = blockIdx.y * blockDim.y + threadIdx.y;
const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
const size_t uniqueBlockIndex = blockIdx.y * gridDim.x + blockIdx.x;
const size_t id = uniqueBlockIndex * blockDim.y * blockDim.x +
threadIdx.y * blockDim.x + threadIdx.x;
// Each thread gets same seed, a different sequence number, no offset.
if (col < output.GetWidth() && row < output.GetHeight()) {
curand_init(id, 0, 0, &output.GetRef(row, col));
}
}
} // namespace
GpuMatPRNG::GpuMatPRNG(const int width, const int height)
: GpuMat(width, height) {
InitRandomStateKernel<<<gridSize_, blockSize_>>>(*this);
}
} // namespace mvs
} // namespace colmap
|
cb06c63859e2c95492fd77a5c15fed9548ce51b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
//#define BLOCK_SIZE 32
#define SIZE 1024*1024
__host__ void SaveMatrixToFile(char* fileName, int* matrix, int width, int height) {
FILE* file = fopen(fileName, "wt");
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
fprintf(file, "%d\t", matrix[y * width + x]);
}
fprintf(file, "\n");
}
fclose(file);
}
__global__ void transpose(int* inputMatrix, int* outputMatrix, int width, int height) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
for (int x = 0; x < width; x++)
for (int y = 0; y < height; y++)
outputMatrix[x * height + y] = inputMatrix[y * width + x];
}
__host__ int main()
{
int width;
int height;
printf("Input number of columns: ");
scanf("%d", &width);
printf("Input number of strings: ");
scanf("%d", &height);
int N = width*height;
hipEvent_t start, stop;
float gpuTime = 0.0;
hipEventCreate(&start);
hipEventCreate(&stop);
int* A;
A = (int *)malloc(sizeof(int) * N);
int* A_t;
A_t = (int *)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++)
{
A[i] = i + 1;
}
SaveMatrixToFile("matrix.txt", A, width, height);
int* A_dev;
int* A_t_dev;
hipMalloc((void**)&A_dev, sizeof(int) * N);
hipMalloc((void**)&A_t_dev, sizeof(int) * N);
hipMemcpy(A_dev, A, N * sizeof(int), hipMemcpyHostToDevice);
dim3 block(512);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( transpose), dim3(SIZE/512), dim3(block), 0, 0, A_dev, A_t_dev, width, height);
hipEvent_t syncEvent;
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
printf("Time of transposing: %.2f milliseconds\n", gpuTime);
// getch();
hipEventCreate(&syncEvent);
hipEventRecord(syncEvent, 0);
hipEventSynchronize(syncEvent);
hipMemcpy(A_t, A_t_dev, N * sizeof(int), hipMemcpyDeviceToHost);
SaveMatrixToFile("matrix1.txt", A_t, height, width);
hipFree(A_dev);
hipFree(A_t_dev);
hipEventDestroy(start);
hipEventDestroy(stop);
delete[] A;
delete[] A_t;
return 0;
}
| cb06c63859e2c95492fd77a5c15fed9548ce51b9.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
//#define BLOCK_SIZE 32
#define SIZE 1024*1024
__host__ void SaveMatrixToFile(char* fileName, int* matrix, int width, int height) {
FILE* file = fopen(fileName, "wt");
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
fprintf(file, "%d\t", matrix[y * width + x]);
}
fprintf(file, "\n");
}
fclose(file);
}
__global__ void transpose(int* inputMatrix, int* outputMatrix, int width, int height) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
for (int x = 0; x < width; x++)
for (int y = 0; y < height; y++)
outputMatrix[x * height + y] = inputMatrix[y * width + x];
}
__host__ int main()
{
int width;
int height;
printf("Input number of columns: ");
scanf("%d", &width);
printf("Input number of strings: ");
scanf("%d", &height);
int N = width*height;
cudaEvent_t start, stop;
float gpuTime = 0.0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int* A;
A = (int *)malloc(sizeof(int) * N);
int* A_t;
A_t = (int *)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++)
{
A[i] = i + 1;
}
SaveMatrixToFile("matrix.txt", A, width, height);
int* A_dev;
int* A_t_dev;
cudaMalloc((void**)&A_dev, sizeof(int) * N);
cudaMalloc((void**)&A_t_dev, sizeof(int) * N);
cudaMemcpy(A_dev, A, N * sizeof(int), cudaMemcpyHostToDevice);
dim3 block(512);
cudaEventRecord(start, 0);
transpose<<<SIZE/512, block>>>(A_dev, A_t_dev, width, height);
cudaEvent_t syncEvent;
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
printf("Time of transposing: %.2f milliseconds\n", gpuTime);
// getch();
cudaEventCreate(&syncEvent);
cudaEventRecord(syncEvent, 0);
cudaEventSynchronize(syncEvent);
cudaMemcpy(A_t, A_t_dev, N * sizeof(int), cudaMemcpyDeviceToHost);
SaveMatrixToFile("matrix1.txt", A_t, height, width);
cudaFree(A_dev);
cudaFree(A_t_dev);
cudaEventDestroy(start);
cudaEventDestroy(stop);
delete[] A;
delete[] A_t;
return 0;
}
|
aa72a801742f724cbb56e0175fae671f73e08621.hip | // !!! This is a file automatically generated by hipify!!!
#include "hrtf_signals.cuh"
float hrtf[NUM_HRFT][HRTF_LEN*HRTF_CHN]; /* interleaved HRTF impulse responses */
int elevation_pos[NUM_ELEV] =
{ -40, -30, -20, -10, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90 };
float azimuth_inc[NUM_ELEV] =
{ 6.43, 6.00, 5.00, 5.00, 5.00, 5.00, 5.00, 6.00, 6.43, 8.00, 10.00, 15.00, 30.00, 181 };
int azimuth_offset[NUM_ELEV + 1];
/*Read hrtf signals on CPU/RAM*/
int read_hrtf_signals(void) {
char hrtf_file[PATH_LEN];
int i, j, ele, num_samples, count;
float azi;
/* sndfile data structures */
SNDFILE *sndfile;
SF_INFO sfinfo;
j = 0;
azimuth_offset[0] = 0;
for (i = 0; i<NUM_ELEV; i++) {
ele = elevation_pos[i];
for (azi = 0; azi <= 180; azi += azimuth_inc[i]) {
sprintf(hrtf_file, "%s/elev%d/H%de%03da.wav", HRTF_DIR, ele, ele, (int)round(azi));
/* zero libsndfile structures */
memset(&sfinfo, 0, sizeof(sfinfo));
/* open hrtf file */
if ((sndfile = sf_open(hrtf_file, SFM_READ, &sfinfo)) == NULL) {
fprintf(stderr, "Error: could not open hrtf file:\n%3d %3d %s\n", i, j, hrtf_file);
fprintf(stderr, "%s\n", sf_strerror(sndfile));
return -1;
}
/* check signal parameters */
if (sfinfo.channels != HRTF_CHN) {
fprintf(stderr, "ERROR: incorrect number of channels in HRTF\n");
return -1;
}
if (sfinfo.samplerate != SAMP_RATE) {
fprintf(stderr, "ERROR: incorrect sampling rate\n");
return -1;
}
/* Print file information */
printf("%3d %3d %s Frames: %d, Channels: %d, Samplerate: %d\n",
i, j, hrtf_file, (int)sfinfo.frames, sfinfo.channels, sfinfo.samplerate);
/* read HRTF signal */
num_samples = (int) sfinfo.frames * sfinfo.channels;
if ((count = sf_read_float(sndfile, hrtf[j], num_samples)) != num_samples) {
fprintf(stderr, "ERROR: cannot read HRTF signal %3d\n", j);
return -1;
}
/* close file */
sf_close(sndfile);
j++;
}
/* for given azimuth j, azimuth_offset[j] is where that set of hrtf begins
* in set of NUM_HRTF
*/
azimuth_offset[i + 1] = j;
}
printf("\nHRTF index offsets for each elevation:\n");
for (i = 0; i<NUM_ELEV + 1; i++) {
printf("%3d ", azimuth_offset[i]);
}
printf("\n");
return 0;
}
/* on entry obj_ele and obj_azi are the new object position
* on exit hrtf_idx is set to the HRTF index of the closest HRTF position
* hrtf_idx > 0 indicates to use right half-sphere HRTF
* hrtf_idx < 0 indicates to create left half-sphere HRTF by exchanging L, R
*/
int pick_hrtf(float obj_ele, float obj_azi)
{
int i, n, ele_idx, obj_azi_sign, hrtf_idx;
float d, dmin;
/* save azimuth sign and force obj_azi to right half-sphere */
obj_azi_sign = 1;
if (obj_azi < 0) {
obj_azi_sign = -1;
obj_azi = -obj_azi;
}
/* find closest elevation position */
obj_ele = std::round(obj_ele / 10) * 10;
dmin = 1e37;
for (i = 0; i<NUM_ELEV; i++) {
d = obj_ele - elevation_pos[i];
d = d > 0 ? d : -d;
if (d < dmin) {
dmin = d;
ele_idx = i;
}
}
/* find closest azimuth position */
obj_azi = std::round(obj_azi);
dmin = 1e37;
n = azimuth_offset[ele_idx + 1] - azimuth_offset[ele_idx];
for (i = 0; i<n; i++) {
d = obj_azi - i*azimuth_inc[ele_idx];
d = d > 0 ? d : -d;
if (d < dmin) {
dmin = d;
hrtf_idx = azimuth_offset[ele_idx] + i;
}
}
/* return hrtf index */
return(hrtf_idx * obj_azi_sign);
}
/* convolve signal buffer with HRTF
* new signal starts at HRTF_LEN frames into x buffer
* x is mono input signal
* HRTF and y are interleaved by channel
* y_len is in frames
*/
int convolve_hrtf(float *input, int hrtf_idx, float *output, int outputLen, float gain) {
int i, j, n, k, swap_chan, j_hrtf;
float *p_hrtf;
if (gain > 1)
gain = 1;
if (hrtf_idx >= 0) {
swap_chan = false;
p_hrtf = hrtf[hrtf_idx];
}
else {
swap_chan = true;
p_hrtf = hrtf[-hrtf_idx];
}
/* zero output buffer */
for (i = 0; i<outputLen*HRTF_CHN; i++) {
output[i] = 0.0;
}
for (n = 0; n < outputLen; n++) {
for (k = 0; k < HRTF_LEN; k++) {
for (j = 0; j < HRTF_CHN; j++) {
/* outputLen and HRTF_LEN are n frames, output and hrtf are interleaved
* input is mono
*/
j_hrtf = (swap_chan == false) ? j : (j == 0) ? 1 : 0;
output[2 * n + j] += input[n - k] * p_hrtf[2 * k + j_hrtf];
}
}
// apply the overall gain once per output frame, after the full HRTF accumulation
output[2 * n] *= gain;
output[2 * n + 1] *= gain;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/*NOTE: GPU Convolution was not fast enough because of the large overhead
of FFT and IFFT. Keeping the code here for future purposes*/
/*HRTF Impulse reading for GPU/DRAM*/
//float *d_hrtf;
//int read_hrtf_signals(void) {
// char hrtf_file[PATH_LEN];
// int i, j, ele, num_samples, count;
// float azi;
// /* sndfile data structures */
// SNDFILE *sndfile;
// SF_INFO sfinfo;
//
// j = 0;
// azimuth_offset[0] = 0;
// size_t size = sizeof(float) * NUM_HRFT * HRTF_LEN * HRTF_CHN;
// checkCudaErrors(hipMalloc((void**)&d_hrtf, size));
//
// for (i = 0; i<NUM_ELEV; i++) {
// ele = elevation_pos[i];
// for (azi = 0; azi <= 180; azi += azimuth_inc[i]) {
// sprintf(hrtf_file, "%s/elev%d/H%de%03da.wav", HRTF_DIR, ele, ele, (int)round(azi));
//
// /* zero libsndfile structures */
// memset(&sfinfo, 0, sizeof(sfinfo));
//
// /* open hrtf file */
// if ((sndfile = sf_open(hrtf_file, SFM_READ, &sfinfo)) == NULL) {
// fprintf(stderr, "Error: could not open hrtf file:\n%3d %3d %s\n", i, j, hrtf_file);
// fprintf(stderr, "%s\n", sf_strerror(sndfile));
// return -1;
// }
// /* check signal parameters */
// if (sfinfo.channels != HRTF_CHN) {
// fprintf(stderr, "ERROR: incorrect number of channels in HRTF\n");
// return -1;
// }
// if (sfinfo.samplerate != SAMP_RATE) {
// fprintf(stderr, "ERROR: incorrect sampling rate\n");
// return -1;
// }
// /* Print file information */
// printf("%3d %3d %s Frames: %d, Channels: %d, Samplerate: %d\n",
// i, j, hrtf_file, (int)sfinfo.frames, sfinfo.channels, sfinfo.samplerate);
// /* read HRTF signal */
// num_samples = sfinfo.frames*sfinfo.channels;
// if ((count = sf_read_float(sndfile, hrtf[j], num_samples)) != num_samples) {
// fprintf(stderr, "ERROR: cannot read HRTF signal %3d\n", j);
// return -1;
// }
// /* close file */
// sf_close(sndfile);
// j++;
// }
// checkCudaErrors(hipMemcpy(d_hrtf, hrtf, size, hipMemcpyHostToDevice));
// azimuth_offset[i + 1] = j;
// }
// printf("\nHRTF index offsets for each elevation:\n");
// for (i = 0; i<NUM_ELEV + 1; i++) {
// printf("%3d ", azimuth_offset[i]);
// }
// printf("\n");
// return 0;
//}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/*GPU Convolution was not fast enough because of the large overhead
of FFT and IFFT. Keeping the code here for future purposes*/
//void GPUconvolve_hrtf(float *x, int x_len, int hrtf_idx, float *output, int y_len, float gain) {
// int i, j, n, k, swap_chan, j_hrtf;
// float *p_hrtf;
// if (gain > 1)
// gain = 1;
// if (hrtf_idx >= 0) {
// swap_chan = false;
// p_hrtf = hrtf[hrtf_idx];
// }
// else {
// swap_chan = true;
// p_hrtf = hrtf[-hrtf_idx];
// }
//
// /* zero output buffer */
// /*for (i = 0; i<y_len*HRTF_CHN; i++) {
// output[i] = 0.0;
// }*/
// convolveMe(output, x, x_len, p_hrtf, gain, d_hrtf);
//}
////////////////////////////////////////////////////////////////////////////////
| aa72a801742f724cbb56e0175fae671f73e08621.cu | #include "hrtf_signals.cuh"
float hrtf[NUM_HRFT][HRTF_LEN*HRTF_CHN]; /* interleaved HRTF impulse responses */
int elevation_pos[NUM_ELEV] =
{ -40, -30, -20, -10, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90 };
float azimuth_inc[NUM_ELEV] =
{ 6.43, 6.00, 5.00, 5.00, 5.00, 5.00, 5.00, 6.00, 6.43, 8.00, 10.00, 15.00, 30.00, 181 };
int azimuth_offset[NUM_ELEV + 1];
/*Read hrtf signals on CPU/RAM*/
int read_hrtf_signals(void) {
char hrtf_file[PATH_LEN];
int i, j, ele, num_samples, count;
float azi;
/* sndfile data structures */
SNDFILE *sndfile;
SF_INFO sfinfo;
j = 0;
azimuth_offset[0] = 0;
for (i = 0; i<NUM_ELEV; i++) {
ele = elevation_pos[i];
for (azi = 0; azi <= 180; azi += azimuth_inc[i]) {
sprintf(hrtf_file, "%s/elev%d/H%de%03da.wav", HRTF_DIR, ele, ele, (int)round(azi));
/* zero libsndfile structures */
memset(&sfinfo, 0, sizeof(sfinfo));
/* open hrtf file */
if ((sndfile = sf_open(hrtf_file, SFM_READ, &sfinfo)) == NULL) {
fprintf(stderr, "Error: could not open hrtf file:\n%3d %3d %s\n", i, j, hrtf_file);
fprintf(stderr, "%s\n", sf_strerror(sndfile));
return -1;
}
/* check signal parameters */
if (sfinfo.channels != HRTF_CHN) {
fprintf(stderr, "ERROR: incorrect number of channels in HRTF\n");
return -1;
}
if (sfinfo.samplerate != SAMP_RATE) {
fprintf(stderr, "ERROR: incorrect sampling rate\n");
return -1;
}
/* Print file information */
printf("%3d %3d %s Frames: %d, Channels: %d, Samplerate: %d\n",
i, j, hrtf_file, (int)sfinfo.frames, sfinfo.channels, sfinfo.samplerate);
/* read HRTF signal */
num_samples = (int) sfinfo.frames * sfinfo.channels;
if ((count = sf_read_float(sndfile, hrtf[j], num_samples)) != num_samples) {
fprintf(stderr, "ERROR: cannot read HRTF signal %3d\n", j);
return -1;
}
/* close file */
sf_close(sndfile);
j++;
}
/* for given azimuth j, azimuth_offset[j] is where that set of hrtf begins
* in set of NUM_HRTF
*/
azimuth_offset[i + 1] = j;
}
printf("\nHRTF index offsets for each elevation:\n");
for (i = 0; i<NUM_ELEV + 1; i++) {
printf("%3d ", azimuth_offset[i]);
}
printf("\n");
return 0;
}
/* on entry obj_ele and obj_azi are the new object position
* on exit hrtf_idx is set to the HRTF index of the closest HRTF position
* hrtf_idx > 0 indicates to use right half-sphere HRTF
* hrtf_idx < 0 indicates to create left half-sphere HRTF by exchanging L, R
*/
int pick_hrtf(float obj_ele, float obj_azi)
{
int i, n, ele_idx, obj_azi_sign, hrtf_idx;
float d, dmin;
/* save azimuth sign and force obj_azi to right half-sphere */
obj_azi_sign = 1;
if (obj_azi < 0) {
obj_azi_sign = -1;
obj_azi = -obj_azi;
}
/* find closest elevation position */
obj_ele = std::round(obj_ele / 10) * 10;
dmin = 1e37;
for (i = 0; i<NUM_ELEV; i++) {
d = obj_ele - elevation_pos[i];
d = d > 0 ? d : -d;
if (d < dmin) {
dmin = d;
ele_idx = i;
}
}
/* find closest azimuth position */
obj_azi = std::round(obj_azi);
dmin = 1e37;
n = azimuth_offset[ele_idx + 1] - azimuth_offset[ele_idx];
for (i = 0; i<n; i++) {
d = obj_azi - i*azimuth_inc[ele_idx];
d = d > 0 ? d : -d;
if (d < dmin) {
dmin = d;
hrtf_idx = azimuth_offset[ele_idx] + i;
}
}
/* return hrtf index */
return(hrtf_idx * obj_azi_sign);
}
/* convolve signal buffer with HRTF
* new signal starts at HRTF_LEN frames into x buffer
* x is mono input signal
* HRTF and y are interleaved by channel
* y_len is in frames
*/
int convolve_hrtf(float *input, int hrtf_idx, float *output, int outputLen, float gain) {
int i, j, n, k, swap_chan, j_hrtf;
float *p_hrtf;
if (gain > 1)
gain = 1;
if (hrtf_idx >= 0) {
swap_chan = false;
p_hrtf = hrtf[hrtf_idx];
}
else {
swap_chan = true;
p_hrtf = hrtf[-hrtf_idx];
}
/* zero output buffer */
for (i = 0; i<outputLen*HRTF_CHN; i++) {
output[i] = 0.0;
}
for (n = 0; n < outputLen; n++) {
for (k = 0; k < HRTF_LEN; k++) {
for (j = 0; j < HRTF_CHN; j++) {
/* outputLen and HRTF_LEN are n frames, output and hrtf are interleaved
* input is mono
*/
j_hrtf = (swap_chan == false) ? j : (j == 0) ? 1 : 0;
output[2 * n + j] += input[n - k] * p_hrtf[2 * k + j_hrtf];
}
}
// apply the overall gain once per output frame, after the full HRTF accumulation
output[2 * n] *= gain;
output[2 * n + 1] *= gain;
}
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/*NOTE: GPU Convolution was not fast enough because of the large overhead
of FFT and IFFT. Keeping the code here for future purposes*/
/*HRTF Impulse reading for GPU/DRAM*/
//float *d_hrtf;
//int read_hrtf_signals(void) {
// char hrtf_file[PATH_LEN];
// int i, j, ele, num_samples, count;
// float azi;
// /* sndfile data structures */
// SNDFILE *sndfile;
// SF_INFO sfinfo;
//
// j = 0;
// azimuth_offset[0] = 0;
// size_t size = sizeof(float) * NUM_HRFT * HRTF_LEN * HRTF_CHN;
// checkCudaErrors(cudaMalloc((void**)&d_hrtf, size));
//
// for (i = 0; i<NUM_ELEV; i++) {
// ele = elevation_pos[i];
// for (azi = 0; azi <= 180; azi += azimuth_inc[i]) {
// sprintf(hrtf_file, "%s/elev%d/H%de%03da.wav", HRTF_DIR, ele, ele, (int)round(azi));
//
// /* zero libsndfile structures */
// memset(&sfinfo, 0, sizeof(sfinfo));
//
// /* open hrtf file */
// if ((sndfile = sf_open(hrtf_file, SFM_READ, &sfinfo)) == NULL) {
// fprintf(stderr, "Error: could not open hrtf file:\n%3d %3d %s\n", i, j, hrtf_file);
// fprintf(stderr, "%s\n", sf_strerror(sndfile));
// return -1;
// }
// /* check signal parameters */
// if (sfinfo.channels != HRTF_CHN) {
// fprintf(stderr, "ERROR: incorrect number of channels in HRTF\n");
// return -1;
// }
// if (sfinfo.samplerate != SAMP_RATE) {
// fprintf(stderr, "ERROR: incorrect sampling rate\n");
// return -1;
// }
// /* Print file information */
// printf("%3d %3d %s Frames: %d, Channels: %d, Samplerate: %d\n",
// i, j, hrtf_file, (int)sfinfo.frames, sfinfo.channels, sfinfo.samplerate);
// /* read HRTF signal */
// num_samples = sfinfo.frames*sfinfo.channels;
// if ((count = sf_read_float(sndfile, hrtf[j], num_samples)) != num_samples) {
// fprintf(stderr, "ERROR: cannot read HRTF signal %3d\n", j);
// return -1;
// }
// /* close file */
// sf_close(sndfile);
// j++;
// }
// checkCudaErrors(cudaMemcpy(d_hrtf, hrtf, size, cudaMemcpyHostToDevice));
// azimuth_offset[i + 1] = j;
// }
// printf("\nHRTF index offsets for each elevation:\n");
// for (i = 0; i<NUM_ELEV + 1; i++) {
// printf("%3d ", azimuth_offset[i]);
// }
// printf("\n");
// return 0;
//}
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/*GPU Convolution was not fast enough because of the large overhead
of FFT and IFFT. Keeping the code here for future purposes*/
//void GPUconvolve_hrtf(float *x, int x_len, int hrtf_idx, float *output, int y_len, float gain) {
// int i, j, n, k, swap_chan, j_hrtf;
// float *p_hrtf;
// if (gain > 1)
// gain = 1;
// if (hrtf_idx >= 0) {
// swap_chan = false;
// p_hrtf = hrtf[hrtf_idx];
// }
// else {
// swap_chan = true;
// p_hrtf = hrtf[-hrtf_idx];
// }
//
// /* zero output buffer */
// /*for (i = 0; i<y_len*HRTF_CHN; i++) {
// output[i] = 0.0;
// }*/
// convolveMe(output, x, x_len, p_hrtf, gain, d_hrtf);
//}
////////////////////////////////////////////////////////////////////////////////
|
7612861ec5dfe2083a5d9dc3a327f00bfaf081f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "catch.hpp"
#include <map>
#include <thrust/device_vector.h>
#include <thrust/unique.h>
//--------------------------------------------------------------
#define TEST_CUDA_CHECK_RETURN
//--------------------------------------------------------------
#include "BaseCudaTestHandler.h"
#include "../GPUPatternMining/HashMap/gpuhashmapper.h"
#include "../GPUPatternMining/Prevalence/PrevalentTypedPairProvider.h"
#include "../GPUPatternMining/InstanceTree/IntanceTablesMapCreator.h"
#include <thrust/execution_policy.h>
#include "../GPUPatternMining/Entities/TypeCount.h"
//--------------------------------------------------------------
/*
Test for graph
A0-B0-C0-B1-A1-C1
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | simple")
{
std::vector<TypeCount> counts;
{
counts.push_back(TypeCount(0xA, 2));
counts.push_back(TypeCount(0xB, 2));
counts.push_back(TypeCount(0xC, 2));
}
const float minimalPrevalence = 0.6f;
auto plRes = std::make_shared<PlaneSweepTableInstanceResult>();
auto itmPack = std::make_shared<IntanceTablesMapCreator::ITMPack>();
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
FeatureInstance a;
FeatureInstance b;
a.field = 0x000A0000;
b.field = 0x000B0000;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000A0001;
b.field = 0x000C0001;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000B0000;
b.field = 0x000C0000;
huniques.push_back(thrust::make_tuple(a, b));
itmPack->uniques = huniques;
}
{
thrust::host_vector<FeatureInstance> hPairsA;
thrust::host_vector<FeatureInstance> hPairsB;
FeatureInstance a;
FeatureInstance b;
// A-B
a.field = 0x000A0000;
hPairsA.push_back(a);
b.field = 0x000B0000;
hPairsB.push_back(b);
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000B0001;
hPairsB.push_back(b);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
// B-C
a.field = 0x000B0000;
hPairsA.push_back(a);
b.field = 0x000C0000;
hPairsB.push_back(b);
a.field = 0x000B0001;
hPairsA.push_back(a);
b.field = 0x000C0000;
hPairsB.push_back(b);
plRes->pairsA = hPairsA;
plRes->pairsB = hPairsB;
}
{
std::vector<unsigned int> counts = { 2, 1, 2 };
itmPack->counts = counts;
}
{
std::vector<unsigned int> indices = { 0, 2, 3 };
itmPack->begins = indices;
}
itmPack->count = itmPack->uniques.size();
Prevalence::UniqueFilter::PrevalentTypedPairProvider bppc(
counts, itmPack);
thrust::host_vector<FeatureTypePair> result = bppc.getPrevalentPairConnections(
minimalPrevalence
, plRes
);
std::vector<FeatureTypePair> expected;
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
expected.push_back(ftp);
}
REQUIRE(std::equal(expected.begin(), expected.end(), result.begin()));
}
/*
Test for graph
C2-C3
| |
A1-B1 A5-B5
A2-B2 \ /
A3-B3 C1
A4-B4
|
A6-C4
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | simple 2")
{
std::vector<TypeCount> counts;
{
counts.push_back(TypeCount(0xA, 6));
counts.push_back(TypeCount(0xB, 5));
counts.push_back(TypeCount(0xC, 4));
}
const float minimalPrevalence = 3.f / 5.1f;
auto plRes = std::make_shared<PlaneSweepTableInstanceResult>();
auto itmPack = std::make_shared<IntanceTablesMapCreator::ITMPack>();
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
FeatureInstance a;
FeatureInstance b;
a.field = 0x000A0001;
b.field = 0x000B0001;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000A0001;
b.field = 0x000C0002;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000B0001;
b.field = 0x000C0003;
huniques.push_back(thrust::make_tuple(a, b));
itmPack->uniques = huniques;
}
{
thrust::host_vector<FeatureInstance> hPairsA;
thrust::host_vector<FeatureInstance> hPairsB;
FeatureInstance a;
FeatureInstance b;
// A-B
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000B0001;
hPairsB.push_back(b);
a.field = 0x000A0002;
hPairsA.push_back(a);
b.field = 0x000B0002;
hPairsB.push_back(b);
a.field = 0x000A0003;
hPairsA.push_back(a);
b.field = 0x000B0004;
hPairsB.push_back(b);
a.field = 0x000A0004;
hPairsA.push_back(a);
b.field = 0x000B0004;
hPairsB.push_back(b);
a.field = 0x000A0005;
hPairsA.push_back(a);
b.field = 0x000B0005;
hPairsB.push_back(b);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000C0002;
hPairsB.push_back(b);
a.field = 0x000A0005;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
a.field = 0x000A0006;
hPairsA.push_back(a);
b.field = 0x000C0004;
hPairsB.push_back(b);
// B-C
a.field = 0x000B0001;
hPairsA.push_back(a);
b.field = 0x000C0003;
hPairsB.push_back(b);
a.field = 0x000B0004;
hPairsA.push_back(a);
b.field = 0x000C0004;
hPairsB.push_back(b);
a.field = 0x000B0005;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
plRes->pairsA = hPairsA;
plRes->pairsB = hPairsB;
}
{
std::vector<unsigned int> counts = { 5, 3, 3 };
itmPack->counts = counts;
}
{
std::vector<unsigned int> indices = { 0, 5, 8 };
itmPack->begins = indices;
}
itmPack->count = itmPack->uniques.size();
Prevalence::UniqueFilter::PrevalentTypedPairProvider bppc(
counts, itmPack);
thrust::host_vector<FeatureTypePair> result = bppc.getPrevalentPairConnections(
minimalPrevalence
, plRes
);
std::vector<FeatureTypePair> expected;
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
expected.push_back(ftp);
ftp.types.a = 0x000B;
ftp.types.b = 0x000C;
expected.push_back(ftp);
}
REQUIRE(std::equal(expected.begin(), expected.end(), result.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | flag setter")
{
thrust::device_vector<float> resultA;
{
std::vector<float> resa(33, 0.1f);
resa.push_back(0.5f);
resa[15] = 0.6;
resultA = resa;
}
thrust::device_vector<float> resultB;
{
std::vector<float> resa(33, 0.1f);
resa.push_back(0.5f);
resa[15] = 0.6;
resa[17] = 0.6;
resultB = resa;
}
thrust::device_vector<bool> flags(34);
thrust::device_vector<unsigned int> writePos(34);
dim3 grid;
findSmallest2D(34, 256, grid.x, grid.y);
hipLaunchKernelGGL(( Prevalence::UniqueFilter::setPrevalentFlag) , dim3(grid), dim3(256) , 0, 0,
0.5f
, 34u
, resultA.data()
, resultB.data()
, flags.data()
, writePos.data()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<bool> gained = flags;
std::vector<bool> expected(34, false);
expected[15] = true;
expected[33] = true;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()) == true);
}
// -----------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | unique tuple functor")
{
thrust::device_vector<FeatureInstance> pairsA;
{
thrust::host_vector<FeatureInstance> hPairsA;
FeatureInstance a;
// A-B
a.field = 0x000A0000;
hPairsA.push_back(a);
a.field = 0x000A0001;
hPairsA.push_back(a);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
// B-C
a.field = 0x000B0000;
hPairsA.push_back(a);
a.field = 0x000B0001;
hPairsA.push_back(a);
pairsA = hPairsA;
}
thrust::device_vector<unsigned int> begins;
{
std::vector<unsigned int> hBegins = { 0, 2, 3 };
begins = hBegins;
}
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hcounts = { 2, 1, 2 };
counts = hcounts;
}
thrust::device_vector<unsigned int> typesCounts;
{
std::vector<unsigned int> hTypesCounts = { 2, 2, 2 };
typesCounts = hTypesCounts;
}
thrust::device_vector<FeatureInstance> uniqueFeatureInstancesInPairType(6);
thrust::device_vector<float> result(3);
Prevalence::UniqueFilter::UniqueTupleCountFunctor f_in;
{
f_in.data = pairsA.data();
f_in.begins = begins.data();
f_in.count = counts.data();
f_in.typeCount = typesCounts.data();
f_in.uniquesOutput = uniqueFeatureInstancesInPairType.data();
f_in.results = result.data();
}
thrust::device_vector<unsigned int> idxs(3);
thrust::sequence(idxs.begin(), idxs.end());
thrust::for_each(thrust::device, idxs.begin(), idxs.end(), f_in);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<float> res = result;
std::vector<float> expected = { 1.f, 0.5f, 1.f };
REQUIRE(std::equal(expected.begin(), expected.end(), res.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | unary transfrom")
{
const unsigned int uniquesCount = 34;
thrust::device_vector<thrust::tuple<FeatureInstance, FeatureInstance>> uniques;
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureInstance a;
a.field = (i << 16) | (1);
FeatureInstance b;
b.field = ((i + 1) << 16) | (1);
huniques.push_back(thrust::make_tuple(a, b));
}
uniques = huniques;
}
thrust::device_vector<FeatureTypePair> result(uniquesCount);
std::vector<FeatureTypePair> expected;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureTypePair ftp;
ftp.combined = ((i << 16) & 0xFFFF0000) | (i + 1);
expected.push_back(ftp);
}
auto f_trans = Prevalence::UniqueFilter::FeatureInstancesTupleToFeatureTypePair();
thrust::transform(
thrust::device
, uniques.begin()
, uniques.end()
, result.begin()
, f_trans
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<FeatureTypePair> gained = result;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()));
}
// -----------------------------------------------------------------
/*
uniques = { { A1-B1}, {B1-C1}, {C1-D1} ... }
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | write throught mask")
{
const unsigned int uniquesCount = 34;
thrust::device_vector<FeatureTypePair> dataFeed;
{
std::vector<FeatureTypePair> hdataFeed;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureTypePair ftp;
ftp.combined = ((i << 16) & 0xFFFF0000) | (i + 1);
hdataFeed.push_back(ftp);
}
dataFeed = hdataFeed;
}
thrust::device_vector<bool> mask;
{
std::vector<bool> hmask(uniquesCount, false);
{
hmask[15] = true;
hmask[33] = true;
}
mask = hmask;
}
thrust::device_vector<unsigned int> writePos;
{
thrust::host_vector<unsigned int> hwritePos(uniquesCount);
std::fill(hwritePos.begin(), hwritePos.begin() + 16, 0);
std::fill(hwritePos.begin() + 16, hwritePos.begin() + uniquesCount, 1);
writePos = hwritePos;
}
thrust::device_vector<FeatureTypePair> result(2);
std::vector<FeatureTypePair> expected;
{
expected.push_back({ 0x000F0010});
expected.push_back({ 0x00210022 });
}
dim3 grid;
findSmallest2D(uniquesCount, 256, grid.x, grid.y);
hipLaunchKernelGGL(( Prevalence::UniqueFilter::writeThroughtMask), dim3(grid), dim3(256) , 0, 0,
uniquesCount
, dataFeed.data()
, mask.data()
, writePos.data()
, result.data()
);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
thrust::host_vector<FeatureTypePair> gained = result;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()));
}
// -----------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | FeaturePairType union fields order")
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
REQUIRE(ftp.combined == 0x000A000B);
}
| 7612861ec5dfe2083a5d9dc3a327f00bfaf081f3.cu | #include "catch.hpp"
#include <map>
#include <thrust/device_vector.h>
#include <thrust/unique.h>
//--------------------------------------------------------------
#define TEST_CUDA_CHECK_RETURN
//--------------------------------------------------------------
#include "BaseCudaTestHandler.h"
#include "../GPUPatternMining/HashMap/gpuhashmapper.h"
#include "../GPUPatternMining/Prevalence/PrevalentTypedPairProvider.h"
#include "../GPUPatternMining/InstanceTree/IntanceTablesMapCreator.h"
#include <thrust/execution_policy.h>
#include "../GPUPatternMining/Entities/TypeCount.h"
//--------------------------------------------------------------
/*
Test for graph
A0-B0-C0-B1-A1-C1
*/
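// Instance encoding used below (inferred from the values and the FeatureTypePair union test):
// the high 16 bits of FeatureInstance::field hold the feature type (0xA, 0xB, 0xC) and the
// low 16 bits hold the instance id, e.g. 0x000A0001 is instance 1 of type A.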
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | simple")
{
std::vector<TypeCount> counts;
{
counts.push_back(TypeCount(0xA, 2));
counts.push_back(TypeCount(0xB, 2));
counts.push_back(TypeCount(0xC, 2));
}
const float minimalPrevalence = 0.6f;
auto plRes = std::make_shared<PlaneSweepTableInstanceResult>();
auto itmPack = std::make_shared<IntanceTablesMapCreator::ITMPack>();
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
FeatureInstance a;
FeatureInstance b;
a.field = 0x000A0000;
b.field = 0x000B0000;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000A0001;
b.field = 0x000C0001;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000B0000;
b.field = 0x000C0000;
huniques.push_back(thrust::make_tuple(a, b));
itmPack->uniques = huniques;
}
{
thrust::host_vector<FeatureInstance> hPairsA;
thrust::host_vector<FeatureInstance> hPairsB;
FeatureInstance a;
FeatureInstance b;
// A-B
a.field = 0x000A0000;
hPairsA.push_back(a);
b.field = 0x000B0000;
hPairsB.push_back(b);
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000B0001;
hPairsB.push_back(b);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
// B-C
a.field = 0x000B0000;
hPairsA.push_back(a);
b.field = 0x000C0000;
hPairsB.push_back(b);
a.field = 0x000B0001;
hPairsA.push_back(a);
b.field = 0x000C0000;
hPairsB.push_back(b);
plRes->pairsA = hPairsA;
plRes->pairsB = hPairsB;
}
{
std::vector<unsigned int> counts = { 2, 1, 2 };
itmPack->counts = counts;
}
{
std::vector<unsigned int> indices = { 0, 2, 3 };
itmPack->begins = indices;
}
itmPack->count = itmPack->uniques.size();
Prevalence::UniqueFilter::PrevalentTypedPairProvider bppc(
counts, itmPack);
thrust::host_vector<FeatureTypePair> result = bppc.getPrevalentPairConnections(
minimalPrevalence
, plRes
);
std::vector<FeatureTypePair> expected;
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
expected.push_back(ftp);
}
REQUIRE(std::equal(expected.begin(), expected.end(), result.begin()));
}
/*
Test for graph
C2-C3
| |
A1-B1 A5-B5
A2-B2 \ /
A3-B3 C1
A4-B4
|
A6-C4
*/
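// Expected prevalence, assuming the participation-index style measure implied by the expected
// output (min over both types of distinct participating instances / total instances of that type):
// A-B: min(5/6, 4/5) = 0.80, B-C: min(3/5, 3/4) = 0.60, A-C: min(3/6, 3/4) = 0.50.
// With the threshold 3/5.1 ~= 0.588 only A-B and B-C remain prevalent.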
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | simple 2")
{
std::vector<TypeCount> counts;
{
counts.push_back(TypeCount(0xA, 6));
counts.push_back(TypeCount(0xB, 5));
counts.push_back(TypeCount(0xC, 4));
}
const float minimalPrevalence = 3.f / 5.1f;
auto plRes = std::make_shared<PlaneSweepTableInstanceResult>();
auto itmPack = std::make_shared<IntanceTablesMapCreator::ITMPack>();
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
FeatureInstance a;
FeatureInstance b;
a.field = 0x000A0001;
b.field = 0x000B0001;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000A0001;
b.field = 0x000C0002;
huniques.push_back(thrust::make_tuple(a, b));
a.field = 0x000B0001;
b.field = 0x000C0003;
huniques.push_back(thrust::make_tuple(a, b));
itmPack->uniques = huniques;
}
{
thrust::host_vector<FeatureInstance> hPairsA;
thrust::host_vector<FeatureInstance> hPairsB;
FeatureInstance a;
FeatureInstance b;
// A-B
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000B0001;
hPairsB.push_back(b);
a.field = 0x000A0002;
hPairsA.push_back(a);
b.field = 0x000B0002;
hPairsB.push_back(b);
a.field = 0x000A0003;
hPairsA.push_back(a);
b.field = 0x000B0004;
hPairsB.push_back(b);
a.field = 0x000A0004;
hPairsA.push_back(a);
b.field = 0x000B0004;
hPairsB.push_back(b);
a.field = 0x000A0005;
hPairsA.push_back(a);
b.field = 0x000B0005;
hPairsB.push_back(b);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
b.field = 0x000C0002;
hPairsB.push_back(b);
a.field = 0x000A0005;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
a.field = 0x000A0006;
hPairsA.push_back(a);
b.field = 0x000C0004;
hPairsB.push_back(b);
// B-C
a.field = 0x000B0001;
hPairsA.push_back(a);
b.field = 0x000C0003;
hPairsB.push_back(b);
a.field = 0x000B0004;
hPairsA.push_back(a);
b.field = 0x000C0004;
hPairsB.push_back(b);
a.field = 0x000B0005;
hPairsA.push_back(a);
b.field = 0x000C0001;
hPairsB.push_back(b);
plRes->pairsA = hPairsA;
plRes->pairsB = hPairsB;
}
{
std::vector<unsigned int> counts = { 5, 3, 3 };
itmPack->counts = counts;
}
{
std::vector<unsigned int> indices = { 0, 5, 8 };
itmPack->begins = indices;
}
itmPack->count = itmPack->uniques.size();
Prevalence::UniqueFilter::PrevalentTypedPairProvider bppc(
counts, itmPack);
thrust::host_vector<FeatureTypePair> result = bppc.getPrevalentPairConnections(
minimalPrevalence
, plRes
);
std::vector<FeatureTypePair> expected;
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
expected.push_back(ftp);
ftp.types.a = 0x000B;
ftp.types.b = 0x000C;
expected.push_back(ftp);
}
REQUIRE(std::equal(expected.begin(), expected.end(), result.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | flag setter")
{
thrust::device_vector<float> resultA;
{
std::vector<float> resa(33, 0.1f);
resa.push_back(0.5f);
resa[15] = 0.6;
resultA = resa;
}
thrust::device_vector<float> resultB;
{
std::vector<float> resa(33, 0.1f);
resa.push_back(0.5f);
resa[15] = 0.6;
resa[17] = 0.6;
resultB = resa;
}
thrust::device_vector<bool> flags(34);
thrust::device_vector<unsigned int> writePos(34);
dim3 grid;
findSmallest2D(34, 256, grid.x, grid.y);
Prevalence::UniqueFilter::setPrevalentFlag <<< grid, 256 >>> (
0.5f
, 34u
, resultA.data()
, resultB.data()
, flags.data()
, writePos.data()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<bool> gained = flags;
std::vector<bool> expected(34, false);
expected[15] = true;
expected[33] = true;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()) == true);
}
// -----------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | unique tuple functor")
{
thrust::device_vector<FeatureInstance> pairsA;
{
thrust::host_vector<FeatureInstance> hPairsA;
FeatureInstance a;
// A-B
a.field = 0x000A0000;
hPairsA.push_back(a);
a.field = 0x000A0001;
hPairsA.push_back(a);
// A-C
a.field = 0x000A0001;
hPairsA.push_back(a);
// B-C
a.field = 0x000B0000;
hPairsA.push_back(a);
a.field = 0x000B0001;
hPairsA.push_back(a);
pairsA = hPairsA;
}
thrust::device_vector<unsigned int> begins;
{
std::vector<unsigned int> hBegins = { 0, 2, 3 };
begins = hBegins;
}
thrust::device_vector<unsigned int> counts;
{
std::vector<unsigned int> hcounts = { 2, 1, 2 };
counts = hcounts;
}
thrust::device_vector<unsigned int> typesCounts;
{
std::vector<unsigned int> hTypesCounts = { 2, 2, 2 };
typesCounts = hTypesCounts;
}
thrust::device_vector<FeatureInstance> uniqueFeatureInstancesInPairType(6);
thrust::device_vector<float> result(3);
Prevalence::UniqueFilter::UniqueTupleCountFunctor f_in;
{
f_in.data = pairsA.data();
f_in.begins = begins.data();
f_in.count = counts.data();
f_in.typeCount = typesCounts.data();
f_in.uniquesOutput = uniqueFeatureInstancesInPairType.data();
f_in.results = result.data();
}
thrust::device_vector<unsigned int> idxs(3);
thrust::sequence(idxs.begin(), idxs.end());
thrust::for_each(thrust::device, idxs.begin(), idxs.end(), f_in);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<float> res = result;
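// Expected ratios below follow from the inputs above: the functor appears to count the
// distinct first-type instances per pair type and divide by that type's instance count,
// giving A-B -> 2/2 = 1.0, A-C -> 1/2 = 0.5, B-C -> 2/2 = 1.0.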
std::vector<float> expected = { 1.f, 0.5f, 1.f };
REQUIRE(std::equal(expected.begin(), expected.end(), res.begin()));
}
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | unary transfrom")
{
const unsigned int uniquesCount = 34;
thrust::device_vector<thrust::tuple<FeatureInstance, FeatureInstance>> uniques;
{
thrust::host_vector<thrust::tuple<FeatureInstance, FeatureInstance>> huniques;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureInstance a;
a.field = (i << 16) | (1);
FeatureInstance b;
b.field = ((i + 1) << 16) | (1);
huniques.push_back(thrust::make_tuple(a, b));
}
uniques = huniques;
}
thrust::device_vector<FeatureTypePair> result(uniquesCount);
std::vector<FeatureTypePair> expected;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureTypePair ftp;
ftp.combined = ((i << 16) & 0xFFFF0000) | (i + 1);
expected.push_back(ftp);
}
auto f_trans = Prevalence::UniqueFilter::FeatureInstancesTupleToFeatureTypePair();
thrust::transform(
thrust::device
, uniques.begin()
, uniques.end()
, result.begin()
, f_trans
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<FeatureTypePair> gained = result;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()));
}
// -----------------------------------------------------------------
/*
uniques = { { A1-B1}, {B1-C1}, {C1-D1} ... }
*/
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | write throught mask")
{
const unsigned int uniquesCount = 34;
thrust::device_vector<FeatureTypePair> dataFeed;
{
std::vector<FeatureTypePair> hdataFeed;
for (int i = 0; i < uniquesCount; ++i)
{
FeatureTypePair ftp;
ftp.combined = ((i << 16) & 0xFFFF0000) | (i + 1);
hdataFeed.push_back(ftp);
}
dataFeed = hdataFeed;
}
thrust::device_vector<bool> mask;
{
std::vector<bool> hmask(uniquesCount, false);
{
hmask[15] = true;
hmask[33] = true;
}
mask = hmask;
}
thrust::device_vector<unsigned int> writePos;
{
thrust::host_vector<unsigned int> hwritePos(uniquesCount);
std::fill(hwritePos.begin(), hwritePos.begin() + 16, 0);
std::fill(hwritePos.begin() + 16, hwritePos.begin() + uniquesCount, 1);
writePos = hwritePos;
}
thrust::device_vector<FeatureTypePair> result(2);
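// Only indices 15 and 33 are selected by the mask above; their packed type pairs are
// i = 15 -> (15 << 16) | 16 = 0x000F0010 and i = 33 -> (33 << 16) | 34 = 0x00210022.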
std::vector<FeatureTypePair> expected;
{
expected.push_back({ 0x000F0010});
expected.push_back({ 0x00210022 });
}
dim3 grid;
findSmallest2D(uniquesCount, 256, grid.x, grid.y);
Prevalence::UniqueFilter::writeThroughtMask<<< grid, 256 >>>(
uniquesCount
, dataFeed.data()
, mask.data()
, writePos.data()
, result.data()
);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
thrust::host_vector<FeatureTypePair> gained = result;
REQUIRE(std::equal(expected.begin(), expected.end(), gained.begin()));
}
// -----------------------------------------------------------------
TEST_CASE_METHOD(BaseCudaTestHandler, "PrevalentTypedPairProvider | FeaturePairType union fields order")
{
FeatureTypePair ftp;
ftp.types.a = 0x000A;
ftp.types.b = 0x000B;
REQUIRE(ftp.combined == 0x000A000B);
}
|
2076e270624fbcce60bf8ab6516b9cc722bba70b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
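// IoU of two axis-aligned boxes a and b given as (x1, y1, x2, y2); the "+ 1 - 1" terms
// (a leftover of a pixel-inclusive variant) cancel and leave the plain width/height.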
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1 - 1, 0.f), height = max(bottom - top + 1 - 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1 - 1) * (a[3] - a[1] + 1 - 1);
float Sb = (b[2] - b[0] + 1 - 1) * (b[3] - b[1] + 1 - 1);
return interS / (Sa + Sb - interS);
}
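// One block compares a 64-box row chunk against a 64-box column chunk (threadsPerBlock is the
// 64 bits of an unsigned long long). Each thread owns one row box and sets bit i of its mask
// word whenever the IoU with column box i exceeds the threshold; dev_mask stores one word per
// (box, column-chunk) pair and is reduced on the host afterwards.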
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
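// Assumed row layout (consistent with devIoU and the select(1, 4) below): [x1, y1, x2, y2, score].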
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
| 2076e270624fbcce60bf8ab6516b9cc722bba70b.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
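// IoU of two axis-aligned boxes a and b given as (x1, y1, x2, y2); the "+ 1 - 1" terms
// (a leftover of a pixel-inclusive variant) cancel and leave the plain width/height.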
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1 - 1, 0.f), height = max(bottom - top + 1 - 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1 - 1) * (a[3] - a[1] + 1 - 1);
float Sb = (b[2] - b[0] + 1 - 1) * (b[3] - b[1] + 1 - 1);
return interS / (Sa + Sb - interS);
}
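// One block compares a 64-box row chunk against a 64-box column chunk (threadsPerBlock is the
// 64 bits of an unsigned long long). Each thread owns one row box and sets bit i of its mask
// word whenever the IoU with column box i exceeds the threshold; dev_mask stores one word per
// (box, column-chunk) pair and is reduced on the host afterwards.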
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is a N x 5 tensor
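// Assumed row layout (consistent with devIoU and the select(1, 4) below): [x1, y1, x2, y2, score].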
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
c071ecb31f9077025546ec36948fa171df5e86af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Quantum Lattice Boltzmann
* (c) 2015 Fabian Thüring, ETH Zurich
*
* This file contains all the CUDA kernels and functions that make use of the
* CUDA runtime API
*/
// Local includes
#include "QLB.hpp"
// ==== CONSTANTS ====
__constant__ unsigned int d_L;
__constant__ float d_dx;
__constant__ float d_dt;
__constant__ float d_mass;
__constant__ float d_g;
__constant__ unsigned int d_t;
__constant__ float d_scaling;
__constant__ int d_current_scene;
// ==== INITIALIZATION ====
void QLB::allocate_device_arrays()
{
cuassert(hipMalloc(&d_X, X.size() * sizeof(d_X[0])));
cuassert(hipMalloc(&d_Y, Y.size() * sizeof(d_Y[0])));
cuassert(hipMalloc(&d_Xinv, Xinv.size() * sizeof(d_Xinv[0])));
cuassert(hipMalloc(&d_Yinv, Yinv.size() * sizeof(d_Yinv[0])));
cuassert(hipMalloc(&d_alphaX, alphaX.size() * sizeof(d_alphaX[0])));
cuassert(hipMalloc(&d_alphaY, alphaY.size() * sizeof(d_alphaY[0])));
cuassert(hipMalloc(&d_beta, beta.size() * sizeof(d_beta[0])));
cuassert(hipMalloc(&d_spinor_, spinor_.size() * sizeof(d_spinor_[0])));
cuassert(hipMalloc(&d_spinoraux_, spinoraux_.size()*sizeof(d_spinoraux_[0])));
cuassert(hipMalloc(&d_spinorrot_, spinorrot_.size()*sizeof(d_spinorrot_[0])));
cuassert(hipMalloc(&d_V_, V_.size() * sizeof(d_V_[0])));
#ifdef QLB_CUDA_GL_WORKAROUND
cuassert(hipMalloc(&d_vertex_ptr_, array_vertex_.size() * sizeof(float)));
cuassert(hipMalloc(&d_normal_ptr_, array_normal_.size() * sizeof(float)));
#endif
cuassert(hipDeviceSynchronize());
}
void QLB::free_device_arrays()
{
cuassert(hipFree((void*) d_X));
cuassert(hipFree((void*) d_Y));
cuassert(hipFree((void*) d_Xinv));
cuassert(hipFree((void*) d_Yinv));
cuassert(hipFree((void*) d_alphaX));
cuassert(hipFree((void*) d_alphaY));
cuassert(hipFree((void*) d_beta));
cuassert(hipFree((void*) d_spinor_));
cuassert(hipFree((void*) d_spinoraux_));
cuassert(hipFree((void*) d_spinorrot_));
cuassert(hipFree((void*) d_V_));
#ifdef QLB_CUDA_GL_WORKAROUND
cuassert(hipFree((void*) d_vertex_ptr_));
cuassert(hipFree((void*) d_normal_ptr_));
#endif
}
/**
* Print version information
* @param grid1 Grid dimensions for (L x L) kernels
* @param grid4 Grid dimensions for (L x L x 4) kernels
* @param block1 Block dimensions for (L x L) kernels
* @param block4 Block dimensions for (L x L x 4) kernels
*/
static void print_version_information(dim3 grid1, dim3 grid4, dim3 block1, dim3 block4)
{
std::cout << " === CUDA Info === " << std::endl;
hipDeviceProp_t deviceProp;
cuassert(hipGetDeviceProperties(&deviceProp, 0));
int dvVers = 0; cuassert(hipDriverGetVersion(&dvVers));
int rtVers = 0; cuassert(hipRuntimeGetVersion(&rtVers));
std::printf("CUDA Driver Version: %d.%d\n", dvVers/1000, dvVers % 100);
std::printf("CUDA Runtime Version: %d.%d\n", rtVers/1000, rtVers % 100);
std::printf("Total GPU memory: %u bytes\n",
unsigned(deviceProp.totalGlobalMem));
std::printf("Multiprocessors on device: %u\n",
unsigned(deviceProp.multiProcessorCount));
std::printf("Max threads per block: %u\n",
unsigned(deviceProp.maxThreadsPerBlock));
std::printf("Max warp size: %u\n",
unsigned(deviceProp.warpSize));
std::printf("Selected grid size (1): (%3u, %3u, %3u)\n",
grid1.x, grid1.y, grid1.z);
std::printf("Selected block size (1): (%3u, %3u, %3u) = %u\n",
block1.x, block1.y, block1.z, block1.x*block1.y*block1.z );
std::printf("Selected grid size (4): (%3u, %3u, %3u)\n",
grid4.x, grid4.y, grid4.z);
std::printf("Selected block size (4): (%3u, %3u, %3u) = %u\n\n",
block4.x, block4.y, block4.z, block4.x*block4.y*block4.z );
}
/**
* Copy a matrix from host to device (if [value_t = cuFloatComplex] a specialized
* version will be used)
* @param d_ptr device pointer
* @param m matrix to be copied from
*/
template< class value_t, class mat_t >
static void copy_from_host_to_device(value_t* & d_ptr, const mat_t& m)
{
std::vector<value_t> tmp(m.size());
for(std::size_t i = 0; i < m.size(); ++i)
tmp[i] = value_t(m[i]);
cuassert(hipMemcpy(d_ptr, tmp.data(), sizeof(tmp[0]) * tmp.size(),
hipMemcpyHostToDevice));
}
template< class mat_t >
static void copy_from_host_to_device(cuFloatComplex* & d_ptr, const mat_t& m)
{
std::vector<cuFloatComplex> tmp(m.size());
for(std::size_t i = 0; i < m.size(); ++i)
tmp[i] = make_cuFloatComplex(m[i]);
cuassert(hipMemcpy(d_ptr, tmp.data(), sizeof(tmp[0]) * tmp.size(),
hipMemcpyHostToDevice));
}
/**
* Copy a matrix from the device to host (if [value_t = cuFloatComplex] a specialized
* version will be used)
* @param d_ptr device pointer
* @param m matrix to be copied to (of type QLB::float_t)
*/
template< class value_t, class mat_t >
static void copy_from_device_to_host(value_t* d_ptr, mat_t& m)
{
std::vector<value_t> tmp(m.size());
cuassert(hipMemcpy(tmp.data(), d_ptr, sizeof(tmp[0]) * tmp.size(),
hipMemcpyDeviceToHost));
for(std::size_t i = 0; i < m.size(); ++i)
m[i] = value_t(tmp[i]);
}
template< class mat_t >
static void copy_from_device_to_host(cuFloatComplex* d_ptr, mat_t& m)
{
std::vector<cuFloatComplex> tmp(m.size());
cuassert(hipMemcpy(tmp.data(), d_ptr, sizeof(tmp[0]) * tmp.size(),
hipMemcpyDeviceToHost));
for(std::size_t i = 0; i < m.size(); ++i)
m[i] = make_stdComplex<QLB::float_t>(tmp[i]);
}
void QLB::init_device()
{
// initialize constant matrices
copy_from_host_to_device(d_X, X);
copy_from_host_to_device(d_Y, Y);
copy_from_host_to_device(d_Xinv, Xinv);
copy_from_host_to_device(d_Yinv, Yinv);
copy_from_host_to_device(d_alphaX, alphaX);
copy_from_host_to_device(d_alphaY, alphaY);
copy_from_host_to_device(d_beta, beta);
// initialize simulation matrices
copy_from_host_to_device(d_spinor_, spinor_);
copy_from_host_to_device(d_spinoraux_, spinoraux_);
copy_from_host_to_device(d_spinorrot_, spinorrot_);
copy_from_host_to_device(d_V_, V_);
// initialize simulation variables
cuassert(hipMemcpyToSymbol(d_L, &L_, sizeof(L_)));
float dx = static_cast<float>(dx_);
cuassert(hipMemcpyToSymbol(d_dx, &dx, sizeof(dx)));
float dt = static_cast<float>(dt_);
cuassert(hipMemcpyToSymbol(d_dt, &dt, sizeof(dt)));
float mass = static_cast<float>(mass_);
cuassert(hipMemcpyToSymbol(d_mass, &mass, sizeof(mass)));
float g = static_cast<float>(g_);
cuassert(hipMemcpyToSymbol(d_g, &g, sizeof(g)));
int current_scene = current_scene_;
cuassert(hipMemcpyToSymbol(d_current_scene, &current_scene, sizeof(current_scene)));
float scaling = static_cast<float>(scaling_);
cuassert(hipMemcpyToSymbol(d_scaling, &scaling, sizeof(scaling)));
cuassert(hipMemcpyToSymbol(d_t, &t_, sizeof(t_)));
if(opt_.verbose())
print_version_information(grid1_, grid4_, block1_, block4_);
cuassert(hipDeviceSynchronize());
}
void QLB::get_device_arrays()
{
copy_from_device_to_host(d_spinor_, spinor_);
copy_from_device_to_host(d_spinorrot_, spinorrot_);
copy_from_device_to_host(d_spinoraux_, spinoraux_);
}
void QLB::update_device_constants()
{
float scaling = static_cast<float>(scaling_);
cuassert(hipMemcpyToSymbol(d_scaling, &scaling, sizeof(scaling)));
int current_scene = current_scene_;
cuassert(hipMemcpyToSymbol(d_current_scene, &current_scene, sizeof(current_scene)));
}
// =============================== SIMULATION ==================================
#define at(i,j,k) 4*(d_L*(i) + (j)) + (k)
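// Flattened index of spinor component k at lattice site (i, j): four components per site,
// sites stored row-major in i.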
/**
* Rotate the spinors and store the result in spinorrot and spinoraux
* @param spinor device pointer spinors
* @param spinor device pointer spinorrot
* @param spinor device pointer spinoraux
* @param Rinv inverse rotation matrix (4 x 4)
*/
__global__ void kernel_rotate(const cuFloatComplex* __restrict spinor,
cuFloatComplex* spinorrot,
cuFloatComplex* spinoraux,
const cuFloatComplex* __restrict Rinv)
{
__shared__ cuFloatComplex Rinv_loc[16];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int lj = threadIdx.y;
int lk = threadIdx.z;
if(lj < 4 && lk < 4)
Rinv_loc[lj*4 + lk] = Rinv[lj*4 + lk];
__syncthreads();
if(i < d_L && j < d_L)
{
const int ij = at(i,j,0);
const int ijk = ij + k;
spinorrot[ijk] = Rinv_loc[4*k + 0] * spinor[ij + 0] +
Rinv_loc[4*k + 1] * spinor[ij + 1] +
Rinv_loc[4*k + 2] * spinor[ij + 2] +
Rinv_loc[4*k + 3] * spinor[ij + 3];
spinoraux[ijk] = spinorrot[ijk];
}
}
/**
* Rotate spinorrot back and store the result in spinor
* @param spinor device pointer spinor
* @param spinor device pointer spinorrot
* @param R rotation matrix (4 x 4)
*/
__global__ void kernel_rotate_back(cuFloatComplex* spinor,
const cuFloatComplex* __restrict spinorrot,
const cuFloatComplex* __restrict R)
{
__shared__ cuFloatComplex R_loc[16];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int lj = threadIdx.y;
int lk = threadIdx.z;
if(lj < 4 && lk < 4)
R_loc[lj*4 + lk] = R[lj*4 + lk];
__syncthreads();
if(i < d_L && j < d_L)
{
const int ij = at(i,j,0);
spinor[ij+k] = R_loc[4*k + 0] * spinorrot[ij + 0] +
R_loc[4*k + 1] * spinorrot[ij + 1] +
R_loc[4*k + 2] * spinorrot[ij + 2] +
R_loc[4*k + 3] * spinorrot[ij + 3];
}
}
/**
* Collide and stream with matrix Q_X
* @param spinorrot device pointer spinorrot
* @param spinoraux device pointer spinoraux
* @param V device pointer potential
*/
__global__ void kernel_collide_Q_X(cuFloatComplex* spinorrot,
const cuFloatComplex* __restrict spinoraux,
const float* __restrict V)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ia = (i + 1) % d_L;
int ik = (i - 1 + d_L) % d_L;
// Precomputed indices
const int i_j = at(i, j,0);
const int ia_j = at(ia,j,0);
const int ik_j = at(ik,j,0);
const float m = 0.5f * d_mass* d_dt;
const float g = 0.5f * V[i*d_L +j] * d_dt;
const float omega = m*m - g*g;
const cuFloatComplex a_nom = make_cuFloatComplex(1.0f - 0.25f*omega, 0.0f);
const cuFloatComplex a_den = make_cuFloatComplex(1.0f + 0.25f*omega, -1.0f*g);
const cuFloatComplex b_nom = make_cuFloatComplex(m, 0.0f);
const cuFloatComplex a = a_nom / a_den;
const cuFloatComplex b = b_nom / a_den;
const cuFloatComplex neg_bi = b * make_cuFloatComplex( 0.0f, -1.0f);
const cuFloatComplex pos_bi = b * make_cuFloatComplex( 0.0f, 1.0f);
// The matrix multiplication has been unrolled and 0 entries in Qhat
// have been skipped
// Qhat = X^(-1) * Q * X
spinorrot[ia_j + 0] = a * spinoraux[i_j+0] + neg_bi * spinoraux[i_j+3];
spinorrot[ia_j + 1] = a * spinoraux[i_j+1] + pos_bi * spinoraux[i_j+2];
spinorrot[ik_j + 2] = pos_bi * spinoraux[i_j+1] + a * spinoraux[i_j+2];
spinorrot[ik_j + 3] = neg_bi * spinoraux[i_j+0] + a * spinoraux[i_j+3];
}
}
/**
* Collide and stream with matrix Q_Y
* @param spinorrot device pointer spinorrot
* @param spinoraux device pointer spinoraux
* @param V device pointer potential
*/
__global__ void kernel_collide_Q_Y(cuFloatComplex* spinorrot,
const cuFloatComplex* __restrict spinoraux,
const float* __restrict V)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ja = (j + 1) % d_L;
int jk = (j - 1 + d_L) % d_L;
// Precomputed indices
const int i_j = at(i,j ,0);
const int i_ja = at(i,ja,0);
const int i_jk = at(i,jk,0);
const float m = 0.5f * d_mass* d_dt;
const float g = 0.5f * V[i*d_L +j] * d_dt;
const float omega = m*m - g*g;
const cuFloatComplex a_nom = make_cuFloatComplex(1.0f - 0.25f*omega, 0.0f);
const cuFloatComplex b_nom = make_cuFloatComplex(m, 0.0f);
const cuFloatComplex a_den = make_cuFloatComplex(1.0f + 0.25f*omega, -1.0f*g);
const cuFloatComplex a = a_nom / a_den;
const cuFloatComplex b = b_nom / a_den;
const cuFloatComplex neg_bi = b * make_cuFloatComplex( 0.0f, -1.0f);
// The matrix multiplication has been unrolled and 0 entries in Qhat
// have been skipped
// Qhat = Y^(-1) * Q * Y
spinorrot[i_ja + 0] = a * spinoraux[i_j+0] + neg_bi * spinoraux[i_j+3];
spinorrot[i_ja + 1] = a * spinoraux[i_j+1] + neg_bi * spinoraux[i_j+2];
spinorrot[i_jk + 2] = neg_bi * spinoraux[i_j+1] + a * spinoraux[i_j+2];
spinorrot[i_jk + 3] = neg_bi * spinoraux[i_j+0] + a * spinoraux[i_j+3];
}
}
/**
* Update the potential array using the GP potential
* @param V device pointer potential array
* @param spinor device pointer spinor
*/
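// V(i, j) = g * local density, i.e. the coupling constant times the summed squared norms of the
// four spinor components (cuCnormf is assumed to return |z|^2).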
__global__ void kernel_set_potential(float* V,
const cuFloatComplex* __restrict spinor)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
V[d_L*i + j] = d_g * (
cuCnormf(spinor[at(i,j,0)]) + cuCnormf(spinor[at(i,j,1)]) +
cuCnormf(spinor[at(i,j,2)]) + cuCnormf(spinor[at(i,j,3)]) );
}
void QLB::evolution_GPU()
{
// Update potential array if needed
if(V_indx_ == 3)hipLaunchKernelGGL(( kernel_set_potential), dim3(grid1_), dim3(block1_) , 0, 0, d_V_, d_spinor_);
// Rotate with X^(-1)
hipLaunchKernelGGL(( kernel_rotate), dim3(grid4_), dim3(block4_) , 0, 0, d_spinor_, d_spinorrot_, d_spinoraux_, d_Xinv);
CUDA_CHECK_KERNEL
// Collide & stream with Q_X
hipLaunchKernelGGL(( kernel_collide_Q_X), dim3(grid1_), dim3(block1_) , 0, 0, d_spinorrot_, d_spinoraux_, d_V_);
CUDA_CHECK_KERNEL
// Rotate back with X
hipLaunchKernelGGL(( kernel_rotate_back), dim3(grid4_), dim3(block4_) , 0, 0, d_spinor_, d_spinorrot_, d_X);
CUDA_CHECK_KERNEL
// Rotate with Y^(-1)
hipLaunchKernelGGL(( kernel_rotate), dim3(grid4_), dim3(block4_) , 0, 0, d_spinor_, d_spinorrot_, d_spinoraux_, d_Yinv);
CUDA_CHECK_KERNEL
// Collide & stream with Q_Y
hipLaunchKernelGGL(( kernel_collide_Q_Y), dim3(grid1_), dim3(block1_) , 0, 0, d_spinorrot_, d_spinoraux_, d_V_);
CUDA_CHECK_KERNEL
// Rotate back with Y
hipLaunchKernelGGL(( kernel_rotate_back), dim3(grid4_), dim3(block4_) , 0, 0, d_spinor_, d_spinorrot_, d_Y);
CUDA_CHECK_KERNEL
hipDeviceSynchronize();
// Calculate the spreads
if( (opt_.plot() & QLBopt::spread) >> 1 || (opt_.plot() & QLBopt::all) )
{
copy_from_device_to_host(d_spinor_, spinor_);
hipDeviceSynchronize();
calculate_spread();
}
// Update time;
t_ += 1;
}
// ================================= GRAPHICS ==================================
// Unrolled loop for the current
#define CURRENT_UNROLLED_LOOP(i,j) (CURRENT((i),(j),0)+CURRENT((i),(j),1)+\
CURRENT((i),(j),2)+CURRENT((i),(j),3))
#define CURRENT(i,j,is) (CURRENT_1((i),(j),(is),0)+CURRENT_1((i),(j),(is),1)+\
CURRENT_1((i),(j),(is),2)+CURRENT_1((i),(j),(is),3))
#define CURRENT_1(i,j,is,js) (cuCrealf(cuConjf(\
d_ptr[at((i),(j),(is))])*alpha[(is)*4 + (js)]*d_ptr[at((i),(j),(js))]))
/**
* Calculate the vertices (spinors,density or current) and copy them to the
* vertex VBO.
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
* @param alpha pointer to the alpha matrix (unused if we don't calculate
* the current)
* @param alpha pointer to the beta matrix (unused if we don't calculate
* the current)
*/
__global__ void kernel_calculate_vertex_scene(float3* vbo_ptr,
const cuFloatComplex* __restrict d_ptr,
const cuFloatComplex* __restrict alpha,
const cuFloatComplex* __restrict beta)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
// Select the right scene (all warps always do the same)
if(d_current_scene < 4)
{
int k = d_current_scene;
vbo_ptr[d_L*i + j].y = d_scaling * cuCnormf( d_ptr[at(i,j,k)] );
}
else if(d_current_scene == 4)
{
vbo_ptr[d_L*i + j].y = d_scaling * (
cuCnormf( d_ptr[at(i,j,0)]) + cuCnormf( d_ptr[at(i,j,1)]) +
cuCnormf( d_ptr[at(i,j,2)]) + cuCnormf( d_ptr[at(i,j,3)]) );
}
else
{
vbo_ptr[d_L*i + j].y = d_scaling*( CURRENT_UNROLLED_LOOP(i,j) );
}
}
}
#define y(i,j) 3*((i)*L_ + (j)) + 1
void QLB::calculate_vertex_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_vertex.map();
float3* vbo_ptr = vbo_vertex.get_device_pointer();
if(current_scene_ < 5)
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, d_alphaX, d_beta);
else
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
vbo_vertex.unmap();
#else // Mac OSX
if(current_scene_ < 5)
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_vertex_ptr_, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_vertex_ptr_, d_spinor_, d_alphaX, d_beta);
else
hipLaunchKernelGGL(( kernel_calculate_vertex_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_vertex_ptr_, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
cuassert(hipMemcpy(array_vertex_.data(), d_vertex_ptr_,
sizeof(float) * array_vertex_.size(),
hipMemcpyDeviceToHost));
// Copy vertex array to vertex VBO
vbo_vertex.bind();
vbo_vertex.BufferSubData(0, array_vertex_.size()*sizeof(float),
&array_vertex_[0]);
vbo_vertex.unbind();
#endif
}
#undef y
/**
* Calculate the vertices (by taking the abs of the potential V) and copy them
* to the vertex VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
*/
__global__ void kernel_calculate_vertex_V(float3* vbo_ptr, float* d_ptr)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
vbo_ptr[d_L*i + j].y = d_scaling * fabsf( d_ptr[i*d_L +j] ) - 0.005f*d_L;
}
#define y(i,j) 3*((i)*L_ + (j)) + 1
void QLB::calculate_vertex_V_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_vertex.map();
float3* vbo_ptr = vbo_vertex.get_device_pointer();
hipLaunchKernelGGL(( kernel_calculate_vertex_V), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_V_);
CUDA_CHECK_KERNEL
vbo_vertex.unmap();
#else // Mac OSX
hipLaunchKernelGGL(( kernel_calculate_vertex_V), dim3(grid1_), dim3(block1_) , 0, 0, d_vertex_ptr_, d_V_);
CUDA_CHECK_KERNEL
cuassert(hipMemcpy(array_vertex_.data(), d_vertex_ptr_,
sizeof(float) * array_vertex_.size(),
hipMemcpyDeviceToHost));
// Copy vertex array to vertex VBO
vbo_vertex.bind();
vbo_vertex.BufferSubData(0, array_vertex_.size()*sizeof(float),
&array_vertex_[0]);
vbo_vertex.unbind();
#endif
}
#undef y
/**
* Calculate the normals of the spinors and copy them to the normal VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
* @param alpha pointer to the alpha matrix (unused if we don't calculate
* the current)
*/
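// The normal is the normalised cross product a x b of two surface tangents built from
// neighbouring heights: a = (dx, y(i+1, j) - y(i, j), 0) and b = (0, y(i, j-1) - y(i, j), -dx),
// where y is the (scaled) quantity currently displayed.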
__global__ void kernel_calculate_normal_scene(float3* vbo_ptr,
const cuFloatComplex* __restrict d_ptr,
const cuFloatComplex* __restrict alpha,
const cuFloatComplex* __restrict beta)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ik = (i + 1) % d_L;
int jk = (d_L - 1 + j) % d_L;
float vertex_i_j, vertex_ik_j, vertex_i_jk;
// Select the right scene (all warps always do the same)
if(d_current_scene < 4)
{
int k = d_current_scene;
vertex_i_j = cuCnormf( d_ptr[at(i ,j ,k)] );
vertex_ik_j = cuCnormf( d_ptr[at(ik,j ,k)] );
vertex_i_jk = cuCnormf( d_ptr[at(i ,jk,k)] );
}
else if(d_current_scene == 4)
{
vertex_i_j = cuCnormf(d_ptr[at(i,j,0)]) + cuCnormf(d_ptr[at(i,j,1)]) +
cuCnormf(d_ptr[at(i,j,2)]) + cuCnormf(d_ptr[at(i,j,3)]);
vertex_ik_j = cuCnormf(d_ptr[at(ik,j,0)]) + cuCnormf(d_ptr[at(ik,j,1)]) +
cuCnormf(d_ptr[at(ik,j,2)]) + cuCnormf(d_ptr[at(ik,j,3)]);
vertex_i_jk = cuCnormf(d_ptr[at(i,jk,0)]) + cuCnormf(d_ptr[at(i,jk,1)]) +
cuCnormf(d_ptr[at(i,jk,2)]) + cuCnormf(d_ptr[at(i,jk,3)]);
}
else
{
vertex_i_j = CURRENT_UNROLLED_LOOP(i ,j);
vertex_ik_j = CURRENT_UNROLLED_LOOP(ik ,j);
vertex_i_jk = CURRENT_UNROLLED_LOOP(i ,jk);
}
// x
float x2 = d_scaling * vertex_i_j;
// a
float a1 = d_dx;
float a2 = d_scaling * vertex_ik_j - x2;
// b
float b2 = d_scaling * vertex_i_jk - x2;
float b3 = -d_dx;
// n = a x b
float3 n;
n.x = a2*b3;
n.y = -a1*b3;
n.z = a1*b2;
// normalize
float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z);
vbo_ptr[d_L*i + j].x = n.x/norm;
vbo_ptr[d_L*i + j].y = n.y/norm;
vbo_ptr[d_L*i + j].z = n.z/norm;
}
}
void QLB::calculate_normal_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_normal.map();
float3* vbo_ptr = vbo_normal.get_device_pointer();
if(current_scene_ < 5)
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, d_alphaX, d_beta);
else
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
vbo_normal.unmap();
#else // Mac OSX
if(current_scene_ < 5)
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_normal_ptr_, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_normal_ptr_, d_spinor_, d_alphaX, d_beta);
else
hipLaunchKernelGGL(( kernel_calculate_normal_scene), dim3(grid1_), dim3(block1_) , 0, 0, d_normal_ptr_, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
cuassert(hipMemcpy(array_normal_.data(), d_normal_ptr_,
sizeof(float) * array_normal_.size(),
hipMemcpyDeviceToHost));
vbo_normal.bind();
vbo_normal.BufferSubData(0, array_normal_.size()*sizeof(float),
&array_normal_[0]);
vbo_normal.unbind();
#endif
}
/**
* Calculate the normals of the potential V and copy them to the normal VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
*/
__global__ void kernel_calculate_normal_V(float3* vbo_ptr, float* d_ptr)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ik = (i + 1) % d_L;
int jk = (d_L - 1 + j) % d_L;
// x
float x2 = d_scaling * fabsf( d_ptr[i*d_L +j] );
// a
float a1 = d_dx;
float a2 = d_scaling * fabsf( d_ptr[ik*d_L +j] ) - x2;
// b
float b2 = d_scaling * fabsf( d_ptr[i*d_L +jk] ) - x2;
float b3 = -d_dx;
// n = a x b
float3 n;
n.x = a2*b3;
n.y = -a1*b3;
n.z = a1*b2;
// normalize
float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z);
vbo_ptr[d_L*i + j].x = n.x/norm;
vbo_ptr[d_L*i + j].y = n.y/norm;
vbo_ptr[d_L*i + j].z = n.z/norm;
}
}
void QLB::calculate_normal_V_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_normal.map();
float3* vbo_ptr = vbo_normal.get_device_pointer();
hipLaunchKernelGGL(( kernel_calculate_normal_V), dim3(grid1_), dim3(block1_) , 0, 0, vbo_ptr, d_V_);
CUDA_CHECK_KERNEL
vbo_normal.unmap();
#else // Mac OSX
hipLaunchKernelGGL(( kernel_calculate_normal_V), dim3(grid1_), dim3(block1_) , 0, 0, d_normal_ptr_, d_V_);
CUDA_CHECK_KERNEL
cuassert(hipMemcpy(array_normal_.data(), d_normal_ptr_,
sizeof(float) * array_normal_.size(),
hipMemcpyDeviceToHost));
vbo_normal.bind();
vbo_normal.BufferSubData(0, array_normal_.size()*sizeof(float),
&array_normal_[0]);
vbo_normal.unbind();
#endif
}
| c071ecb31f9077025546ec36948fa171df5e86af.cu | /**
* Quantum Lattice Boltzmann
* (c) 2015 Fabian Thüring, ETH Zurich
*
* This file contains all the CUDA kernels and functions that make use of the
* CUDA runtime API
*/
// Local includes
#include "QLB.hpp"
// ==== CONSTANTS ====
__constant__ unsigned int d_L;
__constant__ float d_dx;
__constant__ float d_dt;
__constant__ float d_mass;
__constant__ float d_g;
__constant__ unsigned int d_t;
__constant__ float d_scaling;
__constant__ int d_current_scene;
// ==== INITIALIZATION ====
void QLB::allocate_device_arrays()
{
cuassert(cudaMalloc(&d_X, X.size() * sizeof(d_X[0])));
cuassert(cudaMalloc(&d_Y, Y.size() * sizeof(d_Y[0])));
cuassert(cudaMalloc(&d_Xinv, Xinv.size() * sizeof(d_Xinv[0])));
cuassert(cudaMalloc(&d_Yinv, Yinv.size() * sizeof(d_Yinv[0])));
cuassert(cudaMalloc(&d_alphaX, alphaX.size() * sizeof(d_alphaX[0])));
cuassert(cudaMalloc(&d_alphaY, alphaY.size() * sizeof(d_alphaY[0])));
cuassert(cudaMalloc(&d_beta, beta.size() * sizeof(d_beta[0])));
cuassert(cudaMalloc(&d_spinor_, spinor_.size() * sizeof(d_spinor_[0])));
cuassert(cudaMalloc(&d_spinoraux_, spinoraux_.size()*sizeof(d_spinoraux_[0])));
cuassert(cudaMalloc(&d_spinorrot_, spinorrot_.size()*sizeof(d_spinorrot_[0])));
cuassert(cudaMalloc(&d_V_, V_.size() * sizeof(d_V_[0])));
#ifdef QLB_CUDA_GL_WORKAROUND
cuassert(cudaMalloc(&d_vertex_ptr_, array_vertex_.size() * sizeof(float)));
cuassert(cudaMalloc(&d_normal_ptr_, array_normal_.size() * sizeof(float)));
#endif
cuassert(cudaDeviceSynchronize());
}
void QLB::free_device_arrays()
{
cuassert(cudaFree((void*) d_X));
cuassert(cudaFree((void*) d_Y));
cuassert(cudaFree((void*) d_Xinv));
cuassert(cudaFree((void*) d_Yinv));
cuassert(cudaFree((void*) d_alphaX));
cuassert(cudaFree((void*) d_alphaY));
cuassert(cudaFree((void*) d_beta));
cuassert(cudaFree((void*) d_spinor_));
cuassert(cudaFree((void*) d_spinoraux_));
cuassert(cudaFree((void*) d_spinorrot_));
cuassert(cudaFree((void*) d_V_));
#ifdef QLB_CUDA_GL_WORKAROUND
cuassert(cudaFree((void*) d_vertex_ptr_));
cuassert(cudaFree((void*) d_normal_ptr_));
#endif
}
/**
* Print version information
* @param grid1 Grid dimensions for (L x L) kernels
* @param grid4 Grid dimensions for (L x L x 4) kernels
* @param block1 Block dimensions for (L x L) kernels
* @param block4 Block dimensions for (L x L x 4) kernels
*/
static void print_version_information(dim3 grid1, dim3 grid4, dim3 block1, dim3 block4)
{
std::cout << " === CUDA Info === " << std::endl;
cudaDeviceProp deviceProp;
cuassert(cudaGetDeviceProperties(&deviceProp, 0));
int dvVers = 0; cuassert(cudaDriverGetVersion(&dvVers));
int rtVers = 0; cuassert(cudaRuntimeGetVersion(&rtVers));
std::printf("CUDA Driver Version: %d.%d\n", dvVers/1000, dvVers % 100);
std::printf("CUDA Runtime Version: %d.%d\n", rtVers/1000, rtVers % 100);
std::printf("Total GPU memory: %u bytes\n",
unsigned(deviceProp.totalGlobalMem));
std::printf("Multiprocessors on device: %u\n",
unsigned(deviceProp.multiProcessorCount));
std::printf("Max threads per block: %u\n",
unsigned(deviceProp.maxThreadsPerBlock));
std::printf("Max warp size: %u\n",
unsigned(deviceProp.warpSize));
std::printf("Selected grid size (1): (%3u, %3u, %3u)\n",
grid1.x, grid1.y, grid1.z);
std::printf("Selected block size (1): (%3u, %3u, %3u) = %u\n",
block1.x, block1.y, block1.z, block1.x*block1.y*block1.z );
std::printf("Selected grid size (4): (%3u, %3u, %3u)\n",
grid4.x, grid4.y, grid4.z);
std::printf("Selected block size (4): (%3u, %3u, %3u) = %u\n\n",
block4.x, block4.y, block4.z, block4.x*block4.y*block4.z );
}
/**
* Copy a matrix from host to device (if [value_t = cuFloatComplex] a specialized
* version will be used)
* @param d_ptr device pointer
* @param m matrix to be copied from
*/
template< class value_t, class mat_t >
static void copy_from_host_to_device(value_t* & d_ptr, const mat_t& m)
{
std::vector<value_t> tmp(m.size());
for(std::size_t i = 0; i < m.size(); ++i)
tmp[i] = value_t(m[i]);
cuassert(cudaMemcpy(d_ptr, tmp.data(), sizeof(tmp[0]) * tmp.size(),
cudaMemcpyHostToDevice));
}
template< class mat_t >
static void copy_from_host_to_device(cuFloatComplex* & d_ptr, const mat_t& m)
{
std::vector<cuFloatComplex> tmp(m.size());
for(std::size_t i = 0; i < m.size(); ++i)
tmp[i] = make_cuFloatComplex(m[i]);
cuassert(cudaMemcpy(d_ptr, tmp.data(), sizeof(tmp[0]) * tmp.size(),
cudaMemcpyHostToDevice));
}
/**
* Copy a matrix from the device to host (if [value_t = cuFloatComplex] a specialized
* version will be used)
* @param d_ptr device pointer
* @param m matrix to be copied to (of type QLB::float_t)
*/
template< class value_t, class mat_t >
static void copy_from_device_to_host(value_t* d_ptr, mat_t& m)
{
std::vector<value_t> tmp(m.size());
cuassert(cudaMemcpy(tmp.data(), d_ptr, sizeof(tmp[0]) * tmp.size(),
cudaMemcpyDeviceToHost));
for(std::size_t i = 0; i < m.size(); ++i)
m[i] = value_t(tmp[i]);
}
template< class mat_t >
static void copy_from_device_to_host(cuFloatComplex* d_ptr, mat_t& m)
{
std::vector<cuFloatComplex> tmp(m.size());
cuassert(cudaMemcpy(tmp.data(), d_ptr, sizeof(tmp[0]) * tmp.size(),
cudaMemcpyDeviceToHost));
for(std::size_t i = 0; i < m.size(); ++i)
m[i] = make_stdComplex<QLB::float_t>(tmp[i]);
}
void QLB::init_device()
{
// initialize constant matrices
copy_from_host_to_device(d_X, X);
copy_from_host_to_device(d_Y, Y);
copy_from_host_to_device(d_Xinv, Xinv);
copy_from_host_to_device(d_Yinv, Yinv);
copy_from_host_to_device(d_alphaX, alphaX);
copy_from_host_to_device(d_alphaY, alphaY);
copy_from_host_to_device(d_beta, beta);
// initialize simulation matrices
copy_from_host_to_device(d_spinor_, spinor_);
copy_from_host_to_device(d_spinoraux_, spinoraux_);
copy_from_host_to_device(d_spinorrot_, spinorrot_);
copy_from_host_to_device(d_V_, V_);
// initialize simulation variables
cuassert(cudaMemcpyToSymbol(d_L, &L_, sizeof(L_)));
float dx = static_cast<float>(dx_);
cuassert(cudaMemcpyToSymbol(d_dx, &dx, sizeof(dx)));
float dt = static_cast<float>(dt_);
cuassert(cudaMemcpyToSymbol(d_dt, &dt, sizeof(dt)));
float mass = static_cast<float>(mass_);
cuassert(cudaMemcpyToSymbol(d_mass, &mass, sizeof(mass)));
float g = static_cast<float>(g_);
cuassert(cudaMemcpyToSymbol(d_g, &g, sizeof(g)));
int current_scene = current_scene_;
cuassert(cudaMemcpyToSymbol(d_current_scene, &current_scene, sizeof(current_scene)));
float scaling = static_cast<float>(scaling_);
cuassert(cudaMemcpyToSymbol(d_scaling, &scaling, sizeof(scaling)));
cuassert(cudaMemcpyToSymbol(d_t, &t_, sizeof(t_)));
if(opt_.verbose())
print_version_information(grid1_, grid4_, block1_, block4_);
cuassert(cudaDeviceSynchronize());
}
void QLB::get_device_arrays()
{
copy_from_device_to_host(d_spinor_, spinor_);
copy_from_device_to_host(d_spinorrot_, spinorrot_);
copy_from_device_to_host(d_spinoraux_, spinoraux_);
}
void QLB::update_device_constants()
{
float scaling = static_cast<float>(scaling_);
cuassert(cudaMemcpyToSymbol(d_scaling, &scaling, sizeof(scaling)));
int current_scene = current_scene_;
cuassert(cudaMemcpyToSymbol(d_current_scene, &current_scene, sizeof(current_scene)));
}
// =============================== SIMULATION ==================================
#define at(i,j,k) 4*(d_L*(i) + (j)) + (k)
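// Flattened index of spinor component k at lattice site (i, j): four components per site,
// sites stored row-major in i.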
/**
* Rotate the spinors and store the result in spinorrot and spinoraux
* @param spinor device pointer spinors
* @param spinor device pointer spinorrot
* @param spinor device pointer spinoraux
* @param Rinv inverse rotation matrix (4 x 4)
*/
__global__ void kernel_rotate(const cuFloatComplex* __restrict spinor,
cuFloatComplex* spinorrot,
cuFloatComplex* spinoraux,
const cuFloatComplex* __restrict Rinv)
{
__shared__ cuFloatComplex Rinv_loc[16];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int lj = threadIdx.y;
int lk = threadIdx.z;
if(lj < 4 && lk < 4)
Rinv_loc[lj*4 + lk] = Rinv[lj*4 + lk];
__syncthreads();
if(i < d_L && j < d_L)
{
const int ij = at(i,j,0);
const int ijk = ij + k;
spinorrot[ijk] = Rinv_loc[4*k + 0] * spinor[ij + 0] +
Rinv_loc[4*k + 1] * spinor[ij + 1] +
Rinv_loc[4*k + 2] * spinor[ij + 2] +
Rinv_loc[4*k + 3] * spinor[ij + 3];
spinoraux[ijk] = spinorrot[ijk];
}
}
/**
* Rotate spinorrot back and store the result in spinor
* @param spinor device pointer spinor
* @param spinor device pointer spinorrot
* @param R rotation matrix (4 x 4)
*/
__global__ void kernel_rotate_back(cuFloatComplex* spinor,
const cuFloatComplex* __restrict spinorrot,
const cuFloatComplex* __restrict R)
{
__shared__ cuFloatComplex R_loc[16];
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int k = blockIdx.z*blockDim.z + threadIdx.z;
int lj = threadIdx.y;
int lk = threadIdx.z;
if(lj < 4 && lk < 4)
R_loc[lj*4 + lk] = R[lj*4 + lk];
__syncthreads();
if(i < d_L && j < d_L)
{
const int ij = at(i,j,0);
spinor[ij+k] = R_loc[4*k + 0] * spinorrot[ij + 0] +
R_loc[4*k + 1] * spinorrot[ij + 1] +
R_loc[4*k + 2] * spinorrot[ij + 2] +
R_loc[4*k + 3] * spinorrot[ij + 3];
}
}
/**
* Collide and stream with matrix Q_X
* @param spinorrot device pointer spinorrot
* @param spinoraux device pointer spinoraux
* @param V device pointer potential
*/
__global__ void kernel_collide_Q_X(cuFloatComplex* spinorrot,
const cuFloatComplex* __restrict spinoraux,
const float* __restrict V)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ia = (i + 1) % d_L;
int ik = (i - 1 + d_L) % d_L;
// Precomputed indices
const int i_j = at(i, j,0);
const int ia_j = at(ia,j,0);
const int ik_j = at(ik,j,0);
const float m = 0.5f * d_mass* d_dt;
const float g = 0.5f * V[i*d_L +j] * d_dt;
const float omega = m*m - g*g;
const cuFloatComplex a_nom = make_cuFloatComplex(1.0f - 0.25f*omega, 0.0f);
const cuFloatComplex a_den = make_cuFloatComplex(1.0f + 0.25f*omega, -1.0f*g);
const cuFloatComplex b_nom = make_cuFloatComplex(m, 0.0f);
const cuFloatComplex a = a_nom / a_den;
const cuFloatComplex b = b_nom / a_den;
const cuFloatComplex neg_bi = b * make_cuFloatComplex( 0.0f, -1.0f);
const cuFloatComplex pos_bi = b * make_cuFloatComplex( 0.0f, 1.0f);
// The matrix multiplication has been unrolled and 0 entries in Qhat
// have been skipped
// Qhat = X^(-1) * Q * X
spinorrot[ia_j + 0] = a * spinoraux[i_j+0] + neg_bi * spinoraux[i_j+3];
spinorrot[ia_j + 1] = a * spinoraux[i_j+1] + pos_bi * spinoraux[i_j+2];
spinorrot[ik_j + 2] = pos_bi * spinoraux[i_j+1] + a * spinoraux[i_j+2];
spinorrot[ik_j + 3] = neg_bi * spinoraux[i_j+0] + a * spinoraux[i_j+3];
}
}
/**
* Collide and stream with matrix Q_Y
* @param spinorrot device pointer spinorrot
* @param spinoraux device pointer spinoraux
* @param V device pointer potential
*/
__global__ void kernel_collide_Q_Y(cuFloatComplex* spinorrot,
const cuFloatComplex* __restrict spinoraux,
const float* __restrict V)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ja = (j + 1) % d_L;
int jk = (j - 1 + d_L) % d_L;
// Precomputed indices
const int i_j = at(i,j ,0);
const int i_ja = at(i,ja,0);
const int i_jk = at(i,jk,0);
const float m = 0.5f * d_mass* d_dt;
const float g = 0.5f * V[i*d_L +j] * d_dt;
const float omega = m*m - g*g;
const cuFloatComplex a_nom = make_cuFloatComplex(1.0f - 0.25f*omega, 0.0f);
const cuFloatComplex b_nom = make_cuFloatComplex(m, 0.0f);
const cuFloatComplex a_den = make_cuFloatComplex(1.0f + 0.25f*omega, -1.0f*g);
const cuFloatComplex a = a_nom / a_den;
const cuFloatComplex b = b_nom / a_den;
const cuFloatComplex neg_bi = b * make_cuFloatComplex( 0.0f, -1.0f);
// The matrix multiplication has been unrolled and 0 entries in Qhat
// have been skipped
// Qhat = Y^(-1) * Q * Y
spinorrot[i_ja + 0] = a * spinoraux[i_j+0] + neg_bi * spinoraux[i_j+3];
spinorrot[i_ja + 1] = a * spinoraux[i_j+1] + neg_bi * spinoraux[i_j+2];
spinorrot[i_jk + 2] = neg_bi * spinoraux[i_j+1] + a * spinoraux[i_j+2];
spinorrot[i_jk + 3] = neg_bi * spinoraux[i_j+0] + a * spinoraux[i_j+3];
}
}
/**
* Update the potential array using the GP potential
* @param V device pointer potential array
* @param spinor device pointer spinor
*/
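// V(i, j) = g * local density, i.e. the coupling constant times the summed squared norms of the
// four spinor components (cuCnormf is assumed to return |z|^2).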
__global__ void kernel_set_potential(float* V,
const cuFloatComplex* __restrict spinor)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
V[d_L*i + j] = d_g * (
cuCnormf(spinor[at(i,j,0)]) + cuCnormf(spinor[at(i,j,1)]) +
cuCnormf(spinor[at(i,j,2)]) + cuCnormf(spinor[at(i,j,3)]) );
}
void QLB::evolution_GPU()
{
// Update potential array if needed
if(V_indx_ == 3) kernel_set_potential<<< grid1_, block1_ >>>(d_V_, d_spinor_);
// Rotate with X^(-1)
kernel_rotate<<< grid4_, block4_ >>>(d_spinor_, d_spinorrot_, d_spinoraux_, d_Xinv);
CUDA_CHECK_KERNEL
// Collide & stream with Q_X
kernel_collide_Q_X<<< grid1_, block1_ >>>(d_spinorrot_, d_spinoraux_, d_V_);
CUDA_CHECK_KERNEL
// Rotate back with X
kernel_rotate_back<<< grid4_, block4_ >>>(d_spinor_, d_spinorrot_, d_X);
CUDA_CHECK_KERNEL
// Rotate with Y^(-1)
kernel_rotate<<< grid4_, block4_ >>>(d_spinor_, d_spinorrot_, d_spinoraux_, d_Yinv);
CUDA_CHECK_KERNEL
// Collide & stream with Q_Y
kernel_collide_Q_Y<<< grid1_, block1_ >>>(d_spinorrot_, d_spinoraux_, d_V_);
CUDA_CHECK_KERNEL
// Rotate back with Y
kernel_rotate_back<<< grid4_, block4_ >>>(d_spinor_, d_spinorrot_, d_Y);
CUDA_CHECK_KERNEL
cudaDeviceSynchronize();
// Calculate the spreads
if( (opt_.plot() & QLBopt::spread) >> 1 || (opt_.plot() & QLBopt::all) )
{
copy_from_device_to_host(d_spinor_, spinor_);
cudaDeviceSynchronize();
calculate_spread();
}
// Update time;
t_ += 1;
}
// ================================= GRAPHICS ==================================
// Unrolled loop for the current
#define CURRENT_UNROLLED_LOOP(i,j) (CURRENT((i),(j),0)+CURRENT((i),(j),1)+\
CURRENT((i),(j),2)+CURRENT((i),(j),3))
#define CURRENT(i,j,is) (CURRENT_1((i),(j),(is),0)+CURRENT_1((i),(j),(is),1)+\
CURRENT_1((i),(j),(is),2)+CURRENT_1((i),(j),(is),3))
#define CURRENT_1(i,j,is,js) (cuCrealf(cuConjf(\
d_ptr[at((i),(j),(is))])*alpha[(is)*4 + (js)]*d_ptr[at((i),(j),(js))]))
/**
* Calculate the vertices (spinors,density or current) and copy them to the
* vertex VBO.
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
* @param alpha pointer to the alpha matrix (unused if we don't calculate
* the current)
* @param alpha pointer to the beta matrix (unused if we don't calculate
* the current)
*/
__global__ void kernel_calculate_vertex_scene(float3* vbo_ptr,
const cuFloatComplex* __restrict d_ptr,
const cuFloatComplex* __restrict alpha,
const cuFloatComplex* __restrict beta)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
// Select the right scene (all warps always do the same)
if(d_current_scene < 4)
{
int k = d_current_scene;
vbo_ptr[d_L*i + j].y = d_scaling * cuCnormf( d_ptr[at(i,j,k)] );
}
else if(d_current_scene == 4)
{
vbo_ptr[d_L*i + j].y = d_scaling * (
cuCnormf( d_ptr[at(i,j,0)]) + cuCnormf( d_ptr[at(i,j,1)]) +
cuCnormf( d_ptr[at(i,j,2)]) + cuCnormf( d_ptr[at(i,j,3)]) );
}
else
{
vbo_ptr[d_L*i + j].y = d_scaling*( CURRENT_UNROLLED_LOOP(i,j) );
}
}
}
#define y(i,j) 3*((i)*L_ + (j)) + 1
void QLB::calculate_vertex_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_vertex.map();
float3* vbo_ptr = vbo_vertex.get_device_pointer();
if(current_scene_ < 5)
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, d_alphaX, d_beta);
else
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
vbo_vertex.unmap();
#else // Mac OSX
if(current_scene_ < 5)
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(d_vertex_ptr_, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(d_vertex_ptr_, d_spinor_, d_alphaX, d_beta);
else
kernel_calculate_vertex_scene<<< grid1_, block1_ >>>(d_vertex_ptr_, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
cuassert(cudaMemcpy(array_vertex_.data(), d_vertex_ptr_,
sizeof(float) * array_vertex_.size(),
cudaMemcpyDeviceToHost));
// Copy vertex array to vertex VBO
vbo_vertex.bind();
vbo_vertex.BufferSubData(0, array_vertex_.size()*sizeof(float),
&array_vertex_[0]);
vbo_vertex.unbind();
#endif
}
#undef y
/**
* Calculate the vertices (by taking the abs of the potential V) and copy them
* to the vertex VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the potential array V
*/
__global__ void kernel_calculate_vertex_V(float3* vbo_ptr, float* d_ptr)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
vbo_ptr[d_L*i + j].y = d_scaling * fabsf( d_ptr[i*d_L +j] ) - 0.005f*d_L;
}
#define y(i,j) 3*((i)*L_ + (j)) + 1
void QLB::calculate_vertex_V_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_vertex.map();
float3* vbo_ptr = vbo_vertex.get_device_pointer();
kernel_calculate_vertex_V<<< grid1_, block1_ >>>(vbo_ptr, d_V_);
CUDA_CHECK_KERNEL
vbo_vertex.unmap();
#else // Mac OSX
kernel_calculate_vertex_V<<< grid1_, block1_ >>>(d_vertex_ptr_, d_V_);
CUDA_CHECK_KERNEL
cuassert(cudaMemcpy(array_vertex_.data(), d_vertex_ptr_,
sizeof(float) * array_vertex_.size(),
cudaMemcpyDeviceToHost));
// Copy vertex array to vertex VBO
vbo_vertex.bind();
vbo_vertex.BufferSubData(0, array_vertex_.size()*sizeof(float),
&array_vertex_[0]);
vbo_vertex.unbind();
#endif
}
#undef y
/**
* Calculate the normals of the spinors and copy them to the normal VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the spinors
* @param alpha pointer to the alpha matrix (unused if we don't calculate
* the current)
* @param beta pointer to the beta matrix (unused if we don't calculate
* the current)
*/
__global__ void kernel_calculate_normal_scene(float3* vbo_ptr,
const cuFloatComplex* __restrict d_ptr,
const cuFloatComplex* __restrict alpha,
const cuFloatComplex* __restrict beta)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ik = (i + 1) % d_L;
int jk = (d_L - 1 + j) % d_L;
float vertex_i_j, vertex_ik_j, vertex_i_jk;
// Select the right scene (all warps always do the same)
if(d_current_scene < 4)
{
int k = d_current_scene;
vertex_i_j = cuCnormf( d_ptr[at(i ,j ,k)] );
vertex_ik_j = cuCnormf( d_ptr[at(ik,j ,k)] );
vertex_i_jk = cuCnormf( d_ptr[at(i ,jk,k)] );
}
else if(d_current_scene == 4)
{
vertex_i_j = cuCnormf(d_ptr[at(i,j,0)]) + cuCnormf(d_ptr[at(i,j,1)]) +
cuCnormf(d_ptr[at(i,j,2)]) + cuCnormf(d_ptr[at(i,j,3)]);
vertex_ik_j = cuCnormf(d_ptr[at(ik,j,0)]) + cuCnormf(d_ptr[at(ik,j,1)]) +
cuCnormf(d_ptr[at(ik,j,2)]) + cuCnormf(d_ptr[at(ik,j,3)]);
vertex_i_jk = cuCnormf(d_ptr[at(i,jk,0)]) + cuCnormf(d_ptr[at(i,jk,1)]) +
cuCnormf(d_ptr[at(i,jk,2)]) + cuCnormf(d_ptr[at(i,jk,3)]);
}
else
{
vertex_i_j = CURRENT_UNROLLED_LOOP(i ,j);
vertex_ik_j = CURRENT_UNROLLED_LOOP(ik ,j);
vertex_i_jk = CURRENT_UNROLLED_LOOP(i ,jk);
}
// x
float x2 = d_scaling * vertex_i_j;
// a
float a1 = d_dx;
float a2 = d_scaling * vertex_ik_j - x2;
// b
float b2 = d_scaling * vertex_i_jk - x2;
float b3 = -d_dx;
// n = a x b
float3 n;
n.x = a2*b3;
n.y = -a1*b3;
n.z = a1*b2;
// normalize
float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z);
vbo_ptr[d_L*i + j].x = n.x/norm;
vbo_ptr[d_L*i + j].y = n.y/norm;
vbo_ptr[d_L*i + j].z = n.z/norm;
}
}
void QLB::calculate_normal_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_normal.map();
float3* vbo_ptr = vbo_normal.get_device_pointer();
if(current_scene_ < 5)
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, d_alphaX, d_beta);
else
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(vbo_ptr, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
vbo_normal.unmap();
#else // Mac OSX
if(current_scene_ < 5)
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(d_normal_ptr_, d_spinor_, NULL, NULL);
else if(current_scene_ == 5)
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(d_normal_ptr_, d_spinor_, d_alphaX, d_beta);
else
kernel_calculate_normal_scene<<< grid1_, block1_ >>>(d_normal_ptr_, d_spinor_, d_alphaY, d_beta);
CUDA_CHECK_KERNEL
cuassert(cudaMemcpy(array_normal_.data(), d_normal_ptr_,
sizeof(float) * array_normal_.size(),
cudaMemcpyDeviceToHost));
vbo_normal.bind();
vbo_normal.BufferSubData(0, array_normal_.size()*sizeof(float),
&array_normal_[0]);
vbo_normal.unbind();
#endif
}
/**
* Calculate the normals of the potential V and copy them to the normal VBO
* @param vbo_ptr pointer to the VBO
* @param d_ptr pointer to the potential array V
*/
__global__ void kernel_calculate_normal_V(float3* vbo_ptr, float* d_ptr)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < d_L && j < d_L)
{
int ik = (i + 1) % d_L;
int jk = (d_L - 1 + j) % d_L;
// x
float x2 = d_scaling * fabsf( d_ptr[i*d_L +j] );
// a
float a1 = d_dx;
float a2 = d_scaling * fabsf( d_ptr[ik*d_L +j] ) - x2;
// b
float b2 = d_scaling * fabsf( d_ptr[i*d_L +jk] ) - x2;
float b3 = -d_dx;
// n = a x b
float3 n;
n.x = a2*b3;
n.y = -a1*b3;
n.z = a1*b2;
// normalize
float norm = sqrtf(n.x*n.x + n.y*n.y + n.z*n.z);
vbo_ptr[d_L*i + j].x = n.x/norm;
vbo_ptr[d_L*i + j].y = n.y/norm;
vbo_ptr[d_L*i + j].z = n.z/norm;
}
}
void QLB::calculate_normal_V_cuda()
{
#ifndef QLB_CUDA_GL_WORKAROUND
vbo_normal.map();
float3* vbo_ptr = vbo_normal.get_device_pointer();
kernel_calculate_normal_V<<< grid1_, block1_ >>>(vbo_ptr, d_V_);
CUDA_CHECK_KERNEL
vbo_normal.unmap();
#else // Mac OSX
kernel_calculate_normal_V<<< grid1_, block1_ >>>(d_normal_ptr_, d_V_);
CUDA_CHECK_KERNEL
cuassert(cudaMemcpy(array_normal_.data(), d_normal_ptr_,
sizeof(float) * array_normal_.size(),
cudaMemcpyDeviceToHost));
vbo_normal.bind();
vbo_normal.BufferSubData(0, array_normal_.size()*sizeof(float),
&array_normal_[0]);
vbo_normal.unbind();
#endif
}
|
3f33a763254f1a9e00b03d50a176c712d2176395.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* levenberg_marquardt.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#ifndef __LEVENBERG
#define __LEVENBERG
#include "solver_mult_inverse.cu"
#include "diffmodels.cuh"
#include "options.h"
//CPU version in nonlin.h
__device__ const double EPS_gpu = 2.0e-16; //Loosely based on NRinC 20.1
//CPU version in nonlin.cpp
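// Convergence test: true once the cost-function change satisfies
// 2*|cfo - cfn| <= cftol * (|cfo| + |cfn| + EPS_gpu)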
__device__ inline bool zero_cf_diff_conv(double* cfo,double* cfn,double* cftol){
return(2.0*fabs(*cfo-*cfn) <= *cftol*(fabs(*cfo)+fabs(*cfn)+EPS_gpu));
}
__device__ void levenberg_marquardt_PVM_single_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_single(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,pcf);
__syncthreads();
while (!(*success&&niter++>=maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_single(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,x,_d,sumf,grad);
__syncthreads();
hess_PVM_single(myparams,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,x,_d,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_single(step,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)){
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
__device__ void levenberg_marquardt_PVM_single_c_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_single_c(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,pcf);
__syncthreads();
while (!(*success&&niter++ >= maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_single_c(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,f_deriv,x,_d,sumf,grad);
__syncthreads();
hess_PVM_single_c(myparams,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,f_deriv,x,_d,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_single_c(step,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)) {
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
__device__ void levenberg_marquardt_PVM_multi_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const float R,
const float invR,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
const int Gamma_for_ball_only,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _a, //shared memory
float* _b, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_multi(myparams,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,reduction,fs,x,_a,_b,sumf,pcf);
__syncthreads();
while (!(*success&&niter++ >= maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_multi(myparams,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,
idSubVOX,Gamma_for_ball_only,J,reduction,fs,x,_a,_b,sumf,grad);
__syncthreads();
hess_PVM_multi(myparams,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,J,reduction,fs,x,_a,_b,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_multi(step,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,reduction,fs,x,_a,_b,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)) {
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
#endif
| 3f33a763254f1a9e00b03d50a176c712d2176395.cu | /* levenberg_marquardt.cu
Tim Behrens, Saad Jbabdi, Stam Sotiropoulos, Moises Hernandez - FMRIB Image Analysis Group
Copyright (C) 2005 University of Oxford */
/* Part of FSL - FMRIB's Software Library
http://www.fmrib.ox.ac.uk/fsl
[email protected]
Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
Imaging of the Brain), Department of Clinical Neurology, Oxford
University, Oxford, UK
LICENCE
FMRIB Software Library, Release 5.0 (c) 2012, The University of
Oxford (the "Software")
The Software remains the property of the University of Oxford ("the
University").
The Software is distributed "AS IS" under this Licence solely for
non-commercial use in the hope that it will be useful, but in order
that the University as a charitable foundation protects its assets for
the benefit of its educational and research purposes, the University
makes clear that no condition is made or to be implied, nor is any
warranty given or to be implied, as to the accuracy of the Software,
or that it will be suitable for any particular purpose or for use
under any specific conditions. Furthermore, the University disclaims
all responsibility for the use which is made of the Software. It
further disclaims any liability for the outcomes arising from using
the Software.
The Licensee agrees to indemnify the University and hold the
University harmless from and against any and all claims, damages and
liabilities asserted by third parties (including claims for
negligence) which arise directly or indirectly from the use of the
Software or the sale of any products based on the Software.
No part of the Software may be reproduced, modified, transmitted or
transferred in any form or by any means, electronic or mechanical,
without the express permission of the University. The permission of
the University is not required if the said reproduction, modification,
transmission or transference is done without financial return, the
conditions of this Licence are imposed upon the receiver of the
product, and all original and amended source code is included in any
transmitted product. You may be held legally responsible for any
copyright infringement that is caused or encouraged by your failure to
abide by these terms and conditions.
You are not permitted under this Licence to use this Software
commercially. Use for which any financial return is received shall be
defined as commercial use, and includes (1) integration of all or part
of the source code or the Software into a product for sale or license
by or on behalf of Licensee to third parties or (2) use of the
Software or any derivative of it for research with the final aim of
developing software products for sale or license to a third party or
(3) use of the Software or any derivative of it for research with the
final aim of developing non-software products for sale or license to a
third party, or (4) use of the Software to provide any service to an
external organisation for which payment is received. If you are
interested in using the Software commercially, please contact Oxford
University Innovation ("OUI"), the technology transfer company of the
University, to negotiate a licence. Contact details are:
[email protected] quoting reference DE/9564. */
#ifndef __LEVENBERG
#define __LEVENBERG
#include "solver_mult_inverse.cu"
#include "diffmodels.cuh"
#include "options.h"
//CPU version in nonlin.h
__device__ const double EPS_gpu = 2.0e-16; //Loosely based on NRinC 20.1
//CPU version in nonlin.cpp
__device__ inline bool zero_cf_diff_conv(double* cfo,double* cfn,double* cftol){
return(2.0*fabs(*cfo-*cfn) <= *cftol*(fabs(*cfo)+fabs(*cfn)+EPS_gpu));
}
__device__ void levenberg_marquardt_PVM_single_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_single(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,pcf);
__syncthreads();
while (!(*success&&niter++>=maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_single(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,x,_d,sumf,grad);
__syncthreads();
hess_PVM_single(myparams,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,x,_d,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_single(step,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)){
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
__device__ void levenberg_marquardt_PVM_single_c_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* f_deriv, //shared memory
float* x, //shared memory
float* _d, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_single_c(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,pcf);
__syncthreads();
while (!(*success&&niter++ >= maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_single_c(myparams,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,f_deriv,x,_d,sumf,grad);
__syncthreads();
hess_PVM_single_c(myparams,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,J,reduction,fs,f_deriv,x,_d,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_single_c(step,mydata,bvecs,bvals,ndirections,nfib,nparams,m_include_f0,idSubVOX,reduction,fs,x,_d,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)) {
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
__device__ void levenberg_marquardt_PVM_multi_gpu( //INPUT
const float* mydata,
const float* bvecs,
const float* bvals,
const float R,
const float invR,
const int ndirections,
const int nfib,
const int nparams,
const bool m_include_f0,
const int idSubVOX,
const int Gamma_for_ball_only,
float* step, //shared memory
float* grad, //shared memory
float* hess, //shared memory
float* inverse, //shared memory
double* pcf, //shared memory
double* ncf, //shared memory
double* lambda, //shared memory
double* cftol, //shared memory
double* ltol, //shared memory
double* olambda, //shared memory
int* success, //shared memory
int* end, //shared memory
float* J, //shared memory
float* reduction, //shared memory
float* fs, //shared memory
float* x, //shared memory
float* _a, //shared memory
float* _b, //shared memory
float* sumf, //shared memory
float* C, //shared memory
float* el, //shared memory
int* indx, //shared memory
//INPUT-OUTPUT
float* myparams) //shared memory
{
int niter=0;
int maxiter=200;
if(idSubVOX==0){
*end=false;
*lambda=0.1;
*cftol=1.0e-8;
*ltol=1.0e20;
*success = true;
*olambda = 0.0;
*ncf=0;
}
cf_PVM_multi(myparams,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,reduction,fs,x,_a,_b,sumf,pcf);
__syncthreads();
while (!(*success&&niter++ >= maxiter)){ //niter is only incremented after a successful step: when !*success the && short-circuits and niter++ is skipped
//function cost has been decreased, we have advanced.
if(*success){
grad_PVM_multi(myparams,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,
idSubVOX,Gamma_for_ball_only,J,reduction,fs,x,_a,_b,sumf,grad);
__syncthreads();
hess_PVM_multi(myparams,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,J,reduction,fs,x,_a,_b,sumf,hess);
}
if(idSubVOX==0){
for (int i=0; i<nparams; i++) {
hess[(i*nparams)+i]+=*lambda-*olambda; //Levenberg LM_L
}
solver(hess,grad,nparams,C,el,indx,inverse);
for (int i=0;i<nparams;i++){
step[i]=-inverse[i];
}
for(int i=0;i<nparams;i++){
step[i]=myparams[i]+step[i];
}
}
__syncthreads();
cf_PVM_multi(step,mydata,bvecs,bvals,R,invR,ndirections,nfib,nparams,m_include_f0,idSubVOX,Gamma_for_ball_only,reduction,fs,x,_a,_b,sumf,ncf);
if(idSubVOX==0){
if (*success = (*ncf < *pcf)) {
*olambda = 0.0;
for(int i=0;i<nparams;i++){
myparams[i]=step[i];
}
*lambda=*lambda/10.0;
if (zero_cf_diff_conv(pcf,ncf,cftol)){
*end=true;
}
*pcf=*ncf;
}else{
*olambda=*lambda;
*lambda=*lambda*10.0;
if(*lambda> *ltol){
*end=true;
}
}
}
__syncthreads();
if(*end) return;
}
}
#endif
|
0a0996a3fb08475e1f1867ba681a60f2fece535c.hip | // !!! This is a file automatically generated by hipify!!!
#include "helper.cuh"
void printArray(float* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(int* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(bool* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(char* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
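// Mean of the "active" entries: values <= 0.0001 are treated as unused and skipped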
float avgArray(float* array, int length)
{
int n = 0;
float sum = 0;
for (int i = 0; i < length; ++i) {
if (array[i] > 0.0001) {
++n;
sum += array[i];
}
}
std::cout << "sum is " << sum << " and " << n << " of " << length << " remain" << std::endl;
return sum/n;
}
void copyArray(float* arrayTo, float* arrayFrom, int length)
{
for (int i = 0; i < length; ++i) {
arrayTo[i] = arrayFrom[i];
}
}
float calculateUnitBallVolume(int dim)
{
if (dim == 0)
return 1;
else if (dim == 1)
return 2;
return 2*M_PI/dim*calculateUnitBallVolume(dim-2);
}
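// Connection-ball radius r(n) = gamma * (log(n)/n)^(1/dim), where gamma is built from
// the unit-ball volume above (computed via the recursion V_dim = (2*pi/dim) * V_(dim-2))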
float calculateConnectionBallRadius(int dim, int samplesCount)
{
float coverage = 0.0;
float eta = 0.5;
float dim_recip = 1.0/dim;
float gammaval = (1.0+eta)*2.0*::pow(dim_recip,dim_recip)*::pow(((1-coverage)/calculateUnitBallVolume(dim)),dim_recip);
float scalingFn = log((float) samplesCount)/samplesCount;
return gammaval * ::pow(scalingFn, dim_recip);
}
void multiplyArrays(float* A, float* B, float* C, int rowsA, int colsA, int rowsB, int colsB)
{
if (colsA != rowsB)
std::cout << " ERROR: Matrix dimensions do not match: [" << rowsA << "x" << colsA << "] * [" << rowsB << "x" << colsB << "]" << std::cout;
int inners = colsA;
for (int i = 0; i < rowsA; ++i)
for (int j = 0; j < colsB; ++j) {
C[i*colsB+j]=0;
for (int k = 0; k < inners; ++k)
C[i*colsB+j] += A[i*inners+k]*B[k*colsB+j];
}
}
void scalarMultiplyArray(float *A, float b, float *C, int rows, int cols) {
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
C[i*cols+j] = b*A[i*cols+j];
}
void subtractArrays(float* A, float* B, float* C, int rows, int cols)
{
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
C[i*cols+j] = A[i*cols+j] - B[i*cols+j];
}
void horizConcat(float* A, float *B, float *C, int nA, int nB, int m)
{
int n = nA + nB;
for (int i = 0; i < m; ++i) {
for (int j = 0; j < nA; ++j)
C[i*n+j] = A[i*nA+j];
for (int j = nA; j < n; ++j)
C[i*n+j] = B[i*nB+(j-nA)];
}
}
void vertConcat(float* A, float *B, float *C, int n, int mA, int mB)
{
int m = mA + mB;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < mA; ++j)
C[j*n+i] = A[j*n+i];
for (int j = mA; j < m; ++j)
C[j*n+i] = B[(j-mA)*n+i];
}
}
void transpose(float* A, int rows, int cols)
{
float trans[rows*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
trans[i + j*rows] = A[j + i*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
A[j + i*cols] = trans[j + i*cols];
}
void transpose(float* Atrans, float* A, int rows, int cols)
{
float trans[rows*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
trans[i + j*rows] = A[j + i*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
Atrans[j + i*cols] = trans[j + i*cols];
}
int printSolution(int samplesCount, float *d_samples, int *d_edges, float *d_costs)
{
std::ofstream solnFile;
solnFile.open("soln.txt");
int edges[samplesCount];
float costs[samplesCount];
float samples[samplesCount*DIM];
hipMemcpy(edges, d_edges, sizeof(int)*samplesCount, hipMemcpyDeviceToHost);
hipMemcpy(costs, d_costs, sizeof(float)*samplesCount, hipMemcpyDeviceToHost);
hipMemcpy(samples, d_samples, sizeof(float)*DIM*samplesCount, hipMemcpyDeviceToHost);
solnFile << "edges = [";
printArray(edges, 1, samplesCount, solnFile);
solnFile << "];" << std::endl << "samples = [";
printArray(samples, samplesCount, DIM, solnFile);
solnFile << "];" << std::endl << "costs = [";
printArray(costs, 1, samplesCount, solnFile);
solnFile << "];" << std::endl;
solnFile.close();
return 0;
} | 0a0996a3fb08475e1f1867ba681a60f2fece535c.cu | #include "helper.cuh"
void printArray(float* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(int* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(bool* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
void printArray(char* array, int dim1, int dim2, std::ostream& stream)
{
for (int i = 0; i < dim1; ++i) {
for (int j = 0; j < dim2; ++j) {
stream << array[i*dim2 + j] << " ";
}
stream << std::endl;
}
}
float avgArray(float* array, int length)
{
int n = 0;
float sum = 0;
for (int i = 0; i < length; ++i) {
if (array[i] > 0.0001) {
++n;
sum += array[i];
}
}
std::cout << "sum is " << sum << " and " << n << " of " << length << " remain" << std::endl;
return sum/n;
}
void copyArray(float* arrayTo, float* arrayFrom, int length)
{
for (int i = 0; i < length; ++i) {
arrayTo[i] = arrayFrom[i];
}
}
float calculateUnitBallVolume(int dim)
{
if (dim == 0)
return 1;
else if (dim == 1)
return 2;
return 2*M_PI/dim*calculateUnitBallVolume(dim-2);
}
float calculateConnectionBallRadius(int dim, int samplesCount)
{
float coverage = 0.0;
float eta = 0.5;
float dim_recip = 1.0/dim;
float gammaval = (1.0+eta)*2.0*std::pow(dim_recip,dim_recip)*std::pow(((1-coverage)/calculateUnitBallVolume(dim)),dim_recip);
float scalingFn = log((float) samplesCount)/samplesCount;
return gammaval * std::pow(scalingFn, dim_recip);
}
void multiplyArrays(float* A, float* B, float* C, int rowsA, int colsA, int rowsB, int colsB)
{
if (colsA != rowsB)
std::cout << " ERROR: Matrix dimensions do not match: [" << rowsA << "x" << colsA << "] * [" << rowsB << "x" << colsB << "]" << std::cout;
int inners = colsA;
for (int i = 0; i < rowsA; ++i)
for (int j = 0; j < colsB; ++j) {
C[i*colsB+j]=0;
for (int k = 0; k < inners; ++k)
C[i*colsB+j] += A[i*inners+k]*B[k*colsB+j];
}
}
void scalarMultiplyArray(float *A, float b, float *C, int rows, int cols) {
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
C[i*cols+j] = b*A[i*cols+j];
}
void subtractArrays(float* A, float* B, float* C, int rows, int cols)
{
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
C[i*cols+j] = A[i*cols+j] - B[i*cols+j];
}
void horizConcat(float* A, float *B, float *C, int nA, int nB, int m)
{
int n = nA + nB;
for (int i = 0; i < m; ++i) {
for (int j = 0; j < nA; ++j)
C[i*n+j] = A[i*nA+j];
for (int j = nA; j < n; ++j)
C[i*n+j] = B[i*nB+(j-nA)];
}
}
void vertConcat(float* A, float *B, float *C, int n, int mA, int mB)
{
int m = mA + mB;
for (int i = 0; i < n; ++i) {
for (int j = 0; j < mA; ++j)
C[j*n+i] = A[j*n+i];
for (int j = mA; j < m; ++j)
C[j*n+i] = B[(j-mA)*n+i];
}
}
void transpose(float* A, int rows, int cols)
{
float trans[rows*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
trans[i + j*rows] = A[j + i*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
A[j + i*cols] = trans[j + i*cols];
}
void transpose(float* Atrans, float* A, int rows, int cols)
{
float trans[rows*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
trans[i + j*rows] = A[j + i*cols];
for (int i = 0; i < rows; ++i)
for (int j = 0; j < cols; ++j)
Atrans[j + i*cols] = trans[j + i*cols];
}
int printSolution(int samplesCount, float *d_samples, int *d_edges, float *d_costs)
{
std::ofstream solnFile;
solnFile.open("soln.txt");
int edges[samplesCount];
float costs[samplesCount];
float samples[samplesCount*DIM];
cudaMemcpy(edges, d_edges, sizeof(int)*samplesCount, cudaMemcpyDeviceToHost);
cudaMemcpy(costs, d_costs, sizeof(float)*samplesCount, cudaMemcpyDeviceToHost);
cudaMemcpy(samples, d_samples, sizeof(float)*DIM*samplesCount, cudaMemcpyDeviceToHost);
solnFile << "edges = [";
printArray(edges, 1, samplesCount, solnFile);
solnFile << "];" << std::endl << "samples = [";
printArray(samples, samplesCount, DIM, solnFile);
solnFile << "];" << std::endl << "costs = [";
printArray(costs, 1, samplesCount, solnFile);
solnFile << "];" << std::endl;
solnFile.close();
return 0;
} |
c2342f65d47191a998397e6e441a61c3e0ab529c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in the main program folder) needed to recognized input parameters
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/cuda/cuda.h" // (in library path specified to compiler) needed by for device functions
#include "../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_2.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_wrapper_2.h" // (in the current directory)
//========================================================================================================================================================================================================200
// FUNCTION
//========================================================================================================================================================================================================200
void
kernel_gpu_cuda_wrapper_2( knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
long *lastKnode,
long *offset_2,
int *start,
int *end,
int *recstart,
int *reclength)
{
//======================================================================================================================================================150
// CPU VARIABLES
//======================================================================================================================================================150
// timer
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//======================================================================================================================================================150
// GPU SETUP
//======================================================================================================================================================150
//====================================================================================================100
// INITIAL DRIVER OVERHEAD
//====================================================================================================100
hipDeviceSynchronize();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count;
int threadsPerBlock;
threadsPerBlock = order < 1024 ? order : 1024;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
time1 = get_time();
//======================================================================================================================================================150
// GPU MEMORY MALLOC
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
hipMalloc((void**)&knodesD, knodes_mem);
checkCUDAError("hipMalloc recordsD");
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
hipMalloc((void**)&currKnodeD, count*sizeof(long));
checkCUDAError("hipMalloc currKnodeD");
//==================================================50
// offsetD
//==================================================50
long *offsetD;
hipMalloc((void**)&offsetD, count*sizeof(long));
checkCUDAError("hipMalloc offsetD");
//==================================================50
// lastKnodeD
//==================================================50
long *lastKnodeD;
hipMalloc((void**)&lastKnodeD, count*sizeof(long));
checkCUDAError("hipMalloc lastKnodeD");
//==================================================50
// offset_2D
//==================================================50
long *offset_2D;
hipMalloc((void**)&offset_2D, count*sizeof(long));
checkCUDAError("hipMalloc offset_2D");
//==================================================50
// startD
//==================================================50
int *startD;
hipMalloc((void**)&startD, count*sizeof(int));
checkCUDAError("hipMalloc startD");
//==================================================50
// endD
//==================================================50
int *endD;
hipMalloc((void**)&endD, count*sizeof(int));
checkCUDAError("hipMalloc endD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
int *ansDStart;
hipMalloc((void**)&ansDStart, count*sizeof(int));
checkCUDAError("hipMalloc ansDStart");
//==================================================50
// ansDLength
//==================================================50
int *ansDLength;
hipMalloc((void**)&ansDLength, count*sizeof(int));
checkCUDAError("hipMalloc ansDLength");
time2 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// knodesD
//==================================================50
hipMemcpy(knodesD, knodes, knodes_mem, hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy memD");
//==================================================50
// currKnodeD
//==================================================50
hipMemcpy(currKnodeD, currKnode, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy currKnodeD");
//==================================================50
// offsetD
//==================================================50
hipMemcpy(offsetD, offset, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy offsetD");
//==================================================50
// lastKnodeD
//==================================================50
hipMemcpy(lastKnodeD, lastKnode, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy lastKnodeD");
//==================================================50
// offset_2D
//==================================================50
hipMemcpy(offset_2D, offset_2, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy offset_2D");
//==================================================50
// startD
//==================================================50
hipMemcpy(startD, start, count*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy startD");
//==================================================50
// endD
//==================================================50
hipMemcpy(endD, end, count*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy endD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
hipMemcpy(ansDStart, recstart, count*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy ansDStart");
//==================================================50
// ansDLength
//==================================================50
hipMemcpy(ansDLength, reclength, count*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy ansDLength");
time3 = get_time();
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
// [GPU] findRangeK kernel
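// One thread block per range query (numBlocks = count); the answer ranges are
// written into ansDStart/ansDLength and copied back to recstart/reclength below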
hipLaunchKernelGGL(( findRangeK), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, maxheight,
knodesD,
knodes_elem,
currKnodeD,
offsetD,
lastKnodeD,
offset_2D,
startD,
endD,
ansDStart,
ansDLength);
hipDeviceSynchronize();
checkCUDAError("findRangeK");
time4 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY (CONTD.)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
hipMemcpy(recstart, ansDStart, count*sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy ansDStart");
//==================================================50
// ansDLength
//==================================================50
hipMemcpy(reclength, ansDLength, count*sizeof(int), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy ansDLength");
time5 = get_time();
//======================================================================================================================================================150
// GPU MEMORY DEALLOCATION
//======================================================================================================================================================150
hipFree(knodesD);
hipFree(currKnodeD);
hipFree(offsetD);
hipFree(lastKnodeD);
hipFree(offset_2D);
hipFree(startD);
hipFree(endD);
hipFree(ansDStart);
hipFree(ansDLength);
time6 = get_time();
//======================================================================================================================================================150
// DISPLAY TIMING
//======================================================================================================================================================150
printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
}
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
| c2342f65d47191a998397e6e441a61c3e0ab529c.cu | #ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in the main program folder) needed to recognized input parameters
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/cuda/cuda.h" // (in library path specified to compiler) needed by for device functions
#include "../util/timer/timer.h" // (in library path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_2.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_wrapper_2.h" // (in the current directory)
//========================================================================================================================================================================================================200
// FUNCTION
//========================================================================================================================================================================================================200
void
kernel_gpu_cuda_wrapper_2( knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
long *lastKnode,
long *offset_2,
int *start,
int *end,
int *recstart,
int *reclength)
{
//======================================================================================================================================================150
// CPU VARIABLES
//======================================================================================================================================================150
// timer
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//======================================================================================================================================================150
// GPU SETUP
//======================================================================================================================================================150
//====================================================================================================100
// INITIAL DRIVER OVERHEAD
//====================================================================================================100
cudaDeviceSynchronize();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count;
int threadsPerBlock;
threadsPerBlock = order < 1024 ? order : 1024;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
time1 = get_time();
//======================================================================================================================================================150
// GPU MEMORY MALLOC
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
cudaMalloc((void**)&knodesD, knodes_mem);
checkCUDAError("cudaMalloc recordsD");
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
cudaMalloc((void**)&currKnodeD, count*sizeof(long));
checkCUDAError("cudaMalloc currKnodeD");
//==================================================50
// offsetD
//==================================================50
long *offsetD;
cudaMalloc((void**)&offsetD, count*sizeof(long));
checkCUDAError("cudaMalloc offsetD");
//==================================================50
// lastKnodeD
//==================================================50
long *lastKnodeD;
cudaMalloc((void**)&lastKnodeD, count*sizeof(long));
checkCUDAError("cudaMalloc lastKnodeD");
//==================================================50
// offset_2D
//==================================================50
long *offset_2D;
cudaMalloc((void**)&offset_2D, count*sizeof(long));
checkCUDAError("cudaMalloc offset_2D");
//==================================================50
// startD
//==================================================50
int *startD;
cudaMalloc((void**)&startD, count*sizeof(int));
checkCUDAError("cudaMalloc startD");
//==================================================50
// endD
//==================================================50
int *endD;
cudaMalloc((void**)&endD, count*sizeof(int));
checkCUDAError("cudaMalloc endD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
int *ansDStart;
cudaMalloc((void**)&ansDStart, count*sizeof(int));
checkCUDAError("cudaMalloc ansDStart");
//==================================================50
// ansDLength
//==================================================50
int *ansDLength;
cudaMalloc((void**)&ansDLength, count*sizeof(int));
checkCUDAError("cudaMalloc ansDLength");
time2 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// knodesD
//==================================================50
cudaMemcpy(knodesD, knodes, knodes_mem, cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy memD");
//==================================================50
// currKnodeD
//==================================================50
cudaMemcpy(currKnodeD, currKnode, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy currKnodeD");
//==================================================50
// offsetD
//==================================================50
cudaMemcpy(offsetD, offset, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy offsetD");
//==================================================50
// lastKnodeD
//==================================================50
cudaMemcpy(lastKnodeD, lastKnode, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy lastKnodeD");
//==================================================50
// offset_2D
//==================================================50
cudaMemcpy(offset_2D, offset_2, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy offset_2D");
//==================================================50
// startD
//==================================================50
cudaMemcpy(startD, start, count*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy startD");
//==================================================50
// endD
//==================================================50
cudaMemcpy(endD, end, count*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy endD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
cudaMemcpy(ansDStart, recstart, count*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy ansDStart");
//==================================================50
// ansDLength
//==================================================50
cudaMemcpy(ansDLength, reclength, count*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy ansDLength");
time3 = get_time();
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
// [GPU] findRangeK kernel
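// Each block walks the tree from the root for one range query: at every level
// the block's threads compare the current node's keys against the query's
// start/end keys to pick the next child, and the matching leaf positions are
// written to ansDStart/ansDLength (see kernel_gpu_cuda_2.cu for details).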
findRangeK<<<numBlocks, threadsPerBlock>>>( maxheight,
knodesD,
knodes_elem,
currKnodeD,
offsetD,
lastKnodeD,
offset_2D,
startD,
endD,
ansDStart,
ansDLength);
cudaDeviceSynchronize();
checkCUDAError("findRangeK");
time4 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY (CONTD.)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansDStart
//==================================================50
cudaMemcpy(recstart, ansDStart, count*sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy ansDStart");
//==================================================50
// ansDLength
//==================================================50
cudaMemcpy(reclength, ansDLength, count*sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy ansDLength");
time5 = get_time();
//======================================================================================================================================================150
// GPU MEMORY DEALLOCATION
//======================================================================================================================================================150
cudaFree(knodesD);
cudaFree(currKnodeD);
cudaFree(offsetD);
cudaFree(lastKnodeD);
cudaFree(offset_2D);
cudaFree(startD);
cudaFree(endD);
cudaFree(ansDStart);
cudaFree(ansDLength);
time6 = get_time();
//======================================================================================================================================================150
// DISPLAY TIMING
//======================================================================================================================================================150
printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
}
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
|
8bfd877d72bf3038a83b63e41d04c092e39db606.hip | // !!! This is a file automatically generated by hipify!!!
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include "kernel.h"
#define max(a,b) ((a<b)?b:a)
#define min(a,b) ((a<b)?a:b)
const int NSIZE_round = NSIZE%16 ? NSIZE+16-NSIZE%16: NSIZE;
const size_t SSIZE = (size_t)NSIZE_round*48*48*48; //Coefs size
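// NSIZE_round pads the spline count to a multiple of 16 so the innermost
// (spline) stride stays aligned; the 48^3 factor presumably covers the
// 45-point grid plus the extra points required by the cubic stencil.
// eval_abc evaluates the four cubic B-spline basis weights at the fractional
// coordinate tx via Horner's rule, one row of the 4x4 coefficient matrix Af
// (or dAf/d2Af for derivatives) per weight.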
void eval_abc(const float *Af, float tx, float *a) {
a[0] = ( ( Af[0] * tx + Af[1] ) * tx + Af[2] ) * tx + Af[3];
a[1] = ( ( Af[4] * tx + Af[5] ) * tx + Af[6] ) * tx + Af[7];
a[2] = ( ( Af[8] * tx + Af[9] ) * tx + Af[10] ) * tx + Af[11];
a[3] = ( ( Af[12] * tx + Af[13] ) * tx + Af[14] ) * tx + Af[15];
}
int main(int argc, char ** argv) {
float *Af = (float*) malloc (sizeof(float)*16);
float *dAf = (float*) malloc (sizeof(float)*16);
float *d2Af = (float*) malloc (sizeof(float)*16);
Af[0]=-0.166667;
Af[1]=0.500000;
Af[2]=-0.500000;
Af[3]=0.166667;
Af[4]=0.500000;
Af[5]=-1.000000;
Af[6]=0.000000;
Af[7]=0.666667;
Af[8]=-0.500000;
Af[9]=0.500000;
Af[10]=0.500000;
Af[11]=0.166667;
Af[12]=0.166667;
Af[13]=0.000000;
Af[14]=0.000000;
Af[15]=0.000000;
dAf[0]=0.000000; d2Af[0]=0.000000;
dAf[1]=-0.500000; d2Af[1]=0.000000;
dAf[2]=1.000000; d2Af[2]=-1.000000;
dAf[3]=-0.500000; d2Af[3]=1.000000;
dAf[4]=0.000000; d2Af[4]=0.000000;
dAf[5]=1.500000; d2Af[5]=0.000000;
dAf[6]=-2.000000; d2Af[6]=3.000000;
dAf[7]=0.000000; d2Af[7]=-2.000000;
dAf[8]=0.000000; d2Af[8]=0.000000;
dAf[9]=-1.500000; d2Af[9]=0.000000;
dAf[10]=1.000000; d2Af[10]=-3.00000;
dAf[11]=0.500000; d2Af[11]=1.000000;
dAf[12]=0.000000; d2Af[12]=0.000000;
dAf[13]=0.500000; d2Af[13]=0.000000;
dAf[14]=0.000000; d2Af[14]=1.000000;
dAf[15]=0.000000; d2Af[15]=0.000000;
float x=0.822387;
float y=0.989919;
float z=0.104573;
float* walkers_vals = (float*) malloc(sizeof(float)*WSIZE*NSIZE);
float* walkers_grads = (float*) malloc(sizeof(float)*WSIZE*MSIZE);
float* walkers_hess = (float*) malloc(sizeof(float)*WSIZE*OSIZE);
float* walkers_x = (float*) malloc(sizeof(float)*WSIZE);
float* walkers_y = (float*) malloc(sizeof(float)*WSIZE);
float* walkers_z = (float*) malloc(sizeof(float)*WSIZE);
for (int i=0; i<WSIZE; i++) {
walkers_x[i] = x + i*1.0/WSIZE;
walkers_y[i] = y + i*1.0/WSIZE;
walkers_z[i] = z + i*1.0/WSIZE;
}
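// Fill the spline coefficient table with a deterministic synthetic pattern so
// the benchmark is reproducible without an external input file.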
float* spline_coefs = (float*) malloc (sizeof(float)*SSIZE);
for(size_t i=0;i<SSIZE;i++)
spline_coefs[i]=sqrt(0.22+i*1.0)*sin(i*1.0);
int spline_num_splines = NSIZE;
int spline_x_grid_start = 0;
int spline_y_grid_start = 0;
int spline_z_grid_start = 0;
int spline_x_grid_num = 45;
int spline_y_grid_num = 45;
int spline_z_grid_num = 45;
int spline_x_stride=NSIZE_round*48*48;
int spline_y_stride=NSIZE_round*48;
int spline_z_stride=NSIZE_round;
int spline_x_grid_delta_inv=45;
int spline_y_grid_delta_inv=45;
int spline_z_grid_delta_inv=45;
float* d_walkers_vals;
hipMalloc((void**)&d_walkers_vals, sizeof(float)*WSIZE*NSIZE);
hipMemcpy(d_walkers_vals, walkers_vals, sizeof(float)*WSIZE*NSIZE, hipMemcpyHostToDevice);
float* d_walkers_grads;
hipMalloc((void**)&d_walkers_grads, sizeof(float)*WSIZE*MSIZE);
hipMemcpy(d_walkers_grads, walkers_grads, sizeof(float)*WSIZE*MSIZE, hipMemcpyHostToDevice);
float* d_walkers_hess;
hipMalloc((void**)&d_walkers_hess, sizeof(float)*WSIZE*OSIZE);
hipMemcpy(d_walkers_hess, walkers_hess, sizeof(float)*WSIZE*OSIZE, hipMemcpyHostToDevice);
float* d_spline_coefs;
hipMalloc((void**)&d_spline_coefs, sizeof(float)*SSIZE);
hipMemcpy(d_spline_coefs, spline_coefs, sizeof(float)*SSIZE, hipMemcpyHostToDevice);
float* d_a;
hipMalloc((void**)&d_a, sizeof(float)*4);
float* d_b;
hipMalloc((void**)&d_b, sizeof(float)*4);
float* d_c;
hipMalloc((void**)&d_c, sizeof(float)*4);
float* d_da;
hipMalloc((void**)&d_da, sizeof(float)*4);
float* d_db;
hipMalloc((void**)&d_db, sizeof(float)*4);
float* d_dc;
hipMalloc((void**)&d_dc, sizeof(float)*4);
float* d_d2a;
hipMalloc((void**)&d_d2a, sizeof(float)*4);
float* d_d2b;
hipMalloc((void**)&d_d2b, sizeof(float)*4);
float* d_d2c;
hipMalloc((void**)&d_d2c, sizeof(float)*4);
double total_time = 0.0;
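// For each walker, the 4-element basis weights (value, first and second
// derivative) are computed on the host and copied to the device; only the
// kernel launch itself is timed below.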
for(int i=0; i<WSIZE; i++) {
float x = walkers_x[i], y = walkers_y[i], z = walkers_z[i];
float ux = x*spline_x_grid_delta_inv;
float uy = y*spline_y_grid_delta_inv;
float uz = z*spline_z_grid_delta_inv;
float ipartx, iparty, ipartz, tx, ty, tz;
float a[4], b[4], c[4], da[4], db[4], dc[4], d2a[4], d2b[4], d2c[4];
intptr_t xs = spline_x_stride;
intptr_t ys = spline_y_stride;
intptr_t zs = spline_z_stride;
x -= spline_x_grid_start;
y -= spline_y_grid_start;
z -= spline_z_grid_start;
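// Split each coordinate into an integer cell index (clamped to the grid) and a
// fractional offset t in [0,1) that feeds the basis-weight evaluation below.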
ipartx = (int) ux; tx = ux-ipartx; int ix = min(max(0,(int) ipartx),spline_x_grid_num-1);
iparty = (int) uy; ty = uy-iparty; int iy = min(max(0,(int) iparty),spline_y_grid_num-1);
ipartz = (int) uz; tz = uz-ipartz; int iz = min(max(0,(int) ipartz),spline_z_grid_num-1);
eval_abc(Af,tx,&a[0]);
hipMemcpy(d_a, a, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(Af,ty,&b[0]);
hipMemcpy(d_b, b, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(Af,tz,&c[0]);
hipMemcpy(d_c, c, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(dAf,tx,&da[0]);
hipMemcpy(d_da, da, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(dAf,ty,&db[0]);
hipMemcpy(d_db, db, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(dAf,tz,&dc[0]);
hipMemcpy(d_dc, dc, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(d2Af,tx,&d2a[0]);
hipMemcpy(d_d2a, d2a, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(d2Af,ty,&d2b[0]);
hipMemcpy(d_d2b, d2b, sizeof(float)*4, hipMemcpyHostToDevice);
eval_abc(d2Af,tz,&d2c[0]);
hipMemcpy(d_d2c, d2c, sizeof(float)*4, hipMemcpyHostToDevice);
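// Grid size is derived from the spline count rounded up to a multiple of the
// 256-thread block size; the bspline kernel (declared in kernel.h, not shown
// here) is assumed to guard any thread index beyond spline_num_splines.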
dim3 global_size((spline_num_splines+255)/256*256);
dim3 local_size(256);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( bspline), dim3(global_size), dim3(local_size), 0, 0,
d_spline_coefs,
xs, ys, zs,
d_walkers_vals,
d_walkers_grads,
d_walkers_hess,
d_a,
d_b,
d_c,
d_da,
d_db,
d_dc,
d_d2a,
d_d2b,
d_d2c,
spline_x_grid_delta_inv,
spline_y_grid_delta_inv,
spline_z_grid_delta_inv,
spline_num_splines,
i, ix, iy, iz );
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
printf("Total kernel execution time %lf (s)\n", total_time * 1e-9);
hipMemcpy(walkers_vals, d_walkers_vals, sizeof(float)*WSIZE*NSIZE, hipMemcpyDeviceToHost);
hipMemcpy(walkers_grads, d_walkers_grads, sizeof(float)*WSIZE*MSIZE, hipMemcpyDeviceToHost);
hipMemcpy(walkers_hess, d_walkers_hess, sizeof(float)*WSIZE*OSIZE, hipMemcpyDeviceToHost);
// collect results for the first walker
float resVal = 0.f;
float resGrad = 0.f;
float resHess = 0.f;
for( int i = 0; i < NSIZE; i++ ) resVal = resVal + walkers_vals[i];
for( int i = 0; i < MSIZE; i++ ) resGrad = resGrad + walkers_grads[i];
for( int i = 0; i < OSIZE; i++ ) resHess = resHess + walkers_hess[i];
printf("walkers[0]->collect([resVal resGrad resHess]) = [%e %e %e]\n",
resVal,resGrad, resHess);
free(Af);
free(dAf);
free(d2Af);
free(walkers_vals);
free(walkers_grads);
free(walkers_hess);
free(walkers_x);
free(walkers_y);
free(walkers_z);
free(spline_coefs);
hipFree(d_walkers_vals);
hipFree(d_walkers_grads);
hipFree(d_walkers_hess);
hipFree(d_spline_coefs);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipFree(d_da);
hipFree(d_db);
hipFree(d_dc);
hipFree(d_d2a);
hipFree(d_d2b);
hipFree(d_d2c);
return 0;
}
| 8bfd877d72bf3038a83b63e41d04c092e39db606.cu | #include <chrono>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cuda.h>
#include "kernel.h"
#define max(a,b) ((a<b)?b:a)
#define min(a,b) ((a<b)?a:b)
const int NSIZE_round = NSIZE%16 ? NSIZE+16-NSIZE%16: NSIZE;
const size_t SSIZE = (size_t)NSIZE_round*48*48*48; //Coefs size
void eval_abc(const float *Af, float tx, float *a) {
a[0] = ( ( Af[0] * tx + Af[1] ) * tx + Af[2] ) * tx + Af[3];
a[1] = ( ( Af[4] * tx + Af[5] ) * tx + Af[6] ) * tx + Af[7];
a[2] = ( ( Af[8] * tx + Af[9] ) * tx + Af[10] ) * tx + Af[11];
a[3] = ( ( Af[12] * tx + Af[13] ) * tx + Af[14] ) * tx + Af[15];
}
int main(int argc, char ** argv) {
float *Af = (float*) malloc (sizeof(float)*16);
float *dAf = (float*) malloc (sizeof(float)*16);
float *d2Af = (float*) malloc (sizeof(float)*16);
Af[0]=-0.166667;
Af[1]=0.500000;
Af[2]=-0.500000;
Af[3]=0.166667;
Af[4]=0.500000;
Af[5]=-1.000000;
Af[6]=0.000000;
Af[7]=0.666667;
Af[8]=-0.500000;
Af[9]=0.500000;
Af[10]=0.500000;
Af[11]=0.166667;
Af[12]=0.166667;
Af[13]=0.000000;
Af[14]=0.000000;
Af[15]=0.000000;
dAf[0]=0.000000; d2Af[0]=0.000000;
dAf[1]=-0.500000; d2Af[1]=0.000000;
dAf[2]=1.000000; d2Af[2]=-1.000000;
dAf[3]=-0.500000; d2Af[3]=1.000000;
dAf[4]=0.000000; d2Af[4]=0.000000;
dAf[5]=1.500000; d2Af[5]=0.000000;
dAf[6]=-2.000000; d2Af[6]=3.000000;
dAf[7]=0.000000; d2Af[7]=-2.000000;
dAf[8]=0.000000; d2Af[8]=0.000000;
dAf[9]=-1.500000; d2Af[9]=0.000000;
dAf[10]=1.000000; d2Af[10]=-3.00000;
dAf[11]=0.500000; d2Af[11]=1.000000;
dAf[12]=0.000000; d2Af[12]=0.000000;
dAf[13]=0.500000; d2Af[13]=0.000000;
dAf[14]=0.000000; d2Af[14]=1.000000;
dAf[15]=0.000000; d2Af[15]=0.000000;
float x=0.822387;
float y=0.989919;
float z=0.104573;
float* walkers_vals = (float*) malloc(sizeof(float)*WSIZE*NSIZE);
float* walkers_grads = (float*) malloc(sizeof(float)*WSIZE*MSIZE);
float* walkers_hess = (float*) malloc(sizeof(float)*WSIZE*OSIZE);
float* walkers_x = (float*) malloc(sizeof(float)*WSIZE);
float* walkers_y = (float*) malloc(sizeof(float)*WSIZE);
float* walkers_z = (float*) malloc(sizeof(float)*WSIZE);
for (int i=0; i<WSIZE; i++) {
walkers_x[i] = x + i*1.0/WSIZE;
walkers_y[i] = y + i*1.0/WSIZE;
walkers_z[i] = z + i*1.0/WSIZE;
}
float* spline_coefs = (float*) malloc (sizeof(float)*SSIZE);
for(size_t i=0;i<SSIZE;i++)
spline_coefs[i]=sqrt(0.22+i*1.0)*sin(i*1.0);
int spline_num_splines = NSIZE;
int spline_x_grid_start = 0;
int spline_y_grid_start = 0;
int spline_z_grid_start = 0;
int spline_x_grid_num = 45;
int spline_y_grid_num = 45;
int spline_z_grid_num = 45;
int spline_x_stride=NSIZE_round*48*48;
int spline_y_stride=NSIZE_round*48;
int spline_z_stride=NSIZE_round;
int spline_x_grid_delta_inv=45;
int spline_y_grid_delta_inv=45;
int spline_z_grid_delta_inv=45;
float* d_walkers_vals;
cudaMalloc((void**)&d_walkers_vals, sizeof(float)*WSIZE*NSIZE);
cudaMemcpy(d_walkers_vals, walkers_vals, sizeof(float)*WSIZE*NSIZE, cudaMemcpyHostToDevice);
float* d_walkers_grads;
cudaMalloc((void**)&d_walkers_grads, sizeof(float)*WSIZE*MSIZE);
cudaMemcpy(d_walkers_grads, walkers_grads, sizeof(float)*WSIZE*MSIZE, cudaMemcpyHostToDevice);
float* d_walkers_hess;
cudaMalloc((void**)&d_walkers_hess, sizeof(float)*WSIZE*OSIZE);
cudaMemcpy(d_walkers_hess, walkers_hess, sizeof(float)*WSIZE*OSIZE, cudaMemcpyHostToDevice);
float* d_spline_coefs;
cudaMalloc((void**)&d_spline_coefs, sizeof(float)*SSIZE);
cudaMemcpy(d_spline_coefs, spline_coefs, sizeof(float)*SSIZE, cudaMemcpyHostToDevice);
float* d_a;
cudaMalloc((void**)&d_a, sizeof(float)*4);
float* d_b;
cudaMalloc((void**)&d_b, sizeof(float)*4);
float* d_c;
cudaMalloc((void**)&d_c, sizeof(float)*4);
float* d_da;
cudaMalloc((void**)&d_da, sizeof(float)*4);
float* d_db;
cudaMalloc((void**)&d_db, sizeof(float)*4);
float* d_dc;
cudaMalloc((void**)&d_dc, sizeof(float)*4);
float* d_d2a;
cudaMalloc((void**)&d_d2a, sizeof(float)*4);
float* d_d2b;
cudaMalloc((void**)&d_d2b, sizeof(float)*4);
float* d_d2c;
cudaMalloc((void**)&d_d2c, sizeof(float)*4);
double total_time = 0.0;
for(int i=0; i<WSIZE; i++) {
float x = walkers_x[i], y = walkers_y[i], z = walkers_z[i];
float ux = x*spline_x_grid_delta_inv;
float uy = y*spline_y_grid_delta_inv;
float uz = z*spline_z_grid_delta_inv;
float ipartx, iparty, ipartz, tx, ty, tz;
float a[4], b[4], c[4], da[4], db[4], dc[4], d2a[4], d2b[4], d2c[4];
intptr_t xs = spline_x_stride;
intptr_t ys = spline_y_stride;
intptr_t zs = spline_z_stride;
x -= spline_x_grid_start;
y -= spline_y_grid_start;
z -= spline_z_grid_start;
ipartx = (int) ux; tx = ux-ipartx; int ix = min(max(0,(int) ipartx),spline_x_grid_num-1);
iparty = (int) uy; ty = uy-iparty; int iy = min(max(0,(int) iparty),spline_y_grid_num-1);
ipartz = (int) uz; tz = uz-ipartz; int iz = min(max(0,(int) ipartz),spline_z_grid_num-1);
eval_abc(Af,tx,&a[0]);
cudaMemcpy(d_a, a, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(Af,ty,&b[0]);
cudaMemcpy(d_b, b, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(Af,tz,&c[0]);
cudaMemcpy(d_c, c, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(dAf,tx,&da[0]);
cudaMemcpy(d_da, da, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(dAf,ty,&db[0]);
cudaMemcpy(d_db, db, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(dAf,tz,&dc[0]);
cudaMemcpy(d_dc, dc, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(d2Af,tx,&d2a[0]);
cudaMemcpy(d_d2a, d2a, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(d2Af,ty,&d2b[0]);
cudaMemcpy(d_d2b, d2b, sizeof(float)*4, cudaMemcpyHostToDevice);
eval_abc(d2Af,tz,&d2c[0]);
cudaMemcpy(d_d2c, d2c, sizeof(float)*4, cudaMemcpyHostToDevice);
dim3 global_size((spline_num_splines+255)/256*256);
dim3 local_size(256);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
bspline<<<global_size, local_size>>>(
d_spline_coefs,
xs, ys, zs,
d_walkers_vals,
d_walkers_grads,
d_walkers_hess,
d_a,
d_b,
d_c,
d_da,
d_db,
d_dc,
d_d2a,
d_d2b,
d_d2c,
spline_x_grid_delta_inv,
spline_y_grid_delta_inv,
spline_z_grid_delta_inv,
spline_num_splines,
i, ix, iy, iz );
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
}
printf("Total kernel execution time %lf (s)\n", total_time * 1e-9);
cudaMemcpy(walkers_vals, d_walkers_vals, sizeof(float)*WSIZE*NSIZE, cudaMemcpyDeviceToHost);
cudaMemcpy(walkers_grads, d_walkers_grads, sizeof(float)*WSIZE*MSIZE, cudaMemcpyDeviceToHost);
cudaMemcpy(walkers_hess, d_walkers_hess, sizeof(float)*WSIZE*OSIZE, cudaMemcpyDeviceToHost);
// collect results for the first walker
float resVal = 0.f;
float resGrad = 0.f;
float resHess = 0.f;
for( int i = 0; i < NSIZE; i++ ) resVal = resVal + walkers_vals[i];
for( int i = 0; i < MSIZE; i++ ) resGrad = resGrad + walkers_grads[i];
for( int i = 0; i < OSIZE; i++ ) resHess = resHess + walkers_hess[i];
printf("walkers[0]->collect([resVal resGrad resHess]) = [%e %e %e]\n",
resVal,resGrad, resHess);
free(Af);
free(dAf);
free(d2Af);
free(walkers_vals);
free(walkers_grads);
free(walkers_hess);
free(walkers_x);
free(walkers_y);
free(walkers_z);
free(spline_coefs);
cudaFree(d_walkers_vals);
cudaFree(d_walkers_grads);
cudaFree(d_walkers_hess);
cudaFree(d_spline_coefs);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_da);
cudaFree(d_db);
cudaFree(d_dc);
cudaFree(d_d2a);
cudaFree(d_d2b);
cudaFree(d_d2c);
return 0;
}
|