#include <cstdio>
#include <cstdlib>
#include <curand_kernel.h>
#include <raft/common/cub_wrappers.cuh>
#include <raft/common/scatter.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/cudart_utils.h>
#include <raft/handle.hpp>
#include <random>
#include <rmm/device_uvector.hpp>
#include <stdint.h>
#include <type_traits>
namespace raft {
namespace random {
namespace detail {
/** all different generator types used */
enum GeneratorType {
/** curand-based philox generator */
GenPhilox = 0,
/** LFSR taps generator */
GenTaps,
/** kiss99 generator (currently the fastest) */
GenKiss99
};
template <typename Type>
DI void box_muller_transform(Type& val1, Type& val2, Type sigma1, Type mu1, Type sigma2, Type mu2)
{
constexpr Type twoPi = Type(2.0) * Type(3.141592654);
constexpr Type minus2 = -Type(2.0);
Type R = raft::mySqrt(minus2 * raft::myLog(val1));
Type theta = twoPi * val2;
Type s, c;
raft::mySinCos(theta, s, c);
val1 = R * c * sigma1 + mu1;
val2 = R * s * sigma2 + mu2;
}
template <typename Type>
DI void box_muller_transform(Type& val1, Type& val2, Type sigma1, Type mu1)
{
box_muller_transform<Type>(val1, val2, sigma1, mu1, sigma1, mu1);
}
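/*
 * For reference, the Box-Muller transform above maps two uniform samples
 * u1, u2 in (0, 1] to two independent standard normal samples:
 *   z1 = sqrt(-2 * ln(u1)) * cos(2 * pi * u2)
 *   z2 = sqrt(-2 * ln(u1)) * sin(2 * pi * u2)
 * which are then scaled by sigma and shifted by mu. The two-argument overload
 * simply applies the same (sigma, mu) to both outputs. Illustrative device-side
 * call (assumes val1 and val2 already hold uniform draws):
 *   box_muller_transform<float>(val1, val2, 1.f, 0.f);  // val1, val2 ~ N(0, 1)
 */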
/**
* @brief generator-agnostic way of generating random numbers
* @tparam GenType the generator object that exposes a 'next' method
*/
template <typename GenType>
struct Generator {
DI Generator(uint64_t seed, uint64_t subsequence, uint64_t offset)
: gen(seed, subsequence, offset)
{
}
template <typename Type>
DI void next(Type& ret)
{
gen.next(ret);
}
private:
/** the actual generator */
GenType gen;
};
template <typename OutType, typename MathType, typename GenType, typename LenType, typename Lambda>
__global__ void randKernel(uint64_t seed, uint64_t offset, OutType* ptr, LenType len, Lambda randOp)
{
LenType tid = (blockIdx.x * blockDim.x) + threadIdx.x;
Generator<GenType> gen(seed, (uint64_t)tid, offset);
const LenType stride = gridDim.x * blockDim.x;
for (LenType idx = tid; idx < len; idx += stride) {
MathType val;
gen.next(val);
ptr[idx] = randOp(val, idx);
}
}
// used for Box-Muller type transformations
template <typename OutType, typename MathType, typename GenType, typename LenType, typename Lambda2>
__global__ void rand2Kernel(
uint64_t seed, uint64_t offset, OutType* ptr, LenType len, Lambda2 rand2Op)
{
LenType tid = (blockIdx.x * blockDim.x) + threadIdx.x;
Generator<GenType> gen(seed, (uint64_t)tid, offset);
const LenType stride = gridDim.x * blockDim.x;
for (LenType idx = tid; idx < len; idx += stride) {
MathType val1, val2;
gen.next(val1);
gen.next(val2);
rand2Op(val1, val2, idx, idx + stride);
if (idx < len) ptr[idx] = (OutType)val1;
idx += stride;
if (idx < len) ptr[idx] = (OutType)val2;
}
}
template <typename Type>
__global__ void constFillKernel(Type* ptr, int len, Type val)
{
unsigned tid = (blockIdx.x * blockDim.x) + threadIdx.x;
const unsigned stride = gridDim.x * blockDim.x;
for (unsigned idx = tid; idx < len; idx += stride) {
ptr[idx] = val;
}
}
/** Philox-based random number generator */
// Courtesy: Jakub Szuppe
struct PhiloxGenerator {
/**
* @brief ctor. Initializes the state for RNG
* @param seed random seed (can be same across all threads)
* @param subsequence as found in curand docs
* @param offset as found in curand docs
*/
DI PhiloxGenerator(uint64_t seed, uint64_t subsequence, uint64_t offset)
{
curand_init(seed, subsequence, offset, &state);
}
/**
* @defgroup NextRand Generate the next random number
* @{
*/
DI void next(float& ret) { ret = curand_uniform(&(this->state)); }
DI void next(double& ret) { ret = curand_uniform_double(&(this->state)); }
DI void next(uint32_t& ret) { ret = curand(&(this->state)); }
DI void next(uint64_t& ret)
{
uint32_t a, b;
next(a);
next(b);
ret = (uint64_t)a | ((uint64_t)b << 32);
}
DI void next(int32_t& ret)
{
uint32_t val;
next(val);
ret = int32_t(val & 0x7fffffff);
}
DI void next(int64_t& ret)
{
uint64_t val;
next(val);
ret = int64_t(val & 0x7fffffffffffffff);
}
/** @} */
private:
/** the state for RNG */
curandStatePhilox4_32_10_t state;
};
/** LFSR taps-filter for generating random numbers. */
// Courtesy: Vinay Deshpande
struct TapsGenerator {
/**
* @brief ctor. Initializes the state for RNG
* @param seed the seed (can be same across all threads)
* @param subsequence unused
* @param offset unused
*/
DI TapsGenerator(uint64_t seed, uint64_t subsequence, uint64_t offset)
{
uint64_t delta = (blockIdx.x * blockDim.x) + threadIdx.x;
uint64_t stride = blockDim.x * gridDim.x;
delta += ((blockIdx.y * blockDim.y) + threadIdx.y) * stride;
stride *= blockDim.y * gridDim.y;
delta += ((blockIdx.z * blockDim.z) + threadIdx.z) * stride;
state = seed + delta + 1;
}
/**
* @defgroup NextRand Generate the next random number
* @{
*/
template <typename Type>
DI void next(Type& ret)
{
constexpr double ULL_LARGE = 1.8446744073709551614e19;
uint64_t val;
next(val);
ret = static_cast<Type>(val);
ret /= static_cast<Type>(ULL_LARGE);
}
DI void next(uint64_t& ret)
{
constexpr uint64_t TAPS = 0x8000100040002000ULL;
constexpr int ROUNDS = 128;
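// Galois LFSR step: shift right by one and, whenever the bit shifted out is
// set, XOR in the tap mask; ROUNDS iterations are run per output to
// decorrelate successive values.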
for (int i = 0; i < ROUNDS; i++)
state = (state >> 1) ^ (-(state & 1ULL) & TAPS);
ret = state;
}
DI void next(uint32_t& ret)
{
uint64_t val;
next(val);
ret = (uint32_t)val;
}
DI void next(int32_t& ret)
{
uint32_t val;
next(val);
ret = int32_t(val & 0x7fffffff);
}
DI void next(int64_t& ret)
{
uint64_t val;
next(val);
ret = int64_t(val & 0x7fffffffffffffff);
}
/** @} */
private:
/** the state for RNG */
uint64_t state;
};
/** Kiss99-based random number generator */
struct Kiss99Generator {
/**
* @brief ctor. Initializes the state for RNG
* @param seed the seed (can be same across all threads)
* @param subsequence unused
* @param offset unused
*/
DI Kiss99Generator(uint64_t seed, uint64_t subsequence, uint64_t offset) { initKiss99(seed); }
/**
* @defgroup NextRand Generate the next random number
* @{
*/
template <typename Type>
DI void next(Type& ret)
{
constexpr double U_LARGE = 4.294967295e9;
uint32_t val;
next(val);
ret = static_cast<Type>(val);
ret /= static_cast<Type>(U_LARGE);
}
DI void next(uint32_t& ret)
{
uint32_t MWC;
z = 36969 * (z & 65535) + (z >> 16);
w = 18000 * (w & 65535) + (w >> 16);
MWC = ((z << 16) + w);
jsr ^= (jsr << 17);
jsr ^= (jsr >> 13);
jsr ^= (jsr << 5);
jcong = 69069 * jcong + 1234567;
MWC = ((MWC ^ jcong) + jsr);
ret = MWC;
}
DI void next(uint64_t& ret)
{
uint32_t a, b;
next(a);
next(b);
ret = (uint64_t)a | ((uint64_t)b << 32);
}
DI void next(int32_t& ret)
{
uint32_t val;
next(val);
ret = int32_t(val & 0x7fffffff);
}
DI void next(int64_t& ret)
{
uint64_t val;
next(val);
ret = int64_t(val & 0x7fffffffffffffff);
}
/** @} */
private:
/** one of the kiss99 states */
uint32_t z;
/** one of the kiss99 states */
uint32_t w;
/** one of the kiss99 states */
uint32_t jsr;
/** one of the kiss99 states */
uint32_t jcong;
// This function multiplies 128-bit hash by 128-bit FNV prime and returns lower
// 128 bits. It uses 32-bit wide multiply only.
DI void mulByFnv1a128Prime(uint32_t* h)
{
typedef union {
uint32_t u32[2];
uint64_t u64[1];
} words64;
// 128-bit FNV prime = p3 * 2^96 + p2 * 2^64 + p1 * 2^32 + p0
// Here p0 = 315, p2 = 16777216, p1 = p3 = 0
const uint32_t p0 = uint32_t(315), p2 = uint32_t(16777216);
// Partial products
words64 h0p0, h1p0, h2p0, h0p2, h3p0, h1p2;
h0p0.u64[0] = uint64_t(h[0]) * p0;
h1p0.u64[0] = uint64_t(h[1]) * p0;
h2p0.u64[0] = uint64_t(h[2]) * p0;
h0p2.u64[0] = uint64_t(h[0]) * p2;
h3p0.u64[0] = uint64_t(h[3]) * p0;
h1p2.u64[0] = uint64_t(h[1]) * p2;
// h_n[0] = LO(h[0]*p[0]);
// h_n[1] = HI(h[0]*p[0]) + LO(h[1]*p[0]);
// h_n[2] = HI(h[1]*p[0]) + LO(h[2]*p[0]) + LO(h[0]*p[2]);
// h_n[3] = HI(h[2]*p[0]) + HI(h[0]*p[2]) + LO(h[3]*p[0]) + LO(h[1]*p[2]);
uint32_t carry = 0;
h[0] = h0p0.u32[0];
h[1] = h0p0.u32[1] + h1p0.u32[0];
carry = h[1] < h0p0.u32[1] ? 1 : 0;
h[2] = h1p0.u32[1] + carry;
carry = h[2] < h1p0.u32[1] ? 1 : 0;
h[2] += h2p0.u32[0];
carry = h[2] < h2p0.u32[0] ? carry + 1 : carry;
h[2] += h0p2.u32[0];
carry = h[2] < h0p2.u32[0] ? carry + 1 : carry;
h[3] = h2p0.u32[1] + h0p2.u32[1] + h3p0.u32[0] + h1p2.u32[0] + carry;
return;
}
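// FNV-1a over one 32-bit word: for each of the four bytes, XOR the byte into
// the low word of the 128-bit hash and then multiply the whole hash by the
// 128-bit FNV prime.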
DI void fnv1a128(uint32_t* hash, uint32_t txt)
{
hash[0] ^= (txt >> 0) & 0xFF;
mulByFnv1a128Prime(hash);
hash[0] ^= (txt >> 8) & 0xFF;
mulByFnv1a128Prime(hash);
hash[0] ^= (txt >> 16) & 0xFF;
mulByFnv1a128Prime(hash);
hash[0] ^= (txt >> 24) & 0xFF;
mulByFnv1a128Prime(hash);
}
DI void initKiss99(uint64_t seed)
{
// Initialize hash to 128-bit FNV1a basis
uint32_t hash[4] = {1653982605UL, 1656234357UL, 129696066UL, 1818371886UL};
// Digest threadIdx, blockIdx and seed
fnv1a128(hash, threadIdx.x);
fnv1a128(hash, threadIdx.y);
fnv1a128(hash, threadIdx.z);
fnv1a128(hash, blockIdx.x);
fnv1a128(hash, blockIdx.y);
fnv1a128(hash, blockIdx.z);
fnv1a128(hash, uint32_t(seed));
fnv1a128(hash, uint32_t(seed >> 32));
// Initialize KISS99 state with hash
z = hash[0];
w = hash[1];
jsr = hash[2];
jcong = hash[3];
}
};
/** The main random number generator class, fully on GPUs */
class RngImpl {
public:
RngImpl(uint64_t _s, GeneratorType _t = GenPhilox)
: type(_t),
offset(0),
// simple heuristic to make sure all SMs will be occupied properly
// and also not too many initialization calls will be made by each thread
nBlocks(4 * getMultiProcessorCount()),
gen()
{
seed(_s);
}
void seed(uint64_t _s)
{
gen.seed(_s);
offset = 0;
}
template <typename IdxT>
void affine_transform_params(IdxT n, IdxT& a, IdxT& b)
{
// always keep 'a' to be coprime to 'n'
a = gen() % n;
while (gcd(a, n) != 1) {
++a;
if (a >= n) a = 0;
}
// the bias term 'b' can be any number in the range of [0, n)
b = gen() % n;
}
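// Because gcd(a, n) == 1, the map idx -> (a * idx + b) % n is a bijection on
// [0, n) and can serve as a cheap pseudo-random permutation of indices.
// Illustrative use (variable names are hypothetical):
//   int a, b;
//   rng.affine_transform_params(n, a, b);
//   int permuted = (a * i + b) % n;  // visits every index exactly once as i runs over [0, n)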
template <typename Type, typename LenType = int>
void uniform(Type* ptr, LenType len, Type start, Type end, cudaStream_t stream)
{
static_assert(std::is_floating_point<Type>::value,
"Type for 'uniform' can only be floating point!");
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) { return (val * (end - start)) + start; },
stream);
}
template <typename IntType, typename LenType = int>
void uniformInt(IntType* ptr, LenType len, IntType start, IntType end, cudaStream_t stream)
{
static_assert(std::is_integral<IntType>::value, "Type for 'uniformInt' can only be integer!");
custom_distribution(
ptr,
len,
[=] __device__(IntType val, LenType idx) { return (val % (end - start)) + start; },
stream);
}
template <typename Type, typename LenType = int>
void normal(Type* ptr, LenType len, Type mu, Type sigma, cudaStream_t stream)
{
static_assert(std::is_floating_point<Type>::value,
"Type for 'normal' can only be floating point!");
rand2Impl(
offset,
ptr,
len,
[=] __device__(Type & val1, Type & val2, LenType idx1, LenType idx2) {
box_muller_transform<Type>(val1, val2, sigma, mu);
},
NumThreads,
nBlocks,
type,
stream);
}
template <typename IntType, typename LenType = int>
void normalInt(IntType* ptr, LenType len, IntType mu, IntType sigma, cudaStream_t stream)
{
static_assert(std::is_integral<IntType>::value, "Type for 'normalInt' can only be integer!");
rand2Impl<IntType, double>(
offset,
ptr,
len,
[=] __device__(double& val1, double& val2, LenType idx1, LenType idx2) {
box_muller_transform<double>(val1, val2, sigma, mu);
},
NumThreads,
nBlocks,
type,
stream);
}
template <typename Type, typename LenType = int>
void normalTable(Type* ptr,
LenType n_rows,
LenType n_cols,
const Type* mu,
const Type* sigma_vec,
Type sigma,
cudaStream_t stream)
{
rand2Impl(
offset,
ptr,
n_rows * n_cols,
[=] __device__(Type & val1, Type & val2, LenType idx1, LenType idx2) {
// yikes! use fast-int-div
auto col1 = idx1 % n_cols;
auto col2 = idx2 % n_cols;
auto mean1 = mu[col1];
auto mean2 = mu[col2];
auto sig1 = sigma_vec == nullptr ? sigma : sigma_vec[col1];
auto sig2 = sigma_vec == nullptr ? sigma : sigma_vec[col2];
box_muller_transform<Type>(val1, val2, sig1, mean1, sig2, mean2);
},
NumThreads,
nBlocks,
type,
stream);
}
template <typename Type, typename LenType = int>
void fill(Type* ptr, LenType len, Type val, cudaStream_t stream)
{
detail::constFillKernel<Type><<<nBlocks, NumThreads, 0, stream>>>(ptr, len, val);
RAFT_CUDA_TRY(cudaPeekAtLastError());
}
template <typename Type, typename OutType = bool, typename LenType = int>
void bernoulli(OutType* ptr, LenType len, Type prob, cudaStream_t stream)
{
custom_distribution<OutType, Type>(
ptr, len, [=] __device__(Type val, LenType idx) { return val > prob; }, stream);
}
template <typename Type, typename LenType = int>
void scaled_bernoulli(Type* ptr, LenType len, Type prob, Type scale, cudaStream_t stream)
{
static_assert(std::is_floating_point<Type>::value,
"Type for 'scaled_bernoulli' can only be floating point!");
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) { return val > prob ? -scale : scale; },
stream);
}
template <typename Type, typename LenType = int>
void gumbel(Type* ptr, LenType len, Type mu, Type beta, cudaStream_t stream)
{
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) { return mu - beta * raft::myLog(-raft::myLog(val)); },
stream);
}
template <typename Type, typename LenType = int>
void lognormal(Type* ptr, LenType len, Type mu, Type sigma, cudaStream_t stream)
{
rand2Impl(
offset,
ptr,
len,
[=] __device__(Type & val1, Type & val2, LenType idx1, LenType idx2) {
box_muller_transform<Type>(val1, val2, sigma, mu);
val1 = raft::myExp(val1);
val2 = raft::myExp(val2);
},
NumThreads,
nBlocks,
type,
stream);
}
template <typename Type, typename LenType = int>
void logistic(Type* ptr, LenType len, Type mu, Type scale, cudaStream_t stream)
{
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) {
constexpr Type one = (Type)1.0;
return mu - scale * raft::myLog(one / val - one);
},
stream);
}
template <typename Type, typename LenType = int>
void exponential(Type* ptr, LenType len, Type lambda, cudaStream_t stream)
{
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) {
constexpr Type one = (Type)1.0;
return -raft::myLog(one - val) / lambda;
},
stream);
}
template <typename Type, typename LenType = int>
void rayleigh(Type* ptr, LenType len, Type sigma, cudaStream_t stream)
{
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) {
constexpr Type one = (Type)1.0;
constexpr Type two = (Type)2.0;
return raft::mySqrt(-two * raft::myLog(one - val)) * sigma;
},
stream);
}
template <typename Type, typename LenType = int>
void laplace(Type* ptr, LenType len, Type mu, Type scale, cudaStream_t stream)
{
custom_distribution(
ptr,
len,
[=] __device__(Type val, LenType idx) {
constexpr Type one = (Type)1.0;
constexpr Type two = (Type)2.0;
constexpr Type oneHalf = (Type)0.5;
Type out;
if (val <= oneHalf) {
out = mu + scale * raft::myLog(two * val);
} else {
out = mu - scale * raft::myLog(two * (one - val));
}
return out;
},
stream);
}
template <typename DataT, typename WeightsT, typename IdxT = int>
void sampleWithoutReplacement(const raft::handle_t& handle,
DataT* out,
IdxT* outIdx,
const DataT* in,
const WeightsT* wts,
IdxT sampledLen,
IdxT len,
cudaStream_t stream)
{
ASSERT(sampledLen <= len, "sampleWithoutReplacement: 'sampledLen' can't be more than 'len'.");
rmm::device_uvector<WeightsT> expWts(len, stream);
rmm::device_uvector<WeightsT> sortedWts(len, stream);
rmm::device_uvector<IdxT> inIdx(len, stream);
rmm::device_uvector<IdxT> outIdxBuff(len, stream);
auto* inIdxPtr = inIdx.data();
// generate modified weights
custom_distribution(
expWts.data(),
len,
[wts, inIdxPtr] __device__(WeightsT val, IdxT idx) {
inIdxPtr[idx] = idx;
constexpr WeightsT one = (WeightsT)1.0;
auto exp = -raft::myLog(one - val);
if (wts != nullptr) { return exp / wts[idx]; }
return exp;
},
stream);
///@todo: use a more efficient partitioning scheme instead of full sort
// sort the array and pick the top sampledLen items
IdxT* outIdxPtr = outIdxBuff.data();
rmm::device_uvector<char> workspace(0, stream);
sortPairs(workspace, expWts.data(), sortedWts.data(), inIdxPtr, outIdxPtr, (int)len, stream);
if (outIdx != nullptr) {
RAFT_CUDA_TRY(cudaMemcpyAsync(
outIdx, outIdxPtr, sizeof(IdxT) * sampledLen, cudaMemcpyDeviceToDevice, stream));
}
raft::scatter<DataT, IdxT>(out, in, outIdxPtr, sampledLen, stream);
}
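// The sampling above uses the exponential-keys trick: item i receives the key
// E_i / w_i with E_i = -log(1 - u_i) ~ Exp(1); taking the 'sampledLen' smallest
// keys (sortPairs sorts ascending) yields a weighted sample without replacement.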
template <typename OutType, typename MathType = OutType, typename LenType = int, typename Lambda>
void custom_distribution(OutType* ptr, LenType len, Lambda randOp, cudaStream_t stream)
{
randImpl<OutType, MathType, LenType, Lambda>(
offset, ptr, len, randOp, NumThreads, nBlocks, type, stream);
}
template <typename OutType, typename MathType = OutType, typename LenType = int, typename Lambda>
void custom_distribution2(OutType* ptr, LenType len, Lambda randOp, cudaStream_t stream)
{
rand2Impl<OutType, MathType, LenType, Lambda>(
offset, ptr, len, randOp, NumThreads, nBlocks, type, stream);
}
/** @} */
private:
/** generator type */
GeneratorType type;
/**
* offset is also used to initialize curand state.
* Limits period of Philox RNG from (4 * 2^128) to (Blocks * Threads * 2^64),
* but is still a large period.
*/
uint64_t offset;
/** number of blocks to launch */
int nBlocks;
/** next seed generator for device-side RNG */
std::mt19937_64 gen;
static const int NumThreads = 256;
template <bool IsNormal, typename Type, typename LenType>
uint64_t _setupSeeds(uint64_t& seed, uint64_t& offset, LenType len, int nThreads, int nBlocks)
{
LenType itemsPerThread = raft::ceildiv(len, LenType(nBlocks * nThreads));
if (IsNormal && itemsPerThread % 2 == 1) { ++itemsPerThread; }
// curand uses 2 32b uint's to generate one double
uint64_t factor = sizeof(Type) / sizeof(float);
if (factor == 0) ++factor;
// Check if there are enough random numbers left in sequence
// If not, then generate new seed and start from zero offset
uint64_t newOffset = offset + LenType(itemsPerThread) * factor;
if (newOffset < offset) {
offset = 0;
seed = gen();
newOffset = itemsPerThread * factor;
}
return newOffset;
}
template <typename OutType, typename MathType = OutType, typename LenType = int, typename Lambda>
void randImpl(uint64_t& offset,
OutType* ptr,
LenType len,
Lambda randOp,
int nThreads,
int nBlocks,
GeneratorType type,
cudaStream_t stream)
{
if (len <= 0) return;
uint64_t seed = gen();
auto newOffset = _setupSeeds<false, MathType, LenType>(seed, offset, len, nThreads, nBlocks);
switch (type) {
case GenPhilox:
detail::randKernel<OutType, MathType, detail::PhiloxGenerator, LenType, Lambda>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, randOp);
break;
case GenTaps:
detail::randKernel<OutType, MathType, detail::TapsGenerator, LenType, Lambda>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, randOp);
break;
case GenKiss99:
detail::randKernel<OutType, MathType, detail::Kiss99Generator, LenType, Lambda>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, randOp);
break;
default: ASSERT(false, "randImpl: Incorrect generator type! %d", type);
};
RAFT_CUDA_TRY(cudaGetLastError());
offset = newOffset;
}
template <typename OutType, typename MathType = OutType, typename LenType = int, typename Lambda2>
void rand2Impl(uint64_t& offset,
OutType* ptr,
LenType len,
Lambda2 rand2Op,
int nThreads,
int nBlocks,
GeneratorType type,
cudaStream_t stream)
{
if (len <= 0) return;
auto seed = gen();
auto newOffset = _setupSeeds<true, MathType, LenType>(seed, offset, len, nThreads, nBlocks);
switch (type) {
case GenPhilox:
detail::rand2Kernel<OutType, MathType, detail::PhiloxGenerator, LenType, Lambda2>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, rand2Op);
break;
case GenTaps:
detail::rand2Kernel<OutType, MathType, detail::TapsGenerator, LenType, Lambda2>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, rand2Op);
break;
case GenKiss99:
detail::rand2Kernel<OutType, MathType, detail::Kiss99Generator, LenType, Lambda2>
<<<nBlocks, nThreads, 0, stream>>>(seed, offset, ptr, len, rand2Op);
break;
default: ASSERT(false, "rand2Impl: Incorrect generator type! %d", type);
};
RAFT_CUDA_TRY(cudaGetLastError());
offset = newOffset;
}
};
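/*
 * Minimal usage sketch (illustrative only; assumes `d_out` points to `n`
 * device-resident floats and `stream` is a valid CUDA stream):
 *   detail::RngImpl rng(1234ULL);                // defaults to GenPhilox
 *   rng.uniform(d_out, n, 0.0f, 1.0f, stream);   // U(0, 1)
 *   rng.normal(d_out, n, 0.0f, 1.0f, stream);    // N(0, 1)
 */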
}; // end namespace detail
}; // end namespace random
}; // end namespace raft
#include <types.h>
#include <cutil.h>
#include <error.h>
#include <cusp/print.h>
#include <my_timer.h>
__device__ __constant__ CGType c_w_x_3d[DEGREE];
__device__ __constant__ CGType c_w_y_3d[DEGREE];
__device__ __constant__ CGType c_w_z_3d[DEGREE];
__device__ __constant__ CGType c_phi[DEGREE*DEGREE*DEGREE * 4];
template<typename ValueType>
__device__ __host__ ValueType forceFunction_3d(ValueType x, ValueType y)
{
return 0.0;
}
template<typename IndexType, typename ValueType>
__device__ __host__ void compute_stiffness_matrix_3d(const ValueType* __restrict__ linearBaseCoeff, ValueType Tvol, ValueType* __restrict__ stiffMat, ValueType co)
{
ValueType a1, a2, b1, b2, c1, c2;
int cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++)
{
#pragma unroll
for (int g = k; g < 4; g++)
{
a1 = linearBaseCoeff[4 * k + 0];
b1 = linearBaseCoeff[4 * k + 1];
c1 = linearBaseCoeff[4 * k + 2];
a2 = linearBaseCoeff[4 * g + 0];
b2 = linearBaseCoeff[4 * g + 1];
c2 = linearBaseCoeff[4 * g + 2];
stiffMat[cnt++] = (a1 * a2 + b1 * b2 + c1 * c2) * Tvol * co;
}
}
}
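// For a linear (P1) tetrahedral element the shape-function gradients are the
// constant vectors (a_k, b_k, c_k), so each local stiffness entry reduces to
//   K_kg = (a_k*a_g + b_k*b_g + c_k*c_g) * Tvol * co,
// and only the 10 upper-triangular entries of the symmetric 4x4 matrix are
// stored, packed row by row in stiffMat.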
template<typename ValueType >
__device__ __host__ ValueType Integration_Quadrilateral_3d(ValueType(*fx)[DEGREE][DEGREE], ValueType* w_x, ValueType* w_y, ValueType* w_z)
{
ValueType integral = 0;
ValueType tmp_y, tmp_z;
for (int i = 0; i < DEGREE; i++)
{
tmp_y = 0.0;
for (int j = 0; j < DEGREE; j++)
{
tmp_z = 0.0;
for (int k = 0; k < DEGREE; k++)
{
tmp_z += fx[i][j][k] * w_z[k];
}
tmp_y += tmp_z * w_y[j];
}
integral += tmp_y * w_x[i];
}
return integral;
}
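// Tensor-product Gauss quadrature: with the abscissa values already baked into
// fx[i][j][k] = f(x_i, y_j, z_k), the triple loop evaluates
//   integral ~= sum_{i,j,k} w_x[i] * w_y[j] * w_z[k] * fx[i][j][k].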
template<typename IndexType, typename ValueType >
__device__ __host__ void compute_massmatrix_vector_3d(ValueType* vertX, ValueType* vertY, ValueType* vertZ,
ValueType* __restrict__ linearBaseCoeff, ValueType* __restrict__ massMat, ValueType * __restrict__ ele_b,
ValueType* w_x_3d, ValueType* w_y_3d, ValueType* w_z_3d, ValueType* phi, ValueType* integrand)
{
ValueType x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4;
x1 = vertX[0];
y1 = vertY[0];
z1 = vertZ[0];
x2 = vertX[1];
y2 = vertY[1];
z2 = vertZ[1];
x3 = vertX[2];
y3 = vertY[2];
z3 = vertZ[2];
x4 = vertX[3];
y4 = vertY[3];
z4 = vertZ[3];
ValueType det = 0.125 * ((-x1 + x2) * (-y1 + y3) * (-z1 + z4) +
(-y1 + y2) * (-z1 + z3) * (-x1 + x4) +
(-z1 + z2) * (-x1 + x3) * (-y1 + y4) -
(-x1 + x2) * (-z1 + z3) * (-y1 + y4) -
(-z1 + z2) * (-y1 + y3) * (-x1 + x4) -
(-y1 + y2) * (-x1 + x3) * (-z1 + z4));
ValueType jacobi = fabs(det);
int Cnt = 0;
#pragma unroll
for (int k = 0; k < 4; k++)
{
#pragma unroll
for (int g = k; g < 4; g++)
{
massMat[Cnt] = integrand[Cnt] * jacobi;
Cnt++;
}
}
}
template <typename IndexType >
__device__ __host__ int binarySearch(IndexType *indices, IndexType low, IndexType high, IndexType _val, const IndexType pitch)
{
IndexType retval = -1;
intuint<IndexType> val;
val.ival = _val;
while (high >= low)
{
IndexType mid = low + (high - low) / 2;
intuint<IndexType> mval;
mval.ival = indices[pitch * mid];
if (mval.uval > val.uval)
high = mid - 1;
else if (mval.uval < val.uval)
low = mid + 1;
else
{
retval = mid;
break;
}
}
return retval;
}
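// Searches one ELL-format row whose column indices are laid out with stride
// 'pitch' (column-major ELL storage). Indices are compared as unsigned,
// presumably so that a padding value of -1 sorts to the end of the row.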
__device__ double atomicAdd_3d(double* address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do
{
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val + __longlong_as_double(assumed)));
}
while (assumed != old);
return __longlong_as_double(old);
}
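// Standard compare-and-swap emulation of a double-precision atomicAdd; native
// hardware support for atomicAdd(double*, double) only exists on devices of
// compute capability 6.0 (Pascal) and newer. The float overload below simply
// forwards to the hardware atomicAdd.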
__device__ float atomicAdd_3d(float* address, float val)
{
return atomicAdd(address, val);
}
template<typename IndexType, typename ValueType >
__device__ void sum_into_global_linear_system_cuda_3d(IndexType* __restrict__ ids, ValueType* __restrict__ stiffMat, ValueType* __restrict__ massMat,
ValueType* __restrict__ ele_b,
ValueType* __restrict__ d_ellvalues, IndexType* __restrict__ d_ellcolidx, size_t nrow, size_t num_col_per_row, size_t pitch,
ValueType * __restrict__ d_b)
{
IndexType idxi = ids[0];
IndexType idxj = ids[1];
IndexType* mat_row_cols = &d_ellcolidx[idxi];
__syncthreads();
ValueType* mat_row_coefs = &d_ellvalues[idxi];
ValueType lambda = 1.0;
ValueType coef = stiffMat[1] + lambda * massMat[1];
IndexType loc;
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch); // first one is diagonal
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[0];
idxj = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[2] + lambda * massMat[2];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch); // first one is diagonal
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch); // first one is diagonal
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[0];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[3] + lambda * massMat[3];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch); // first one is diagonal
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[1];
idxj = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[5] + lambda * massMat[5];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch); // first one is diagonal
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
//first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[1];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[6] + lambda * massMat[6];
//first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
//first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[2];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[8] + lambda * massMat[8];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
atomicAdd_3d(&mat_row_coefs[pitch * loc], coef);
}
idxi = ids[0];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[0] + lambda * massMat[0];
atomicAdd_3d(&mat_row_coefs[0], coef);
idxi = ids[1];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[4] + lambda * massMat[4];
atomicAdd_3d(&mat_row_coefs[0], coef);
idxi = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[7] + lambda * massMat[7];
atomicAdd_3d(&mat_row_coefs[0], coef);
idxi = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[9] + lambda * massMat[9];
atomicAdd_3d(&mat_row_coefs[0], coef);
}
template<typename IndexType, typename ValueType >
void sum_into_global_linear_system_3d_host(IndexType* __restrict__ ids, ValueType* __restrict__ stiffMat, ValueType* __restrict__ massMat,
ValueType* __restrict__ ele_b,
ValueType* __restrict__ d_ellvalues, IndexType* __restrict__ d_ellcolidx, size_t nrow, size_t num_col_per_row, size_t pitch,
ValueType * __restrict__ d_b)
{
IndexType idxi = ids[0];
IndexType idxj = ids[1];
IndexType* mat_row_cols = &d_ellcolidx[idxi];
ValueType* mat_row_coefs = &d_ellvalues[idxi];
ValueType lambda = 1.0;
ValueType coef = stiffMat[1] + lambda * massMat[1];
// first one is diagonal
IndexType loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[0];
idxj = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[2] + lambda * massMat[2];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[0];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[3] + lambda * massMat[3];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[1];
idxj = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[5] + lambda * massMat[5];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[1];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[6] + lambda * massMat[6];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[2];
idxj = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[8] + lambda * massMat[8];
// first one is diagonal
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxj, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
mat_row_cols = &d_ellcolidx[idxj];
mat_row_coefs = &d_ellvalues[idxj];
loc = binarySearch<IndexType > (mat_row_cols, 1, num_col_per_row - 1, idxi, pitch);
if (loc >= 0)
{
mat_row_coefs[pitch * loc] += coef;
}
idxi = ids[0];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[0] + lambda * massMat[0];
// first one is diagonal
mat_row_coefs[0] += coef;
idxi = ids[1];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[4] + lambda * massMat[4];
// first one is diagonal
mat_row_coefs[0] += coef;
idxi = ids[2];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[7] + lambda * massMat[7];
// first one is diagonal
mat_row_coefs[0] += coef;
idxi = ids[3];
mat_row_cols = &d_ellcolidx[idxi];
mat_row_coefs = &d_ellvalues[idxi];
coef = stiffMat[9] + lambda * massMat[9];
// first one is diagonal
mat_row_coefs[0] += coef;
d_b[ids[0]] += ele_b[0];
d_b[ids[1]] += ele_b[1];
d_b[ids[2]] += ele_b[2];
d_b[ids[3]] += ele_b[3];
}
template<typename IndexType, typename ValueType >
__global__ void element_loop_3d_kernel(size_t nv, ValueType *d_nx, ValueType *d_ny, ValueType *d_nz, size_t ne, IndexType *d_tri0, IndexType *d_tri1, IndexType *d_tri2, IndexType *d_tri3,
ValueType *d_ellvalues, IndexType *d_ellcolidx, size_t nrow, size_t num_col_per_row, size_t pitch,
ValueType * d_b, IndexType* matlabels, ValueType* integrand)
{
ValueType coeffs[16];
ValueType stiffMat[10];
ValueType massMat[10] = {0};
ValueType ele_b[4];
IndexType ids[4];
ValueType x[4];
ValueType y[4];
ValueType z[4];
IndexType matlabel;
ValueType co;
for (int eleidx = blockIdx.x * blockDim.x + threadIdx.x; eleidx < ne; eleidx += blockDim.x * gridDim.x)
{
ids[0] = d_tri0[eleidx];
ids[1] = d_tri1[eleidx];
ids[2] = d_tri2[eleidx];
ids[3] = d_tri3[eleidx];
x[0] = d_nx[ids[0]];
x[1] = d_nx[ids[1]];
x[2] = d_nx[ids[2]];
x[3] = d_nx[ids[3]];
y[0] = d_ny[ids[0]];
y[1] = d_ny[ids[1]];
y[2] = d_ny[ids[2]];
y[3] = d_ny[ids[3]];
z[0] = d_nz[ids[0]];
z[1] = d_nz[ids[1]];
z[2] = d_nz[ids[2]];
z[3] = d_nz[ids[3]];
ValueType a1 = x[1] - x[3], a2 = y[1] - y[3], a3 = z[1] - z[3];
ValueType b1 = x[2] - x[3], b2 = y[2] - y[3], b3 = z[2] - z[3];
ValueType c1 = x[0] - x[3], c2 = y[0] - y[3], c3 = z[0] - z[3];
ValueType Tvol = fabs(
c1 * (a2 * b3 - a3 * b2) +
c2 * (a3 * b1 - a1 * b3) +
c3 * (a1 * b2 - a2 * b1)) / 6.0;
//compute inverse of 4 by 4 matrix
ValueType a11 = x[0], a12 = y[0], a13 = z[0], a14 = 1.0,
a21 = x[1], a22 = y[1], a23 = z[1], a24 = 1.0,
a31 = x[2], a32 = y[2], a33 = z[2], a34 = 1.0,
a41 = x[3], a42 = y[3], a43 = z[3], a44 = 1.0;
ValueType det =
a11 * a22 * a33 * a44 + a11 * a23 * a34 * a42 + a11 * a24 * a32 * a43
+ a12 * a21 * a34 * a43 + a12 * a23 * a31 * a44 + a12 * a24 * a33 * a41
+ a13 * a21 * a32 * a44 + a13 * a22 * a34 * a41 + a13 * a24 * a31 * a42
+ a14 * a21 * a33 * a42 + a14 * a22 * a31 * a43 + a14 * a23 * a32 * a41
- a11 * a22 * a34 * a43 - a11 * a23 * a32 * a44 - a11 * a24 * a33 * a42
- a12 * a21 * a33 * a44 - a12 * a23 * a34 * a41 - a12 * a24 * a31 * a43
- a13 * a21 * a34 * a42 - a13 * a22 * a31 * a44 - a13 * a24 * a32 * a41
- a14 * a21 * a32 * a43 - a14 * a22 * a33 * a41 - a14 * a23 * a31 * a42;
ValueType b11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42;
ValueType b12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43;
ValueType b13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * a43 - a13 * a22 * a44 - a14 * a23 * a42;
ValueType b14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
ValueType b21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
ValueType b22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
ValueType b23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
ValueType b24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
ValueType b31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
ValueType b32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
ValueType b33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
ValueType b34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
ValueType b41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
ValueType b42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
ValueType b43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
ValueType b44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;
coeffs[0] = b11 / det;
coeffs[1] = b21 / det;
coeffs[2] = b31 / det;
coeffs[3] = b41 / det;
coeffs[4] = b12 / det;
coeffs[5] = b22 / det;
coeffs[6] = b32 / det;
coeffs[7] = b42 / det;
coeffs[8] = b13 / det;
coeffs[9] = b23 / det;
coeffs[10] = b33 / det;
coeffs[11] = b43 / det;
coeffs[12] = b14 / det;
coeffs[13] = b24 / det;
coeffs[14] = b34 / det;
coeffs[15] = b44 / det;
//compute element stiffness matrix
matlabel = matlabels[eleidx];
switch (matlabel)
{
case 0:
co = 1.0;
break;
case 1:
co = 1.0;
break;
case 2:
co = 2;
break;
case 3:
co = 3.0;
break;
case 4:
co = 4.0;
break;
case 5:
co = 5.0;
break;
case 6:
co = 6.0;
break;
}
compute_stiffness_matrix_3d<IndexType, ValueType > (coeffs, Tvol, stiffMat, co);
// if(threadIdx.x < 0)
{
//compute element mass matrix and vector
compute_massmatrix_vector_3d<IndexType, ValueType > (x, y, z, coeffs, massMat, ele_b, c_w_x_3d, c_w_y_3d, c_w_z_3d, c_phi, integrand);
sum_into_global_linear_system_cuda_3d<IndexType, ValueType > (ids, stiffMat, massMat, ele_b,
d_ellvalues, d_ellcolidx, nrow, num_col_per_row, pitch,
d_b);
}
}
}
template<typename IndexType, typename ValueType >
void element_loop_3d_host(Vector_h_CG &nx, Vector_h_CG &ny, Vector_h_CG &nz, IdxVector_h &tri0, IdxVector_h &tri1, IdxVector_h &tri2, IdxVector_h &tri3, Matrix_ell_h_CG &A, Vector_h_CG &b,
Vector_h_CG & phi, Vector_h_CG &weight_x, Vector_h_CG &weight_y, Vector_h_CG & weight_z, IdxVector_h &matlabels, Vector_h_CG &integrand)
{
ValueType coeffs[16];
ValueType stiffMat[10];
ValueType massMat[10] = {0};
ValueType ele_b[4];
IndexType ids[4];
ValueType x[4];
ValueType y[4];
ValueType z[4];
IndexType matlabel;
ValueType co;
int ne = tri0.size();
ValueType *integrand_ptr = thrust::raw_pointer_cast(&integrand[0]);
ValueType *wx_ptr = thrust::raw_pointer_cast(&weight_x[0]);
ValueType *wy_ptr = thrust::raw_pointer_cast(&weight_y[0]);
ValueType *wz_ptr = thrust::raw_pointer_cast(&weight_z[0]);
ValueType *phi_ptr = thrust::raw_pointer_cast(&phi[0]);
ValueType *d_ellvalues = thrust::raw_pointer_cast(&A.values.values[0]);
IndexType *d_ellcolidx = thrust::raw_pointer_cast(&A.column_indices.values[0]);
size_t num_col_per_row = A.column_indices.num_cols;
size_t pitch = A.column_indices.pitch;
size_t nrow = A.num_rows;
ValueType *d_b = thrust::raw_pointer_cast(&b[0]);
for (int eleidx = 0; eleidx < ne; eleidx++)
{
ids[0] = tri0[eleidx];
ids[1] = tri1[eleidx];
ids[2] = tri2[eleidx];
ids[3] = tri3[eleidx];
x[0] = nx[ids[0]];
x[1] = nx[ids[1]];
x[2] = nx[ids[2]];
x[3] = nx[ids[3]];
y[0] = ny[ids[0]];
y[1] = ny[ids[1]];
y[2] = ny[ids[2]];
y[3] = ny[ids[3]];
z[0] = nz[ids[0]];
z[1] = nz[ids[1]];
z[2] = nz[ids[2]];
z[3] = nz[ids[3]];
ValueType a1 = x[1] - x[3], a2 = y[1] - y[3], a3 = z[1] - z[3];
ValueType b1 = x[2] - x[3], b2 = y[2] - y[3], b3 = z[2] - z[3];
ValueType c1 = x[0] - x[3], c2 = y[0] - y[3], c3 = z[0] - z[3];
ValueType Tvol = fabs(c1 * (a2 * b3 - a3 * b2) + c2 * (a3 * b1 - a1 * b3) + c3 * (a1 * b2 - a2 * b1)) / 6.0;
//compute inverse of 4 by 4 matrix
ValueType a11 = x[0], a12 = y[0], a13 = z[0], a14 = 1.0, a21 = x[1], a22 = y[1], a23 = z[1], a24 = 1.0, a31 = x[2], a32 = y[2], a33 = z[2], a34 = 1.0, a41 = x[3], a42 = y[3], a43 = z[3], a44 = 1.0;
ValueType det =
a11 * a22 * a33 * a44 + a11 * a23 * a34 * a42 + a11 * a24 * a32 * a43
+ a12 * a21 * a34 * a43 + a12 * a23 * a31 * a44 + a12 * a24 * a33 * a41
+ a13 * a21 * a32 * a44 + a13 * a22 * a34 * a41 + a13 * a24 * a31 * a42
+ a14 * a21 * a33 * a42 + a14 * a22 * a31 * a43 + a14 * a23 * a32 * a41
- a11 * a22 * a34 * a43 - a11 * a23 * a32 * a44 - a11 * a24 * a33 * a42
- a12 * a21 * a33 * a44 - a12 * a23 * a34 * a41 - a12 * a24 * a31 * a43
- a13 * a21 * a34 * a42 - a13 * a22 * a31 * a44 - a13 * a24 * a32 * a41
- a14 * a21 * a32 * a43 - a14 * a22 * a33 * a41 - a14 * a23 * a31 * a42;
ValueType b11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42;
ValueType b12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43;
ValueType b13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * a43 - a13 * a22 * a44 - a14 * a23 * a42;
ValueType b14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
ValueType b21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
ValueType b22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
ValueType b23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
ValueType b24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
ValueType b31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
ValueType b32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
ValueType b33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
ValueType b34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
ValueType b41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
ValueType b42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
ValueType b43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
ValueType b44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;
coeffs[0] = b11 / det;
coeffs[1] = b21 / det;
coeffs[2] = b31 / det;
coeffs[3] = b41 / det;
coeffs[4] = b12 / det;
coeffs[5] = b22 / det;
coeffs[6] = b32 / det;
coeffs[7] = b42 / det;
coeffs[8] = b13 / det;
coeffs[9] = b23 / det;
coeffs[10] = b33 / det;
coeffs[11] = b43 / det;
coeffs[12] = b14 / det;
coeffs[13] = b24 / det;
coeffs[14] = b34 / det;
coeffs[15] = b44 / det;
//compute element stiffness matrix
matlabel = matlabels[eleidx];
if (matlabel == 0)
co = 1.0;
else
co = 1.0;
compute_stiffness_matrix_3d<IndexType, ValueType > (coeffs, Tvol, stiffMat, co);
//compute element mass matrix and vector
compute_massmatrix_vector_3d<IndexType, ValueType > (x, y, z, coeffs, massMat, ele_b, wx_ptr, wy_ptr, wz_ptr, phi_ptr, integrand_ptr);
sum_into_global_linear_system_3d_host<IndexType, ValueType > (ids, stiffMat, massMat, ele_b,
d_ellvalues, d_ellcolidx, nrow, num_col_per_row, pitch,
d_b);
}
}
void perform_element_loop_3d(Vector_d_CG &nx, Vector_d_CG &ny,
Vector_d_CG &nz, IdxVector_d &tri0, IdxVector_d &tri1,
IdxVector_d &tri2, IdxVector_d &tri3, Matrix_ell_d_CG &A,
Vector_d_CG &b, Vector_h_CG & phi, Vector_h_CG &weight_x,
Vector_h_CG &weight_y, Vector_h_CG & weight_z,
IdxVector_d &matlabels, Vector_d_CG &integrand, bool isdevice)
{
typedef typename Matrix_ell_d_CG::index_type IndexType;
typedef typename Matrix_ell_d_CG::value_type ValueType;
int nv = nx.size();
int ne = tri0.size();
double start, stop;
if (isdevice)
{
ValueType *d_b = thrust::raw_pointer_cast(&b[0]);
ValueType *d_nx = thrust::raw_pointer_cast(&nx[0]);
ValueType *d_ny = thrust::raw_pointer_cast(&ny[0]);
ValueType *d_nz = thrust::raw_pointer_cast(&nz[0]);
IndexType *d_tri0 = thrust::raw_pointer_cast(&tri0[0]);
IndexType *d_tri1 = thrust::raw_pointer_cast(&tri1[0]);
IndexType *d_tri2 = thrust::raw_pointer_cast(&tri2[0]);
IndexType *d_tri3 = thrust::raw_pointer_cast(&tri3[0]);
IndexType *d_matlabels = thrust::raw_pointer_cast(&matlabels[0]);
ValueType *d_ellvalues = thrust::raw_pointer_cast(&A.values.values[0]);
IndexType *d_ellcolidx = thrust::raw_pointer_cast(&A.column_indices.values[0]);
ValueType *wx = thrust::raw_pointer_cast(&weight_x[0]);
ValueType *wy = thrust::raw_pointer_cast(&weight_y[0]);
ValueType *wz = thrust::raw_pointer_cast(&weight_z[0]);
ValueType *integrand_d = thrust::raw_pointer_cast(&integrand[0]);
ValueType *h_phi = thrust::raw_pointer_cast(&phi[0]);
size_t num_col_per_row = A.column_indices.num_cols;
size_t pitch = A.column_indices.pitch;
size_t nrow = A.num_rows;
cudaMemcpyToSymbol(c_w_x_3d, wx, sizeof (ValueType) *
weight_x.size(), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_w_y_3d, wy, sizeof (ValueType) *
weight_y.size(), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_w_z_3d, wz, sizeof (ValueType) *
weight_z.size(), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(c_phi, h_phi, sizeof (ValueType) *
phi.size(), 0, cudaMemcpyHostToDevice);
int threads = 256;
int num_blocks = std::min((int)ceil((double)ne / threads), 65535); // cap at the 65535 maximum 1D grid dimension
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
//Now do the actual finite-element assembly loop:
element_loop_3d_kernel<IndexType, ValueType>
<< <num_blocks, threads >> >(
nv, d_nx, d_ny, d_nz, ne, d_tri0, d_tri1, d_tri2, d_tri3,
d_ellvalues, d_ellcolidx, nrow, num_col_per_row, pitch,
d_b, d_matlabels, integrand_d);
}
else
{
Vector_h_CG h_b = b;
Vector_h_CG h_nx = nx;
Vector_h_CG h_ny = ny;
Vector_h_CG h_nz = nz;
IdxVector_h h_tri0 = tri0;
IdxVector_h h_tri1 = tri1;
IdxVector_h h_tri2 = tri2;
IdxVector_h h_tri3 = tri3;
Vector_h_CG integrand_h = integrand;
IdxVector_h h_matlabels = matlabels;
start = CLOCK();
Matrix_ell_h_CG h_Aell = A;
cudaDeviceSynchronize();
stop = CLOCK();
double copy1 = stop - start;
element_loop_3d_host<IndexType, ValueType >
(h_nx, h_ny, h_nz, h_tri0, h_tri1, h_tri2, h_tri3,
h_Aell, h_b, phi, weight_x, weight_y, weight_z, h_matlabels, integrand_h);
start = CLOCK();
A = h_Aell;
cudaDeviceSynchronize();
stop = CLOCK();
double copy2 = stop - start;
printf("data transfer time in host assemble is: %f\n", copy1 + copy2);
}
}
#endif
__constant__ GlobalConstants cu_const_params;
#include "block_matching.cu_inl"
#include "aggregation.cu_inl"
////////////////////////////////////////////////////////////////////////////////////////
// Putting all the cuda kernels here
///////////////////////////////////////////////////////////////////////////////////////
__device__ float norm2(cuComplex & a) {
return (a.x * a.x) + (a.y * a.y);
}
__global__ void real2complex(uchar* h_data, cufftComplex *output) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int index = j*cu_const_params.image_width + i;
if (i<cu_const_params.image_width && j<cu_const_params.image_height) {
output[index].x = h_data[index];
output[index].y = 0.0f;
}
}
__global__ void complex2real(cufftComplex *data, float* output, int total_size, int trans_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index >= total_size) {
return;
}
output[index] = data[index].x / (float)(trans_size);
}
/*
* normalize the cufft inverse result by dividing by the number of elements per batch
*/
__global__ void normalize(cufftComplex *data, int size) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
int j = threadIdx.y + blockIdx.y*blockDim.y;
int index = idx2(i, j, cu_const_params.image_width);
data[index].x = data[index].x / (float)(size);
data[index].y = data[index].y / (float)(size);
}
/*
 * Apply hard thresholding to each group in d_transformed_stacks and count the
 * non-zero coefficients, which determine the group's aggregation weight.
 */
__global__ void hard_filter(cufftComplex *d_transformed_stacks, float *d_weight) {
int group_id = threadIdx.x + blockIdx.x * blockDim.x;
if (group_id >= cu_const_params.total_ref_patches) {
return;
}
int patch_size = cu_const_params.patch_size;
int non_zero = 0;
float threshold = cu_const_params.lambda_3d * cu_const_params.lambda_3d *
cu_const_params.sigma * cu_const_params.sigma * patch_size * patch_size * cu_const_params.max_group_size;
// printf("Threshold %f\n", threshold);
int offset = group_id*cu_const_params.max_group_size * patch_size * patch_size;
float x, y, val;
for (int i=0; i<patch_size*patch_size*cu_const_params.max_group_size;i++) {
x = d_transformed_stacks[offset + i].x;
y = d_transformed_stacks[offset + i].y;
val = x*x + y*y;
if (val < threshold) {
// printf("below threshold\n");
x = 0.0f;
y = 0.0f;
} else {
++non_zero;
}
d_transformed_stacks[offset + i].x = x;
d_transformed_stacks[offset + i].y = y;
}
// guard against an all-zero group so the weight does not become infinite
d_weight[group_id] = non_zero > 0 ? 1.0f / (float)non_zero : 1.0f;
}
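// Hard thresholding keeps a 3D transform coefficient only if its squared
// magnitude exceeds (lambda_3d * sigma)^2 scaled by the unnormalized transform
// size patch_size^2 * max_group_size; the per-group weight used later during
// aggregation is the reciprocal of the retained-coefficient count.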
__global__ void get_wiener_coef(cufftComplex *d_transformed_stacks, float *d_wien_coef) {
int group_id = threadIdx.x + blockIdx.x * blockDim.x;
if (group_id >= cu_const_params.total_ref_patches) {
return;
}
const int patch_size = cu_const_params.patch_size;
const int sigma = cu_const_params.sigma;
const int norm_factor = patch_size * patch_size * cu_const_params.max_group_size;
int offset = group_id*cu_const_params.max_group_size * patch_size * patch_size;
float val;
for (int i=0; i<patch_size*patch_size*cu_const_params.max_group_size;i++) {
val = norm2(d_transformed_stacks[offset + i]) / (float)norm_factor;
d_wien_coef[offset + i] = val / (val + sigma * sigma);
}
}
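// Wiener shrinkage from the first-step estimate: with the per-coefficient
// energy e = |Y|^2 / (patch_size^2 * max_group_size) of the estimate's 3D
// transform, the coefficient weight is e / (e + sigma^2), which is applied to
// the noisy group's transform in apply_wiener_coef below.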
__global__ void apply_wiener_coef(cufftComplex *d_transformed_stacks, float *d_wien_coef, float *d_wien_weight) {
int group_id = threadIdx.x + blockIdx.x * blockDim.x;
if (group_id >= cu_const_params.total_ref_patches) {
return;
}
const int patch_size = cu_const_params.patch_size;
int offset = group_id*cu_const_params.max_group_size * patch_size * patch_size;
float wien_acc = 0.0f;
for (int i=0; i<patch_size*patch_size*cu_const_params.max_group_size;i++) {
float wien = d_wien_coef[offset+i];
d_transformed_stacks[offset + i].x *= wien;
d_transformed_stacks[offset + i].y *= wien;
wien_acc += wien * wien;
}
d_wien_weight[group_id] = 1.0f / wien_acc;
}
/*
* Each thread maps to a group, d_transformed_stacks is organized as (w, h, patch in group)
*/
__global__ void fill_patch_major_from_source(Q* d_stacks, uint* d_num_patches_in_stack, uchar* input_data, cufftComplex* d_transformed_stacks) {
int group_id = threadIdx.x + blockIdx.x * blockDim.x;
if (group_id >= cu_const_params.total_ref_patches) {
return;
}
int width = cu_const_params.image_width;
int patch_size = cu_const_params.patch_size;
// start patch num
int start = group_id*cu_const_params.max_group_size;
int offset = start * patch_size * patch_size;
for (int z=0;z<d_num_patches_in_stack[group_id];z++) {
// fill in the actual data
uint patch_x = d_stacks[z+start].position.x;
uint patch_y = d_stacks[z+start].position.y;
for (int k=0;k<patch_size*patch_size;k++) {
int index = idx2(patch_x + (k%patch_size), patch_y + (k/patch_size), width);
int output_index = idx2(k, z, patch_size*patch_size);
d_transformed_stacks[output_index+offset].x = (float)(input_data[index]);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
// Class member functions
///////////////////////////////////////////////////////////////////////////////////////
/*
* Initialize params struct
*/
Bm3d::Bm3d() {
h_width = 0;
h_height = 0;
h_channels = 0;
d_noisy_image = NULL;
d_denoised_image = NULL;
d_stacks = NULL;
d_num_patches_in_stack = NULL;
d_weight = NULL;
d_wien_coef = NULL;
d_kaiser_window = NULL;
}
Bm3d::~Bm3d() {
}
/*
* Set first step params
*/
void Bm3d::set_fst_step_param() {
}
/*
* Set second step params
*/
void Bm3d::set_2nd_step_param() {
}
/*
* Set device params and allocate device memories
*/
void Bm3d::set_device_param() {
total_patches = (h_width - h_fst_step_params.patch_size + 1) * (h_height - h_fst_step_params.patch_size + 1);
total_ref_patches = ((h_width - h_fst_step_params.patch_size) / h_fst_step_params.stripe + 1) * ((h_height - h_fst_step_params.patch_size) / h_fst_step_params.stripe + 1);
// copy original image to cuda
const uint size = h_width * h_height;
cudaMalloc(&d_noisy_image, sizeof(uchar) * h_channels * size);
cudaMalloc(&d_stacks, sizeof(Q) * total_ref_patches * h_fst_step_params.max_group_size);
cudaMalloc(&d_num_patches_in_stack, sizeof(uint) * total_ref_patches);
cudaMalloc(&d_transformed_stacks, sizeof(cufftComplex) * h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches);
cudaMalloc(&d_numerator, sizeof(float) * size);
cudaMalloc(&d_denominator, sizeof(float) * size);
cudaMalloc(&d_weight, sizeof(float) * total_ref_patches);
cudaMalloc(&d_wien_coef, sizeof(float) * h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches);
cudaMalloc(&d_wien_weight, sizeof(float) * total_ref_patches);
cudaMalloc(&d_denoised_image, sizeof(uchar) * size);
// Only use the generic params for now
GlobalConstants params;
params.image_width = h_width;
params.image_height = h_height;
params.image_channels = h_channels;
params.patch_size = h_fst_step_params.patch_size;
params.searching_window_size = h_fst_step_params.searching_window_size;
params.stripe = h_fst_step_params.stripe;
params.max_group_size = h_fst_step_params.max_group_size;
params.distance_threshold_1 = h_fst_step_params.distance_threshold_1;
params.distance_threshold_2 = h_fst_step_params.distance_threshold_2;
params.sigma = h_fst_step_params.sigma;
params.lambda_3d = h_fst_step_params.lambda_3d;
params.beta = h_fst_step_params.beta;
params.total_ref_patches = total_ref_patches;
cudaMemcpyToSymbol(cu_const_params, &params, sizeof(GlobalConstants));
int dim3D[3] = {h_fst_step_params.patch_size, h_fst_step_params.patch_size, h_fst_step_params.max_group_size};
int size_3d = h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size;
if(cufftPlanMany(&plan3D, 3, dim3D,
NULL, 1, size_3d,
NULL, 1, size_3d,
CUFFT_C2C, total_ref_patches) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT Plan error: Plan failed");
return;
}
}
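// The batched cufftPlanMany plan above performs one (unnormalized) 3D
// complex-to-complex FFT of size patch_size x patch_size x max_group_size per
// reference patch, with total_ref_patches transforms per cufftExecC2C call.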
/*
 * Copy the noisy source image into device memory
 */
void Bm3d::copy_image_to_device(uchar *src_image) {
// set width and height
cudaMemcpy(d_noisy_image, src_image, sizeof(uchar) * h_channels * h_height * h_width, cudaMemcpyHostToDevice);
}
void Bm3d::free_device_params() {
if (d_noisy_image) {
cudaFree(d_noisy_image);
}
}
void Bm3d::clean_up_buffer() {
// clean up buffer
cudaMemset(d_numerator, 0, sizeof(float)*h_width*h_height);
cudaMemset(d_denominator, 0, sizeof(float)*h_width*h_height);
// cudaMemset(d_stacks, 0, sizeof(Q) * total_ref_patches * h_fst_step_params.max_group_size);
// cudaMemset(d_num_patches_in_stack, 0, sizeof(uint) * total_ref_patches);
cudaMemset(d_transformed_stacks, 0, sizeof(cufftComplex) * h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches);
cudaMemset(d_weight, 0, sizeof(float) * total_ref_patches);
cudaMemset(d_wien_coef, 0, sizeof(float) * h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches);
cudaMemset(d_wien_weight, 0, sizeof(float) * total_ref_patches);
cudaMemset(d_denoised_image, 0, sizeof(uchar) * h_width*h_height);
}
void Bm3d::set_up_realtime(int width, int height, int channels) {
h_width = width;
h_height = height;
h_channels = channels;
set_device_param();
}
/*
* need to call set_up_realtime first
*/
void Bm3d::realtime_denoise(uchar *src_image,
uchar *dst_image
) {
copy_image_to_device(src_image);
clean_up_buffer();
denoise_fst_step();
cudaMemset(d_transformed_stacks, 0, sizeof(cufftComplex) * h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches);
denoise_2nd_step();
cudaMemcpy(dst_image, d_denoised_image, sizeof(uchar) * h_width * h_height, cudaMemcpyDeviceToHost);
}
/*
* Take an image and run the algorithm to denoise.
*/
void Bm3d::denoise(uchar *src_image,
uchar *dst_image,
int width,
int height,
int sigma,
int channels,
int step,
int verbose = 1) {
Stopwatch init_time;
Stopwatch first_step;
Stopwatch sed_step;
h_width = width;
h_height = height;
h_channels = channels;
init_time.start();
set_device_param();
init_time.stop();
copy_image_to_device(src_image);
first_step.start();
denoise_fst_step();
first_step.stop();
sed_step.start();
if (step == 2) {
denoise_2nd_step();
}
sed_step.stop();
// copy image from device to host
printf("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
printf("Init takes %f\n", init_time.getSeconds());
printf("First step takes %f\n", first_step.getSeconds());
printf("Second step takes %f\n", sed_step.getSeconds());
const uint num_pixels = h_width * h_height;
cudaMemcpy(dst_image, d_denoised_image, sizeof(uchar) * num_pixels, cudaMemcpyDeviceToHost);
}
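/*
 * Usage sketch (added for illustration, not part of the original source): one way the
 * one-shot denoise() entry point above might be driven from host code, assuming an 8-bit
 * single-channel image buffer and that Bm3d is default-constructible with its step
 * parameters already configured. All values below are placeholders.
 */
static void example_denoise_usage(uchar *noisy, uchar *denoised, int width, int height) {
    Bm3d bm3d;                 // assumed default constructor sets up h_fst_step_params
    const int sigma = 25;      // assumed noise standard deviation
    const int channels = 1;    // grayscale input
    const int step = 2;        // run both the hard-thresholding and Wiener steps
    bm3d.denoise(noisy, denoised, width, height, sigma, channels, step, 1 /*verbose*/);
}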
/*
* Perform the first step denoise
*/
void Bm3d::denoise_fst_step() {
//Block matching, each thread maps to a ref patch
do_block_matching(d_noisy_image, h_fst_step_params.distance_threshold_1);
//gather patches
arrange_block(d_noisy_image);
// perform 3D dct transform;
if (cufftExecC2C(plan3D, d_transformed_stacks, d_transformed_stacks, CUFFT_FORWARD) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: 3D Forward failed");
return;
}
// hard thresholding and normalize
hard_threshold();
// perform inverse 3D dct transform;
if (cufftExecC2C(plan3D, d_transformed_stacks, d_transformed_stacks, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: 3D inverse failed");
return;
}
// Need to normalize 3D inverse result by dividing patch_size * patch_size
// aggregate to single image by writing into buffer
do_aggregation(d_weight);
}
/*
* Perform the second step denoise
*/
void Bm3d::denoise_2nd_step() {
//Block matching estimate image, each thread maps to a ref patch
do_block_matching(d_denoised_image, h_fst_step_params.distance_threshold_2);
//gather patches for estimate image
arrange_block(d_denoised_image);
// perform 3d transform for estimate groups
if (cufftExecC2C(plan3D, d_transformed_stacks, d_transformed_stacks, CUFFT_FORWARD) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: 3D Forward failed");
return;
}
// calculate Wiener coefficient for each estimate group
cal_wiener_coef();
// gather noisy image patches according to estimate block matching result
arrange_block(d_noisy_image);
// perform 3d transform on original image
if (cufftExecC2C(plan3D, d_transformed_stacks, d_transformed_stacks, CUFFT_FORWARD) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: 3D Forward failed");
return;
}
// apply wiener coefficient to each group of transformed noisy data
apply_wien_filter();
// inverse 3d transform
if (cufftExecC2C(plan3D, d_transformed_stacks, d_transformed_stacks, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: 3D Forward failed");
return;
}
// aggregate to single image by writing into buffer
do_aggregation(d_wien_weight);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(code));
return;
}
}
void Bm3d::test_block_matching(uchar *input_image, int width, int height) {
// generate a dummy image
printf("testing block_matching\n");
if (!input_image) {
const int img_width = 40; // a 40 by 40 checkerboard of 8x8 patch
const int patch_width = 8;
uchar *dummy_image = (uchar *)malloc(img_width * img_width * sizeof(uchar));
bool isWhite = false;
for (int y = 0; y < img_width; y += patch_width) {
for (int x = 0; x < img_width; x += patch_width) {
// (x, y) is the top-left corner coordinate
for (int j = 0; j < patch_width; ++j) {
for (int i = 0; i < patch_width; ++i) {
// (x + i, y + j) is the pixel coordinate
int idx = idx2(x+i, y+j, img_width);
dummy_image[idx] = isWhite ? 255 : 0;
}
}
isWhite = !isWhite;
}
}
// set up the parameters and consts
input_image = dummy_image;
}
h_width = width;
h_height = height;
h_channels = 1;
set_device_param();
copy_image_to_device(input_image);
printf("width, height: %d %d\n", width, height);
// determine how many threads we need to spawn
const int num_ref_patches_x = (h_width - h_fst_step_params.patch_size) / h_fst_step_params.stripe + 1;
// printf("total_ref_patches %d\n", total_ref_patches);
// const int total_num_threads = total_ref_patches;
// const int threads_per_block = 256;
// const int num_blocks = (total_num_threads + threads_per_block - 1) / threads_per_block;
// printf("total_num_threads %d num_block %d\n", total_ref_patches, num_blocks);
// // cudaError_t code = cudaGetLastError();
// // if (code != cudaSuccess) {
// // fprintf(stderr, "Cuda error: %s\n", cudaGetErrorString(code));
// // return;
// // }
// // call our block matching magic
// block_matching<<<num_blocks, threads_per_block>>>(d_stacks, d_num_patches_in_stack);
do_block_matching(input_image, h_fst_step_params.distance_threshold_1);
Q *h_stacks = (Q *)malloc(sizeof(Q) * total_ref_patches * h_fst_step_params.max_group_size);
cudaMemcpy(h_stacks, d_stacks, sizeof(Q) * total_ref_patches * h_fst_step_params.max_group_size, cudaMemcpyDeviceToHost);
uint *h_num_patches_in_stack = (uint *)malloc(sizeof(uint) * total_ref_patches);
cudaMemcpy(h_num_patches_in_stack, d_num_patches_in_stack, sizeof(uint) * total_ref_patches, cudaMemcpyDeviceToHost);
// print the first stack
const int which_stack = 13970;
const int stack_x = which_stack % num_ref_patches_x;
const int stack_y = which_stack / num_ref_patches_x;
h_stacks = &h_stacks[which_stack * h_fst_step_params.max_group_size];
printf("number of patches in stack %d: %d\n", which_stack, h_num_patches_in_stack[which_stack]);
for (int i = 0; i < h_num_patches_in_stack[which_stack]; ++i) {
const uint start_x = h_stacks[i].position.x;
const uint start_y = h_stacks[i].position.y;
printf("distance %d, x %d y %d\n", h_stacks[i].distance, start_x, start_y);
for (int y = 0; y < h_fst_step_params.patch_size; ++y) {
for (int x = 0; x < h_fst_step_params.patch_size; ++x) {
const int idx = idx2( start_x + x, start_y + y, width);
input_image[idx] = 255;
}
}
}
// set the original ref patch to 0
for (int y = 0; y < h_fst_step_params.patch_size; ++y) {
for (int x = 0; x < h_fst_step_params.patch_size; ++x) {
const int idx = idx2(
stack_x * h_fst_step_params.stripe + x,
stack_y * h_fst_step_params.stripe + y,
width);
input_image[idx] = 0;
}
}
// for (int y = 0; y < img_width; y += 1) {
// for (int x = 0; x < img_width; x += 1) {
// int idx = idx2(x, y, img_width);
// switch(input_image[idx]) {
// case 255:
// printf("x");
// break;
// case 127:
// printf("o");
// break;
// case 110:
// printf("*");
// break;
// default:
// printf(" ");
// }
// }
// printf("\n");
// }
free_device_params();
}
/*
 * arrange_block - gathers pixel data for every stacked patch according to the block-matching
 *                 indices. The input is an array of patch descriptors (Q), each holding the
 *                 patch's top-left position; every N entries form one group. The launched
 *                 kernel writes each group into a contiguous array of cufftComplex values,
 *                 with the x (real) component holding the pixel value and the y (imaginary)
 *                 component set to 0.f.
 */
void Bm3d::arrange_block(uchar* input_data) {
// input: Q* each struct is a patch with top left index
// output: d_transformed_stacks, each patch got patch*patch size continuous chunk
// each group will be assigned a thread
Stopwatch arrange;
arrange.start();
int thread_per_block = 512;
int num_blocks = (total_ref_patches + thread_per_block - 1) / thread_per_block;
fill_patch_major_from_source<<<num_blocks, thread_per_block>>>(d_stacks, d_num_patches_in_stack, input_data, d_transformed_stacks);
cudaDeviceSynchronize();
arrange.stop();
// printf("Arrange block takes %f\n", arrange.getSeconds());
}
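/*
 * Illustration (added, not in the original): the patch-major offset implied by the comment
 * above, assuming each patch occupies a contiguous patch_size*patch_size chunk and groups of
 * max_group_size patches are laid out back to back in d_transformed_stacks.
 */
__host__ __device__ inline int patch_major_offset(int group, int patch, int py, int px,
                                                  int patch_size, int max_group_size) {
    // group slab -> patch chunk -> row -> column
    return ((group * max_group_size + patch) * patch_size + py) * patch_size + px;
}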
void Bm3d::test_arrange_block(uchar *input_data) {
int size = h_fst_step_params.patch_size * h_fst_step_params.patch_size * h_fst_step_params.max_group_size * total_ref_patches;
Q* test_q = (Q*)malloc(sizeof(Q)*total_ref_patches * h_fst_step_params.max_group_size);
for (int i=0;i<2*h_fst_step_params.max_group_size; i++) {
test_q[i].position.x = i;
test_q[i].position.y = 0;
}
float* h_data = (float*)malloc(sizeof(float) * size);
float* d_data;
cudaMalloc(&d_data, sizeof(float) * size);
cudaMemcpy(d_stacks, test_q, sizeof(Q) * total_ref_patches * h_fst_step_params.max_group_size, cudaMemcpyHostToDevice);
uint* h_num_patches = (uint*)calloc(total_ref_patches, sizeof(uint));
h_num_patches[0] = h_fst_step_params.max_group_size;
h_num_patches[1] = h_fst_step_params.max_group_size - 2;
cudaMemcpy(d_num_patches_in_stack, h_num_patches, sizeof(uint)*total_ref_patches, cudaMemcpyHostToDevice);
arrange_block(d_noisy_image);
if (cufftExecC2C(plan, d_transformed_stacks, d_transformed_stacks, CUFFT_FORWARD) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return;
}
if (cufftExecC2C(plan, d_transformed_stacks, d_transformed_stacks, CUFFT_INVERSE) != CUFFT_SUCCESS) {
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return;
}
int threads_per_block = 512;
int num_blocks = (size + threads_per_block - 1) / threads_per_block;
complex2real<<<num_blocks, threads_per_block>>>(d_transformed_stacks, d_data, size, h_fst_step_params.patch_size*h_fst_step_params.patch_size);
cudaMemcpy(h_data, d_data, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed results copy\n");
return;
}
for (int i=0;i<2*h_fst_step_params.patch_size*h_fst_step_params.patch_size*h_fst_step_params.max_group_size;i++) {
int x = i/(h_fst_step_params.patch_size*h_fst_step_params.patch_size);
int y = 0;
if (i % (h_fst_step_params.patch_size*h_fst_step_params.patch_size) == 0) {
printf("Patch (%d, %d)\n", x, 0);
}
int z = i - x*(h_fst_step_params.patch_size*h_fst_step_params.patch_size);
int index = idx2(x+(z%h_fst_step_params.patch_size), y+(z/h_fst_step_params.patch_size), h_width);
printf("Transform: %.3f vs Original: %d\n",
h_data[i],
input_data[index]
);
}
}
void Bm3d::test_aggregation(
uchar *src_image,
uint width,
uint height,
uchar *dst_image) {
// set environmental variables
h_width = width;
h_height = height;
h_channels = 1;
set_device_param();
copy_image_to_device(src_image);
// step 0: block matching
do_block_matching(src_image, h_fst_step_params.distance_threshold_1);
// step 1: arrange the data into stacks of pixels
arrange_block(d_noisy_image);
// step 2: fill the weights with dummy values;
float *weights = (float*)malloc(total_ref_patches * sizeof(float));
for (int i = 0; i < total_ref_patches; ++i) {
weights[i] = i % 32 + 1;
}
cudaMemcpy(d_weight, weights, sizeof(float) * total_ref_patches, cudaMemcpyHostToDevice);
// step 3: do aggregation
do_aggregation(d_weight);
const uint num_pixels = h_width * h_height;
cudaMemcpy(dst_image, d_denoised_image, sizeof(uchar) * num_pixels, cudaMemcpyDeviceToHost);
}
/*
* do_block_matching - launch kernel to run block matching
*/
void Bm3d::do_block_matching(uchar* input_image, const uint distance_threshold) {
// determine how many threads we need to spawn
Stopwatch bm_time;
bm_time.start();
const int total_num_threads = total_ref_patches;
const int threads_per_block = 512;
const int num_blocks = (total_num_threads + threads_per_block - 1) / threads_per_block;
block_matching<<<num_blocks, threads_per_block>>>(d_stacks, d_num_patches_in_stack, input_image, distance_threshold);
cudaDeviceSynchronize();
bm_time.stop();
// printf("Block Matching: %f\n", bm_time.getSeconds());
}
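/*
 * Note (added): block_matching is assumed to follow the standard BM3D scheme, with one thread
 * per reference patch scanning a local search window, ranking candidate patches by their
 * squared distance to the reference, and keeping at most max_group_size patches whose distance
 * falls below the given threshold (written to d_stacks / d_num_patches_in_stack).
 */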
void Bm3d::hard_threshold() {
Stopwatch hard_threshold;
hard_threshold.start();
int thread_per_block = 512;
int num_blocks = (total_ref_patches + thread_per_block - 1) / thread_per_block;
hard_filter<<<num_blocks, thread_per_block>>>(d_transformed_stacks, d_weight);
cudaDeviceSynchronize();
hard_threshold.stop();
// printf("Hard threshold takes %.5f\n", hard_threshold.getSeconds());
}
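/*
 * Hedged sketch (added, not the project's hard_filter kernel): the classic BM3D
 * hard-thresholding step zeroes small 3D-transform coefficients and derives the group weight
 * from the number of retained coefficients. The kernel name and explicit parameter list are
 * assumptions; the real kernel reads its sizes from cu_const_params.
 */
__global__ void hard_filter_sketch(cufftComplex *stacks, float *weights,
                                   int coeffs_per_group, float threshold, int num_groups) {
    int g = blockIdx.x * blockDim.x + threadIdx.x;
    if (g >= num_groups) return;
    cufftComplex *group = stacks + g * coeffs_per_group;
    int retained = 0;
    for (int i = 0; i < coeffs_per_group; ++i) {
        float magnitude = sqrtf(group[i].x * group[i].x + group[i].y * group[i].y);
        if (magnitude < threshold) {
            group[i].x = 0.f;   // kill coefficients below the threshold
            group[i].y = 0.f;
        } else {
            ++retained;
        }
    }
    weights[g] = retained > 0 ? 1.f / retained : 1.f;  // aggregation weight ~ 1 / N_retained
}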
void Bm3d::cal_wiener_coef() {
Stopwatch wiener_coef;
wiener_coef.start();
int thread_per_block = 512;
int num_blocks = (total_ref_patches + thread_per_block - 1) / thread_per_block;
get_wiener_coef<<<num_blocks, thread_per_block>>>(d_transformed_stacks, d_wien_coef);
cudaDeviceSynchronize();
wiener_coef.stop();
// printf("Get wiener takes %.5f\n", wiener_coef.getSeconds());
}
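/*
 * Note (added): the standard Wiener shrinkage factor computed here is
 *   w = |E|^2 / (|E|^2 + sigma^2)
 * per transform coefficient of the estimate group E; get_wiener_coef is assumed to store these
 * factors in d_wien_coef, and apply_wiener_coef (below) to multiply them into the transformed
 * noisy groups before the inverse 3D transform.
 */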
void Bm3d::apply_wien_filter() {
Stopwatch apply_wiener;
apply_wiener.start();
int thread_per_block = 512;
int num_blocks = (total_ref_patches + thread_per_block - 1) / thread_per_block;
apply_wiener_coef<<<num_blocks, thread_per_block>>>(d_transformed_stacks, d_wien_coef, d_wien_weight);
cudaDeviceSynchronize();
apply_wiener.stop();
// printf("Apply wiener takes %.5f\n", apply_wiener.getSeconds());
}
void Bm3d::do_aggregation(float* weight) {
Stopwatch ag_time;
ag_time.start();
const uint num_threads_per_block = 512;
// step 1: do aggregation, one thread per stack
uint num_blocks = (total_ref_patches + num_threads_per_block - 1) / num_threads_per_block;
aggregate<<<num_blocks, num_threads_per_block>>>(
d_stacks,
d_num_patches_in_stack,
weight,
d_transformed_stacks,
d_numerator,
d_denominator
);
// step 2: reduction. calculate how many pixels
const uint num_pixels = h_width * h_height;
num_blocks = (num_pixels + num_threads_per_block - 1) / num_threads_per_block;
reduction<<<num_blocks, num_threads_per_block>>>(d_numerator, d_denominator, d_denoised_image, num_pixels);
cudaDeviceSynchronize();
ag_time.stop();
// printf("Aggregation: %f\n", ag_time.getSeconds());
}
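/*
 * Hedged sketch (added, not the project's reduction kernel): the per-pixel divide that the
 * reduction<<<...>>> launch above is expected to perform, turning the accumulated
 * numerator/denominator buffers into the final 8-bit denoised image. Clamping is an assumption.
 */
__global__ void reduction_sketch(const float *numerator, const float *denominator,
                                 uchar *denoised, uint num_pixels) {
    uint idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_pixels) return;
    float value = denominator[idx] > 0.f ? numerator[idx] / denominator[idx] : 0.f;
    value = fminf(fmaxf(value, 0.f), 255.f);   // keep the result in the valid pixel range
    denoised[idx] = (uchar)value;
}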
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "cuda_util.h"
#include "mat.h"
#include "padding_cuda.h"
#include <iostream>
namespace ncnn {
template<typename T>
__global__ void gpu_copy_make_border_image_type0(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int top, int left, int type, T v)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
// const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= dst_info.h || column >= dst_info.w)
return;
T* outptr = dst + row * dst_info.w + column;
T output_value = v; // initialize whole matrix with padding
if ((row >= top && row < top + src_info.h)
&& (column >= left && column < left + src_info.w))
{
const T* inptr = src + (row - top) * src_info.w + column - left;
output_value = *inptr;
}
*outptr = output_value;
}
template<typename T>
__global__ void gpu_copy_make_border_image_type1(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int top, int left, int type, T v)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dst_info.h || column >= dst_info.w)
return;
T* outptr = dst + row * dst_info.w + column;
T output_value = 0;
if (row < top) {
if (column < left )
output_value = src[0];
else if (column >= left + src_info.w)
output_value = src[src_info.w - 1];
else
output_value = src[column - left];
}
else if (row >= top && row < top + src_info.h)
{
const T* inptr = src + (row - top) * src_info.w;
if (column < left)
output_value = inptr[0];
else if (column >= left+src_info.w)
output_value = inptr[src_info.w - 1];
else
output_value = inptr[column - left];
}
else if (row >= top + src_info.h) {
const T* inptr = src + (src_info.h - 1) * src_info.w;
if (column < left)
output_value = *inptr;
else if (column >= left + src_info.w)
output_value = inptr[src_info.w - 1];
else
output_value = inptr[column - left];
}
*outptr = output_value;
}
template<typename T>
__global__ void gpu_copy_make_border_image_type2(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int top, int left, int type, T v)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= dst_info.h || column >= dst_info.w)
return;
T* outptr = dst + row * dst_info.w + column;
T output_value = 0;
if (row < top) {
const T* inptr = src + (top - row) * src_info.w;
if (column < left )
output_value = inptr[left - column];
else if (column >= left && column < left + src_info.w)
output_value = inptr[column - left];
else if (column < dst_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
}
else if (row >= top && row < top + src_info.h)
{
const T* inptr = src + (row - top) * src_info.w;
if (column < left)
output_value = inptr[left - column];
else if (column >= left+src_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
else
output_value = inptr[column - left];
}
else if (row >= top + src_info.h) {
int diff = dst_info.h - top - src_info.h;
const T* inptr = src + (src_info.h - (diff - (dst_info.h-row)) - 2) * src_info.w;
if (column < left)
output_value = inptr[left - column];
else if (column >= left + src_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
else
output_value = inptr[column - left];
}
*outptr = output_value;
}
template<typename T>
__global__ void gpu_copy_make_border_image_3d_type0(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int front, int top, int left, int type, ncnn::GPUPaddingValue<T> values)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= dst_info.h || column >= dst_info.w || channel >= dst_info.c)
return;
const int dst_channel_step = dst_info.cstep * channel;
T* outptr = dst + dst_channel_step + row * dst_info.w + column;
T output_value{};
T padding_value{};
if (values.per_channel_pad_data_size)
padding_value = values.per_channel_values[channel];
else
padding_value = values.value;
output_value = padding_value;
if (channel < front || channel >= src_info.c+front) {
//do nothing
}
else if ((row >= top && row < top + src_info.h)
&& (column >= left && column < left + src_info.w))
{
const T* inptr = src + (channel - front) * src_info.cstep + (row - top) * src_info.w + column - left;
output_value = *inptr;
}
*outptr = output_value;
}
template<typename T>
__global__ void gpu_copy_make_border_image_3d_type1(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int front, int top, int left, int type, ncnn::GPUPaddingValue<T> values)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= dst_info.h || column >= dst_info.w || channel >= dst_info.c)
return;
int q = channel - front;
q = q <= 0 ? 0 : q;
q = q >= src_info.c - 1 ? src_info.c - 1 : q;
const int dst_channel_step = dst_info.cstep * channel;
const int src_channel_step = src_info.cstep * q;
T* outptr = dst + dst_channel_step + row * dst_info.w + column;
T padding_value{};
if (values.per_channel_pad_data_size)
padding_value = values.per_channel_values[channel];
else
padding_value = values.value;
T output_value = padding_value;
if (row < top) {
const T* inptr = src + src_channel_step;
if (column < left )
output_value = inptr[0];
else if (column >= left + src_info.w)
output_value = inptr[src_info.w - 1];
else
output_value = inptr[column - left];
}
else if (row >= top && row < top + src_info.h)
{
const T* inptr = src + src_channel_step + (row - top) * src_info.w;
if (column < left)
output_value = inptr[0];
else if (column >= left+src_info.w)
output_value = inptr[src_info.w - 1];
else
output_value = inptr[column - left];
}
else if (row >= top + src_info.h) {
const T* inptr = src + src_channel_step + (src_info.h - 1) * src_info.w;
if (column < left)
output_value = *inptr;
else if (column >= left + src_info.w)
output_value = inptr[src_info.w - 1];
else
output_value = inptr[column - left];
}
*outptr = output_value;
}
template<typename T>
__global__ void gpu_copy_make_border_image_3d_type2(const T* src, const CudaMatInfo src_info, T* dst, const CudaMatInfo dst_info,
int front, int top, int left, int type, ncnn::GPUPaddingValue<T> values)
{
const int row = blockIdx.y * blockDim.y + threadIdx.y;
const int column = blockIdx.x * blockDim.x + threadIdx.x;
const int channel = blockIdx.z * blockDim.z + threadIdx.z;
if (row >= dst_info.h || column >= dst_info.w || channel >= dst_info.c)
return;
int q = channel - front;
q = abs(q);
q = (src_info.c - 1) - abs(q - (src_info.c - 1));
const int dst_channel_step = dst_info.cstep * channel;
const int src_channel_step = src_info.cstep * q;
T* outptr = dst + dst_channel_step + row * dst_info.w + column;
T padding_value{};
if (values.per_channel_pad_data_size)
padding_value = values.per_channel_values[channel];
else
padding_value = values.value;
T output_value = padding_value;
if (row < top) {
const T* inptr = src + src_channel_step + (top - row) * src_info.w;
if (column < left )
output_value = inptr[left - column];
else if (column >= left && column < left + src_info.w)
output_value = inptr[column - left];
else if (column < dst_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
}
else if (row >= top && row < top + src_info.h)
{
const T* inptr = src + src_channel_step + (row - top) * src_info.w;
if (column < left)
output_value = inptr[left - column];
else if (column >= left+src_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
else
output_value = inptr[column - left];
}
else if (row >= top + src_info.h) {
int diff = dst_info.h - top - src_info.h;
const T* inptr = src + src_channel_step + (src_info.h - (diff - (dst_info.h-row)) - 2) * src_info.w;
if (column < left)
output_value = inptr[left - column];
else if (column >= left + src_info.w)
output_value = inptr[src_info.w - (column - left - src_info.w) - 2];
else
output_value = inptr[column - left];
}
*outptr = output_value;
}
int copy_make_border_image(const CudaMat& src, CudaMat& dst, int top, int left, int type, PaddingValue value, PaddingVariableType padding_type)
{
const ncnn::CudaMatInfo input_info{src};
const ncnn::CudaMatInfo output_info{dst};
int thread_per_block_x = output_info.w;
if (thread_per_block_x > 32) thread_per_block_x = 32;
int thread_per_block_y = output_info.h;
if (thread_per_block_y > 8) thread_per_block_y = 8;
int thread_per_block_z = output_info.c;
if (thread_per_block_z > 1) thread_per_block_z = 1;
const int total_number_of_columns = output_info.w;
const int total_number_of_rows = output_info.h;
const int total_number_of_channels = output_info.c;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
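// grid size uses ceiling division so every output element is covered:
// ceil(n / block) == (n - 1) / block + 1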
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
if (type == 0)
{
if (padding_type == PaddingVariableType::type_char)
{
char value_char = value.c;
gpu_copy_make_border_image_type0<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
top, left, type, value_char);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
unsigned short value_unsigned_short = value.sh;
gpu_copy_make_border_image_type0<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
top, left, type, value_unsigned_short);
}
else if (padding_type == PaddingVariableType::type_float)
{
float value_float = value.fl;
gpu_copy_make_border_image_type0<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
top, left, type, value_float);
}
}
else if (type == 1)
{
if (padding_type == PaddingVariableType::type_char)
{
char value_char = value.c;
gpu_copy_make_border_image_type1<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
top, left, type, value_char);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
unsigned short value_unsigned_short = value.sh;
gpu_copy_make_border_image_type1<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
top, left, type, value_unsigned_short);
}
else if (padding_type == PaddingVariableType::type_float)
{
float value_float = value.fl;
gpu_copy_make_border_image_type1<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
top, left, type, value_float);
}
}
else if (type == 2)
{
if (padding_type == PaddingVariableType::type_char)
{
char value_char = value.c;
gpu_copy_make_border_image_type2<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
top, left, type, value_char);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
unsigned short value_unsigned_short = value.sh;
gpu_copy_make_border_image_type2<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
top, left, type, value_unsigned_short);
}
else if (padding_type == PaddingVariableType::type_float)
{
float value_float = value.fl;
gpu_copy_make_border_image_type2<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
top, left, type, value_float);
}
}
return 0;
}
int copy_make_border_image_3d(const CudaMat& src, CudaMat& dst, int front, int top, int left, int type,
PaddingValue value, PaddingVariableType padding_type,
void *gpu_per_channel_padding_data, int per_channel_pad_data_size)
{
const ncnn::CudaMatInfo input_info{src};
const ncnn::CudaMatInfo output_info{dst};
int thread_per_block_x = output_info.w;
if (thread_per_block_x > 32) thread_per_block_x = 32;
int thread_per_block_y = output_info.h;
if (thread_per_block_y > 8) thread_per_block_y = 8;
int thread_per_block_z = output_info.c;
if (thread_per_block_z > 1) thread_per_block_z = 1;
const int total_number_of_columns = output_info.w;
const int total_number_of_rows = output_info.h;
const int total_number_of_channels = output_info.c;
const dim3 block_size(thread_per_block_x, thread_per_block_y, thread_per_block_z);
const dim3 grid_size((total_number_of_columns - 1) / thread_per_block_x + 1,
(total_number_of_rows - 1) / thread_per_block_y + 1,
(total_number_of_channels - 1) / thread_per_block_z + 1);
if (type == 0)
{
if (padding_type == PaddingVariableType::type_char)
{
ncnn::GPUPaddingValue<char> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.c;
padding_values.per_channel_values = static_cast<char*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type0<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
front, top, left, type,
padding_values);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
ncnn::GPUPaddingValue<unsigned short> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.sh;
padding_values.per_channel_values = static_cast<unsigned short*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type0<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
else if (padding_type == PaddingVariableType::type_float)
{
ncnn::GPUPaddingValue<float> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.fl;
padding_values.per_channel_values = static_cast<float*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type0<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
}
else if (type == 1)
{
if (padding_type == PaddingVariableType::type_char)
{
ncnn::GPUPaddingValue<char> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.c;
padding_values.per_channel_values = static_cast<char*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type1<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
front, top, left, type,
padding_values);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
ncnn::GPUPaddingValue<unsigned short> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.sh;
padding_values.per_channel_values = static_cast<unsigned short*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type1<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
else if (padding_type == PaddingVariableType::type_float)
{
ncnn::GPUPaddingValue<float> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.fl;
padding_values.per_channel_values = static_cast<float*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type1<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
}
else if (type == 2)
{
if (padding_type == PaddingVariableType::type_char)
{
ncnn::GPUPaddingValue<char> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.c;
padding_values.per_channel_values = static_cast<char*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type2<char><<<grid_size, block_size>>>(static_cast<const char*>(src.get_craw_data()),
input_info,
static_cast<char*>(dst.get_raw_data()),
output_info,
front, top, left, type,
padding_values);
}
else if (padding_type == PaddingVariableType::type_unsigned_short)
{
ncnn::GPUPaddingValue<unsigned short> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.sh;
padding_values.per_channel_values = static_cast<unsigned short*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type2<unsigned short><<<grid_size, block_size>>>(static_cast<const unsigned short*>(src.get_craw_data()),
input_info,
static_cast<unsigned short*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
else if (padding_type == PaddingVariableType::type_float)
{
ncnn::GPUPaddingValue<float> padding_values{};
padding_values.per_channel_pad_data_size = per_channel_pad_data_size;
padding_values.value = value.fl;
padding_values.per_channel_values = static_cast<float*>(gpu_per_channel_padding_data);
gpu_copy_make_border_image_3d_type2<float><<<grid_size, block_size>>>(static_cast<const float*>(src.get_craw_data()),
input_info,
static_cast<float*>(dst.get_raw_data()),
output_info,
front, top, left, type, padding_values);
}
}
return 0;
}
}
#include <cstdio>
#include <iostream>
#include <string>
#include "picojson.h" // single-header JSON parser used for the config below
// Note: the tensor engine (InitTensorEngine/ShutdownTensorEngine), Node/Network classes and
// data readers used in main() are assumed to be provided by project headers included elsewhere.
using namespace std;
typedef unsigned char byte;
int main(){
InitTensorEngine<gpu>();
picojson::value v;
cin >> v;
if (std::cin.fail()) {
std::cerr << picojson::get_last_error() << std::endl;
return 1;
}
picojson::object& o = v.get<picojson::object>();
string network_type;
picojson::value network_param;
// data
string data_type;
string fn_train_data;
string fn_train_label;
string fn_test_data;
string fn_test_label;
int n_train, n_test;
// learning
int n_time=1, n_batch=1;
int init_epoch = 0;
int iter_per_epoch=100;
int snapshot_interval=100;
int max_epoch=10000;
bool load_model=false;
string sgd = "rmsprop";
float momentum = 0.95f;
float decay = 0.001f;
float base_lr = 0.0001f;
float lr_mult = 0.5f;
int lr_mult_interval = 100;
int sampling_length = 0;
int train_error_limit = 0;
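// Note (added): the config JSON is expected to have four top-level objects, parsed below:
// "env" (python path, output prefix, debug flag), "network" (type + param), "data"
// (dataset type, file names, sizes, load_model flag) and "learning" (batch/time sizes,
// epoch schedule and SGD settings).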
for (picojson::object::const_iterator i = o.begin(); i != o.end(); ++i) {
if(i->first == "env"){
picojson::object env = i->second.get<picojson::object>();
for (picojson::object::const_iterator j = env.begin(); j != env.end(); ++j) {
if(j->first == "python") Global_params::python = j->second.get<string>();
if(j->first == "prefix") Global_params::prefix = j->second.get<string>();
if(j->first == "debug") Global_params::DEBUG = j->second.get<bool>();
}
}
if(i->first == "network"){
picojson::object net = i->second.get<picojson::object>();
for (picojson::object::const_iterator j = net.begin(); j != net.end(); ++j) {
if(j->first == "type") network_type = j->second.get<string>();
if(j->first == "param") network_param = j->second;
}
}
if(i->first == "data"){
picojson::object dat = i->second.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "type") data_type = j->second.get<string>();
if(j->first == "train_data") fn_train_data = j->second.get<string>();
if(j->first == "train_label") fn_train_label = j->second.get<string>();
if(j->first == "test_data") fn_test_data = j->second.get<string>();
if(j->first == "test_label") fn_test_label = j->second.get<string>();
if(j->first == "n_train") n_train = (int)j->second.get<double>();
if(j->first == "n_test") n_test = (int)j->second.get<double>();
if(j->first == "load_model") load_model = j->second.get<bool>();
}
}
if(i->first == "learning"){
picojson::object dat = i->second.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "n_time") n_time = (int)j->second.get<double>();
if(j->first == "n_batch") n_batch = (int)j->second.get<double>();
if(j->first == "init_epoch") init_epoch = (int)j->second.get<double>();
if(j->first == "iter_per_epoch") iter_per_epoch = (int)j->second.get<double>();
if(j->first == "snapshot_interval") snapshot_interval = (int)j->second.get<double>();
if(j->first == "max_epoch") max_epoch = (int)j->second.get<double>();
if(j->first == "sgd") sgd = j->second.get<string>();
if(j->first == "momentum") momentum = (float)j->second.get<double>();
if(j->first == "decay") decay = (float)j->second.get<double>();
if(j->first == "base_lr") base_lr = (float)j->second.get<double>();
if(j->first == "lr_mult") lr_mult = (float)j->second.get<double>();
if(j->first == "lr_mult_interval") lr_mult_interval = (int)j->second.get<double>();
if(j->first == "sampling_length") sampling_length = (int)j->second.get<double>();
if(j->first == "train_error_limit") train_error_limit = (int)j->second.get<double>();
}
}
}
cerr << "load json done" << endl;
// data load
Node<gpu> *node_train_data, *node_train_label, *node_test_data, *node_test_label;
Node<gpu> *node_train_data0, *node_test_data0;
if(data_type=="MNIST"){
node_train_data = new Node<gpu>(n_train, n_batch, n_time, 784, true);
node_train_label = new Node<gpu>(n_train, n_batch, n_time, 10, true);
node_test_data = new Node<gpu>(n_test, n_batch, n_time, 784, true);
node_test_label = new Node<gpu>(n_test, n_batch, n_time, 10, true);
read_mnist_data(fn_train_data, node_train_data, n_train, 784, 16);
read_mnist_label(fn_train_label, node_train_label, n_train, 10, 8);
read_mnist_data(fn_test_data, node_test_data, n_test, 784, 16);
read_mnist_label(fn_test_label, node_test_label, n_test, 10, 8);
}else if(data_type=="w2v"){
node_train_data = new W2vNode<gpu>(n_train, n_batch, n_time, 1001, true);
node_train_label = new W2vNode<gpu>(n_train, n_batch, n_time, 1001, true);
node_test_data = new W2vNode<gpu>(n_test, n_batch, n_time, 1001, true);
node_test_label = new W2vNode<gpu>(n_test, n_batch, n_time, 1001, true);
read_w2v_data<gpu>(fn_train_data, (W2vNode<gpu> *)node_train_data, n_train);
read_w2v_data<gpu>(fn_train_label, (W2vNode<gpu> *)node_train_label, n_train);
read_w2v_data<gpu>(fn_test_data, (W2vNode<gpu> *)node_test_data, n_test);
read_w2v_data<gpu>(fn_test_label, (W2vNode<gpu> *)node_test_label, n_test);
init_w2v<gpu>();
}else if(data_type=="char2"){
node_train_data = new CharacterNode2<gpu>(n_train, n_batch, n_time, 16, true, true);
node_train_label = new CharacterNode2<gpu>(n_train, n_batch, n_time, 5000, true);
node_test_data = new CharacterNode2<gpu>(n_test, n_batch, n_time, 16, true, true);
node_test_label = new CharacterNode2<gpu>(n_test, n_batch, n_time, 5000, true);
read_int_binary_data<gpu>(fn_train_data, (CharacterNode2<gpu> *)node_train_data, n_train);
read_int_binary_data<gpu>(fn_train_label, (CharacterNode2<gpu> *)node_train_label, n_train);
read_int_binary_data<gpu>(fn_test_data, (CharacterNode2<gpu> *)node_test_data, n_test);
read_int_binary_data<gpu>(fn_test_label, (CharacterNode2<gpu> *)node_test_label, n_test);
init_char2<gpu>();
}else if(data_type=="ngram"){
node_train_data0 = new CharacterNode2<gpu>(n_train, n_batch, n_time, 16, true, true);
node_train_label = new CharacterNode2<gpu>(n_train, n_batch, n_time, 5000, true);
node_test_data0 = new CharacterNode2<gpu>(n_test, n_batch, n_time, 16, true, true);
node_test_label = new CharacterNode2<gpu>(n_test, n_batch, n_time, 5000, true);
read_int_binary_data<gpu>(fn_train_data, (CharacterNode2<gpu> *)node_train_data0, n_train);
read_int_binary_data<gpu>(fn_train_label, (CharacterNode2<gpu> *)node_train_label, n_train);
read_int_binary_data<gpu>(fn_test_data, (CharacterNode2<gpu> *)node_test_data0, n_test);
read_int_binary_data<gpu>(fn_test_label, (CharacterNode2<gpu> *)node_test_label, n_test);
node_train_data = new NgramNode<gpu>(n_train, n_batch, n_time, 5000);
node_test_data = new NgramNode<gpu>(n_test, n_batch, n_time, 5000);
((NgramNode<gpu> *)node_train_data)->c_all = ((CharacterNode2<gpu> *)node_train_data0)->c_all;
((NgramNode<gpu> *)node_test_data)->c_all = ((CharacterNode2<gpu> *)node_test_data0)->c_all;
init_char2<gpu>();
NgramNode<gpu>::load_ngram();
}
// prepare network
Network<gpu> *network = new Network<gpu>();
network->train_data = node_train_data;
network->train_label = node_train_label;
network->test_data = node_test_data;
network->test_label = node_test_label;
if(network_type=="MLP"){
int layers = 0;
int *num_neurons;
string hidden_nl="relu", out_nl="none";
bool dropout=false;
string loss = "category";
string shuffle = "random_random";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "layers") layers = (int)j->second.get<double>();
if(j->first == "neurons"){
num_neurons = (int *)malloc(layers*sizeof(int));
const picojson::array& a = j->second.get<picojson::array>();
int cnt = 0;
for(picojson::array::const_iterator i=a.begin(); i!=a.end(); ++i){
num_neurons[cnt++] = (int)i->get<double>();
}
}
if(j->first == "hidden_nl") hidden_nl = j->second.get<string>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "dropout") dropout = j->second.get<bool>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
network->net = new MLP<gpu>(layers, num_neurons, out_nl, hidden_nl);
}else if(network_type=="SLSTM"){
int layers = 0;
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "layers") layers = (int)j->second.get<double>();
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
cerr << base_neurons << " " << layers << endl;
network->net = new Stacked_LSTM<gpu>(inout_dim, base_neurons, layers, out_nl);
}else if(network_type=="HRes"){
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Hybrid_LSTM_Reservoir<gpu>(inout_dim, base_neurons, out_nl);
}else if(network_type=="GMR2"){
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Gate_MLP_Reservoir2<gpu>(inout_dim, base_neurons, out_nl);
}else if(network_type=="GMR2S"){
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Gate_MLP_Reservoir2_SLSTM<gpu>(inout_dim, base_neurons, out_nl);
}else if(network_type=="Hatsuwa_simple"){
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Hatsuwa_simple<gpu>();
}else if(network_type=="Hatsuwa_ngram"){
int base_neurons=512;
int inout_dim = 1001;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Hatsuwa_ngram<gpu>();
}else if(network_type=="Hatsuwa_aws_res"){
int base_neurons=512;
int inout_dim = 1001;
int layers = 0;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
if(j->first == "layers") layers = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Hatsuwa_aws_res<gpu>(base_neurons, base_neurons, layers);
}else if(network_type=="Hatsuwa_aws_nores"){
int base_neurons=512;
int inout_dim = 1001;
int layers = 0;
string out_nl="none";
string loss = "mse";
string shuffle = "w2v";
picojson::object dat = network_param.get<picojson::object>();
for (picojson::object::const_iterator j = dat.begin(); j != dat.end(); ++j) {
if(j->first == "base_neurons")base_neurons = (int)j->second.get<double>();
if(j->first == "out_nl") out_nl = j->second.get<string>();
if(j->first == "loss") loss = j->second.get<string>();
if(j->first == "shuffle") shuffle = j->second.get<string>();
if(j->first == "inout_dim") inout_dim = (int)j->second.get<double>();
if(j->first == "layers") layers = (int)j->second.get<double>();
}
network->loss_type = loss;
network->shuffle_type = shuffle;
// network->is_dropout = dropout;
// cerr << base_neurons << " " << layers << endl;
network->net = new Hatsuwa_aws_nores<gpu>(base_neurons, base_neurons, layers);
}
network->net->set_param("eta", base_lr);
network->net->set_param("decay", decay);
network->net->set_param("sgd_algo", sgd=="momentum"?0:1);
network->net->set_param("momentum", momentum);
cerr << "network initialize done" << endl;
// learning
if(load_model){
FILE *ii = fopen(to_string("./tmp/"+Global_params::prefix+"_model", init_epoch).c_str(), "rb");
network->load_model(ii);
fclose(ii);
}
for(int epoch=init_epoch+1; epoch<=max_epoch; epoch++){
network->train(1, iter_per_epoch);
float train_err = network->train_error(train_error_limit);
float test_err = network->test_error();
cerr << "epoch: " << epoch << ", train error: " << train_err << ", test error: " << test_err << endl;
printf("epoch: %d, train error: %f, test error: %f\n", epoch, train_err, test_err);
if(epoch%lr_mult_interval==0){
float eta = network->net->get_param("eta");
network->net->set_param("eta", eta*lr_mult);
cerr << "eta: " << eta << endl;
}
if(epoch%snapshot_interval==0){
if(sampling_length>0) network->sampling_char2(to_string("./tmp/"+Global_params::prefix+"_smp", epoch), sampling_length);
FILE *ir = fopen(to_string("./tmp/"+Global_params::prefix+"_model", epoch).c_str(), "wb");
network->save_model(ir);
fclose(ir);
}
}
ShutdownTensorEngine<gpu>();
return 0;
}
#pragma once
#include "cuda_utils_kernels.cuh"
namespace cuhnsw {
__inline__ __device__
bool IsNeighbor(const int* graph, const int deg, const int dstid) {
__syncthreads();
// figure out the warp index and the lane position inside the warp
int warp = threadIdx.x / WARP_SIZE;
int lane = threadIdx.x % WARP_SIZE;
static __shared__ bool shared[WARP_SIZE];
__syncthreads();
bool is_neighbor = false;
for (int i = threadIdx.x; i < deg; i += blockDim.x) {
if (graph[i] == dstid) {
is_neighbor = true;
break;
}
}
__syncthreads();
#if __CUDACC_VER_MAJOR__ >= 9
unsigned int active = __activemask();
is_neighbor = __any_sync(active, is_neighbor);
#else
is_neighbor = __any(is_neighbor);
#endif
// write out the partial reduction to shared memory if appropriate
if (lane == 0) {
shared[warp] = is_neighbor;
}
__syncthreads();
// if we don't have multiple warps, we're done
if (blockDim.x <= WARP_SIZE) {
return shared[0];
}
// otherwise reduce again in the first warp
is_neighbor = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : false;
if (warp == 0) {
#if __CUDACC_VER_MAJOR__ >= 9
active = __activemask();
is_neighbor = __any_sync(active, is_neighbor);
#else
is_neighbor = __any(is_neighbor);
#endif
// broadcast back to shared memory
if (threadIdx.x == 0) {
shared[0] = is_neighbor;
}
}
__syncthreads();
return shared[0];
}
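// Minimal alternative sketch (added, an assumption rather than cuhnsw code): the block-wide
// boolean OR above can also be expressed with the built-in __syncthreads_or() intrinsic,
// which reduces a predicate across all threads of the block in a single call.
__inline__ __device__
bool AnyThreadTrue(bool predicate) {
  return __syncthreads_or(predicate ? 1 : 0) != 0;
}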
__inline__ __device__
void SearchHeuristic(
Neighbor* ef_const_pq, int* size,
const int srcid, const int* nodes,
const cuda_scalar* data, const int dist_type, const int num_dims,
const int ef_construction, const int max_m,
const bool save_remains,
int* cand_nodes, cuda_scalar* cand_distances,
int* graph, float* distances, int* deg,
const float heuristic_coef, const int new_comer = -1) {
int size2 = *size;
__syncthreads();
// get sorted neighbors
if (threadIdx.x == 0) {
while (*size > 0) {
cand_nodes[(*size) - 1] = ef_const_pq[0].nodeid;
cand_distances[(*size) - 1] = ef_const_pq[0].distance;
PqPop(ef_const_pq, size);
}
}
__syncthreads();
// set variables for search heuristic
int head = 0;
int tail = max_m - 1;
if (tail > size2 - 1)
tail = size2 - 1;
const int max_head = tail + 1;
// take some proportion of closest nodes by default
// this mechanism does not exist in hnswlib
// it refers to https://github.com/kakao/n2/blob/36888c3869ac478d896d0921ac64f21930d85659/src/heuristic.cc#L42
const int nn_num = max_m * heuristic_coef;
int* _graph = graph + srcid * max_m;
float* _distances = distances + srcid * max_m;
bool new_comer_inserted = false;
// search heuristic
for (int j = 0; j < size2; ++j) {
if (head >= max_m) break;
bool freepass = head < nn_num or
(new_comer >= 0 and not new_comer_inserted and cand_nodes[j] != new_comer);
if (freepass) {
if (threadIdx.x == 0) {
_graph[head] = cand_nodes[j];
_distances[head] = out_scalar(cand_distances[j]);
}
head++;
__syncthreads();
continue;
}
const cuda_scalar dist_to_src = cand_distances[j];
bool skip = false;
if (new_comer >= 0 and new_comer_inserted) {
cuda_scalar dist = GetDistance(cand_nodes[j], new_comer,
num_dims, dist_type, nodes, data);
skip = gt(dist_to_src, dist);
} else {
for (int k = 0; k < head; ++k) {
cuda_scalar dist = GetDistance(cand_nodes[j], _graph[k],
num_dims, dist_type, nodes, data);
if (gt(dist_to_src, dist)) {
skip = true;
__syncthreads();
break;
}
}
}
if (cand_nodes[j] == new_comer and not skip)
new_comer_inserted = true;
if (skip and tail >= head) {
if (threadIdx.x == 0) {
_graph[tail] = cand_nodes[j];
_distances[tail] = out_scalar(cand_distances[j]);
}
tail--;
} else if (not skip){
if (threadIdx.x == 0) {
_graph[head] = cand_nodes[j];
_distances[head] = out_scalar(cand_distances[j]);
}
head++;
}
}
__syncthreads();
// copy to graph
// take remaining nodes as new neighbors
// it also refers to https://github.com/kakao/n2/blob/36888c3869ac478d896d0921ac64f21930d85659/src/heuristic.cc#L85
// it does not exist in hnswlib either
if (threadIdx.x == 0) deg[srcid] = save_remains? max_head: head;
__syncthreads();
}
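/*
 * Note (added): the heuristic above keeps a candidate only if it is closer to the source than
 * to every neighbor already selected (the hnswlib-style occlusion rule), except that the first
 * max_m * heuristic_coef closest candidates and, when given, the designated new_comer get a
 * free pass; rejected candidates are parked at the tail and count only when save_remains is set.
 */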
__global__ void BuildLevelGraphKernel(
const cuda_scalar* data, const int* nodes,
const int num_dims, const int num_nodes, const int max_m, const int dist_type,
const bool save_remains, const int ef_construction, int* graph, float* distances, int* deg,
int* visited_table, int* visited_list, const int visited_table_size, const int visited_list_size,
int* mutex, int64_t* acc_visited_cnt,
const bool reverse_cand, Neighbor* neighbors, int* global_cand_nodes, cuda_scalar* global_cand_distances,
const float heuristic_coef, int* backup_neighbors, cuda_scalar* backup_distances, bool* went_through_heuristic
) {
static __shared__ int size;
static __shared__ int visited_cnt;
// temporary storage for neighbors and distances
static __shared__ int backup_deg;
int* _backup_neighbors = backup_neighbors + max_m * blockIdx.x;
cuda_scalar* _backup_distances = backup_distances + max_m * blockIdx.x;
Neighbor* ef_const_pq = neighbors + ef_construction * blockIdx.x;
int* cand_nodes = global_cand_nodes + ef_construction * blockIdx.x;
cuda_scalar* cand_distances = global_cand_distances + ef_construction * blockIdx.x;
int* _visited_table = visited_table + visited_table_size * blockIdx.x;
int* _visited_list = visited_list + visited_list_size * blockIdx.x;
for (int i = blockIdx.x; i < num_nodes; i += gridDim.x) {
if (threadIdx.x == 0) {
size = 0;
visited_cnt = 0;
}
__syncthreads();
int srcid = i;
// read access of srcid
if (threadIdx.x == 0) {
while (atomicCAS(&mutex[srcid], 0, 1)) {}
}
__syncthreads();
// initialize entries as neighbors
for (int j = max_m * i; j < max_m * i + deg[i]; ++j) {
int dstid = graph[j];
if (CheckVisited(_visited_table, _visited_list, visited_cnt, dstid,
visited_table_size, visited_list_size))
continue;
__syncthreads();
PushNodeToPq(ef_const_pq, &size, ef_construction,
data, num_dims, dist_type, srcid, dstid, nodes);
}
__syncthreads();
// release lock
if (threadIdx.x == 0) mutex[srcid] = 0;
__syncthreads();
// iterate until converge
int idx = GetCand(ef_const_pq, size, reverse_cand);
while (idx >= 0) {
__syncthreads();
if (threadIdx.x == 0) ef_const_pq[idx].checked = true;
int entry = ef_const_pq[idx].nodeid;
// read access of entry
if (threadIdx.x == 0) {
while (atomicCAS(&mutex[entry], 0, 1)) {}
}
__syncthreads();
for (int j = max_m * entry; j < max_m * entry + deg[entry]; ++j) {
int dstid = graph[j];
if (CheckVisited(_visited_table, _visited_list, visited_cnt, dstid,
visited_table_size, visited_list_size))
continue;
__syncthreads();
PushNodeToPq(ef_const_pq, &size, ef_construction,
data, num_dims, dist_type, srcid, dstid, nodes);
}
__syncthreads();
// release lock
if (threadIdx.x == 0) mutex[entry] = 0;
__syncthreads();
idx = GetCand(ef_const_pq, size, reverse_cand);
}
__syncthreads();
if (threadIdx.x == 0) {
acc_visited_cnt[blockIdx.x] += visited_cnt;
}
for (int j = threadIdx.x; j < visited_cnt; j += blockDim.x) {
_visited_table[_visited_list[j]] = -1;
}
__syncthreads();
// write access of dstid
if (threadIdx.x == 0) {
while (atomicCAS(&mutex[srcid], 0, 1)) {}
}
__syncthreads();
for (int j = 0; j < deg[srcid]; ++j) {
int dstid = graph[srcid * max_m + j];
PushNodeToPq(ef_const_pq, &size, ef_construction,
data, num_dims, dist_type, srcid, dstid, nodes);
}
// run search heuristic for myself
SearchHeuristic(ef_const_pq, &size, srcid, nodes,
data, dist_type, num_dims,
ef_construction, max_m, save_remains,
cand_nodes, cand_distances,
graph, distances, deg, heuristic_coef);
if (threadIdx.x == 0) went_through_heuristic[srcid] = true;
__syncthreads();
// backup neighbors to handle overwrite
if (threadIdx.x == 0) backup_deg = deg[srcid];
__syncthreads();
for (int j = threadIdx.x; j < backup_deg; j += blockDim.x) {
_backup_neighbors[j] = graph[srcid * max_m + j];
_backup_distances[j] = conversion(distances[srcid * max_m + j]);
}
__syncthreads();
// release lock
if (threadIdx.x == 0) mutex[srcid] = 0;
__syncthreads();
// run search heuristic for neighbors
for (int j = 0; j < backup_deg; ++j) {
int dstid = _backup_neighbors[j];
cuda_scalar dist = _backup_distances[j];
__syncthreads();
// write access of dstid
if (threadIdx.x == 0) {
while (atomicCAS(&mutex[dstid], 0, 1)) {}
}
__syncthreads();
const int* _graph = graph + max_m * dstid;
const int _deg = deg[dstid];
bool is_neighbor = IsNeighbor(_graph, _deg, srcid);
if (not is_neighbor){
PushNodeToPq2(ef_const_pq, &size, ef_construction,
dist, dstid, srcid, nodes);
for (int k = 0; k < _deg; ++k) {
int dstid2 = _graph[k];
dist = conversion(distances[dstid * max_m + k]);
PushNodeToPq2(ef_const_pq, &size, ef_construction,
dist, dstid, dstid2, nodes);
}
__syncthreads();
const int new_comer = not save_remains and went_through_heuristic[dstid]? srcid: -1;
__syncthreads();
SearchHeuristic(ef_const_pq, &size, dstid, nodes,
data, dist_type, num_dims,
ef_construction, max_m, save_remains,
cand_nodes, cand_distances,
graph, distances, deg, heuristic_coef, new_comer);
if (threadIdx.x == 0) went_through_heuristic[dstid] = true;
__syncthreads();
}
// release lock
if (threadIdx.x == 0) mutex[dstid] = 0;
__syncthreads();
}
__syncthreads();
}
// cooperative_groups::grid_group g = cooperative_groups::this_grid();
// g.sync();
}
} // namespace cuhnsw
__global__ void
initColorVolumeKernel (PtrStep<uchar4> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
uchar4 *pos = volume.ptr (y) + x;
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z, pos += z_step)
{
clear_voxel(*pos);
}
}
}
void
initColorVolume (PtrStep<uchar4> color_volume)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
initColorVolumeKernel<<<grid, block>>>(color_volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}

template<typename T>
__global__ void
clearVolumeInX (PtrStep<T> volume, int bottom, int numUp)
{
int y = threadIdx.y + blockIdx.y * blockDim.y;
bottom %= VOLUME_X;
const int cachedWrap = (bottom + numUp) % VOLUME_X;
const bool wrap = cachedWrap != bottom + numUp;
int x = ((threadIdx.x + blockIdx.x * blockDim.x) + bottom) % VOLUME_X;
if(!wrap ? (x >= bottom && x <= cachedWrap) :
(x >= bottom || x <= cachedWrap))
{
T * base = volume.ptr(0);
T * pos;
const int cachedXY = x + y * VOLUME_X;
const int cachedProduct = VOLUME_X * VOLUME_Y;
for(int z = 0; z < VOLUME_Z; ++z)
{
pos = &base[cachedXY + z * cachedProduct];
clear_voxel(*pos);
}
}
}
void clearVolumeX (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int remainder = (deltaVoxelWrap - currentVoxelWrap) % 16;
if(remainder != 0)
{
remainder = (deltaVoxelWrap - currentVoxelWrap) + 16 - remainder;
}
else
{
remainder = abs(deltaVoxelWrap - currentVoxelWrap);
}
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (remainder, block.x);
grid.y = divUp (VOLUME_Y, block.y);
int bottom = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_X : VOLUME_X - ((-currentVoxelWrap) % VOLUME_X);
int numUp = -(currentVoxelWrap - deltaVoxelWrap);
clearVolumeInX<<<grid, block>>>(volume, bottom, numUp);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeXc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int remainder = (deltaVoxelWrap - currentVoxelWrap) % 16;
if(remainder != 0)
{
remainder = (deltaVoxelWrap - currentVoxelWrap) + 16 - remainder;
}
else
{
remainder = abs(deltaVoxelWrap - currentVoxelWrap);
}
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (remainder, block.x);
grid.y = divUp (VOLUME_Y, block.y);
int bottom = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_X : VOLUME_X - ((-currentVoxelWrap) % VOLUME_X);
int numUp = -(currentVoxelWrap - deltaVoxelWrap);
clearVolumeInX<<<grid, block>>>(volume, bottom, numUp);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeXBack (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int remainder = (deltaVoxelWrap - currentVoxelWrap) % 16;
if(remainder != 0)
{
remainder = (deltaVoxelWrap - currentVoxelWrap) + 16 - remainder;
}
else
{
remainder = abs(deltaVoxelWrap - currentVoxelWrap);
}
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (remainder, block.x);
grid.y = divUp (VOLUME_Y, block.y);
int currentVoxelBase = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_X : VOLUME_X - ((-currentVoxelWrap) % VOLUME_X);
int top = (currentVoxelBase + VOLUME_X) % VOLUME_X;
int numDown = currentVoxelWrap - deltaVoxelWrap;
int bottom = top - numDown;
if(bottom < 0)
{
bottom = VOLUME_X + bottom;
}
clearVolumeInX<<<grid, block>>>(volume, bottom, numDown);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeXBackc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int remainder = (deltaVoxelWrap - currentVoxelWrap) % 16;
if(remainder != 0)
{
remainder = (deltaVoxelWrap - currentVoxelWrap) + 16 - remainder;
}
else
{
remainder = abs(deltaVoxelWrap - currentVoxelWrap);
}
dim3 block (16, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (remainder, block.x);
grid.y = divUp (VOLUME_Y, block.y);
int currentVoxelBase = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_X : VOLUME_X - ((-currentVoxelWrap) % VOLUME_X);
int top = (currentVoxelBase + VOLUME_X) % VOLUME_X;
int numDown = currentVoxelWrap - deltaVoxelWrap;
int bottom = top - numDown;
if(bottom < 0)
{
bottom = VOLUME_X + bottom;
}
clearVolumeInX<<<grid, block>>>(volume, bottom, numDown);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template<typename T>
__global__ void
clearVolumeInY (PtrStep<T> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
int bottom = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_Y : VOLUME_Y - ((-currentVoxelWrap) % VOLUME_Y);
int numUp = -(currentVoxelWrap - deltaVoxelWrap);
T * base = volume.ptr(0);
T * pos;
while(numUp >= 0)
{
pos = &base[x + (bottom++ % VOLUME_Y) * VOLUME_X + y * VOLUME_X * VOLUME_Y];
clear_voxel(*pos);
numUp--;
}
}
}
void
clearVolumeY (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInY<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeYc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInY<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template<typename T>
__global__ void
clearVolumeInYBack (PtrStep<T> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
int currentVoxelBase = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_Y : VOLUME_Y - ((-currentVoxelWrap) % VOLUME_Y);
int top = (currentVoxelBase + VOLUME_Y) % VOLUME_Y;
int numDown = currentVoxelWrap - deltaVoxelWrap;
T * base = volume.ptr(0);
T * pos;
while(numDown >= 0)
{
pos = &base[x + (top-- % VOLUME_Y) * VOLUME_X + y * VOLUME_X * VOLUME_Y];
clear_voxel(*pos);
if(top < 0)
top = VOLUME_Y - 1;
numDown--;
}
}
}
void
clearVolumeYBack (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInYBack<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeYBackc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInYBack<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template<typename T>
__global__ void
clearVolumeInZ (PtrStep<T> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
int bottom = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_Z : VOLUME_Z - ((-currentVoxelWrap) % VOLUME_Z);
int numUp = -(currentVoxelWrap - deltaVoxelWrap);
T * base = volume.ptr(0);
T * pos;
while(numUp >= 0)
{
pos = &base[x + y * VOLUME_X + (bottom++ % VOLUME_Z) * VOLUME_X * VOLUME_Y];
clear_voxel(*pos);
numUp--;
}
}
}
void
clearVolumeZ (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInZ<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeZc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInZ<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template<typename T>
__global__ void
clearVolumeInZBack (PtrStep<T> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < VOLUME_X && y < VOLUME_Y)
{
int currentVoxelBase = currentVoxelWrap > 0 ? currentVoxelWrap % VOLUME_Z : VOLUME_Z - ((-currentVoxelWrap) % VOLUME_Z);
int top = (currentVoxelBase + VOLUME_Z) % VOLUME_Z;
int numDown = currentVoxelWrap - deltaVoxelWrap;
T * base = volume.ptr(0);
T * pos;
while(numDown >= 0)
{
pos = &base[x + y * VOLUME_X + (top-- % VOLUME_Z) * VOLUME_X * VOLUME_Y];
clear_voxel(*pos);
if(top < 0)
top = VOLUME_Z - 1;
numDown--;
}
}
}
void
clearVolumeZBack (PtrStep<short> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInZBack<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
void
clearVolumeZBackc (PtrStep<uchar4> volume, const int currentVoxelWrap, const int deltaVoxelWrap)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
clearVolumeInZBack<<<grid, block>>>(volume, currentVoxelWrap, deltaVoxelWrap);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
template<typename T>
__global__ void
initializeVolume (PtrStep<T> volume)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
  int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (x < VOLUME_X && y < VOLUME_Y)
{
T *pos = volume.ptr(y) + x;
int z_step = VOLUME_Y * volume.step / sizeof(*pos);
#pragma unroll
for(int z = 0; z < VOLUME_Z; ++z, pos+=z_step)
{
clear_voxel(*pos);
}
}
}
void
initVolume (PtrStep<short> volume)
{
dim3 block (32, 16);
dim3 grid (1, 1, 1);
grid.x = divUp (VOLUME_X, block.x);
grid.y = divUp (VOLUME_Y, block.y);
initializeVolume<<<grid, block>>>(volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
struct Tsdf
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8,
MAX_WEIGHT = 1 << 7
};
};
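// Editor's note: pack_tsdf / unpack_tsdf are used by the kernels below but not
// defined in this excerpt. A common convention for a short-typed TSDF volume is
// fixed-point storage of a value clamped to [-1, 1]; the pair below is an
// illustrative sketch of that idea (the scale factor 32766 is our assumption,
// not taken from this project).
__device__ __forceinline__ void pack_tsdf_sketch (float tsdf, short& out)
{
  tsdf = fmaxf (-1.f, fminf (1.f, tsdf));    // clamp to the representable range
  out = static_cast<short>(tsdf * 32766.f);  // assumed fixed-point scale
}
__device__ __forceinline__ float unpack_tsdf_sketch (short packed)
{
  return static_cast<float>(packed) / 32766.f;
}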
__global__ void
scaleDepth (const PtrStepSz<unsigned short> depth, PtrStep<float> scaled, const Intr intr, bool angleColor)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= depth.cols || y >= depth.rows)
return;
int Dp = depth.ptr (y)[x];
float xl = (x - intr.cx) / intr.fx;
float yl = (y - intr.cy) / intr.fy;
float lambda = sqrtf (xl * xl + yl * yl + 1);
if(angleColor)
{
int STEP = 1;
int ky = 7;
int kx = 7;
int ty = min (y - ky / 2 + ky, depth.rows - 1);
int tx = min (x - kx / 2 + kx, depth.cols - 1);
int count = 0;
for (int cy = max (y - ky / 2, 0); cy < ty; cy += STEP)
{
for (int cx = max (x - kx / 2, 0); cx < tx; cx += STEP)
{
if (abs(Dp-depth.ptr (cy)[cx]) > 200 || Dp == 0)
{
count++;
}
}
}
if(count > 5)
{
scaled.ptr (y)[x] = -Dp * lambda/1000.f; //meters
}
else
{
scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters
}
}
else
{
scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters
}
}
__global__ void
tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short> volume,
const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size,
const int3 voxelWrap, PtrStep<uchar4> color_volume, PtrStepSz<uchar3> colors,
PtrStep<float> nmap_curr, int rows, bool angleColor)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= VOLUME_X || y >= VOLUME_Y)
return;
float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x;
float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y;
float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z;
float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y;
float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx;
float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy;
float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z);
float z_scaled = 0;
float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx;
float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy;
float tranc_dist_inv = 1.0f / tranc_dist;
for (int z = 0; z < VOLUME_Z;
++z,
v_g_z += cell_size.z,
z_scaled += cell_size.z,
v_x += Rcurr_inv_0_z_scaled,
v_y += Rcurr_inv_1_z_scaled)
{
float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled);
if (inv_z < 0)
continue;
// project to current cam
int2 coo =
{
__float2int_rn (v_x * inv_z + intr.cx),
__float2int_rn (v_y * inv_z + intr.cy)
};
if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6
{
float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters
bool no_color = false;
if(Dp_scaled < 0.0)
{
Dp_scaled = -Dp_scaled;
no_color = true;
}
float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm);
if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters
{
float3 ncurr;
ncurr.x = nmap_curr.ptr (coo.y)[coo.x];
ncurr.y = nmap_curr.ptr (coo.y + rows)[coo.x];
ncurr.z = nmap_curr.ptr (coo.y + 2 * rows)[coo.x];
if (ncurr.z < 0) ncurr.z = -ncurr.z;
float tsdf = fmin (1.0f, sdf * tranc_dist_inv);
//read and unpack
short * pos = &volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y];
float tsdf_prev = unpack_tsdf(*pos);
uchar4 * ptrColor = &color_volume.ptr(0)[((x + voxelWrap.x) % VOLUME_X) + ((y + voxelWrap.y) % VOLUME_Y) * VOLUME_X + ((z + voxelWrap.z) % VOLUME_Z) * VOLUME_X * VOLUME_Y];
float weight_prev = ptrColor->w;
          const float Wrk = 1; // TODO: try weighting this?
pack_tsdf((tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk), *pos);
ptrColor->w = min(weight_prev + Wrk, (float)Tsdf::MAX_WEIGHT);
if ((!isnan(ncurr.x) && !no_color) || (ptrColor->x == 0 && ptrColor->y == 0 && ptrColor->z == 0))
{
const float Wrkc = (angleColor ? min(1.0f, ncurr.z / RGB_VIEW_ANGLE_WEIGHT) : 1.0f) * 2.0f;
uchar3 rgb = colors.ptr (coo.y)[coo.x];
float new_x = (ptrColor->x * weight_prev + Wrkc * rgb.x) / (weight_prev + Wrkc);
float new_y = (ptrColor->y * weight_prev + Wrkc * rgb.y) / (weight_prev + Wrkc);
float new_z = (ptrColor->z * weight_prev + Wrkc * rgb.z) / (weight_prev + Wrkc);
ptrColor->x = min (255, max (0, __float2int_rn (new_x)));
ptrColor->y = min (255, max (0, __float2int_rn (new_y)));
ptrColor->z = min (255, max (0, __float2int_rn (new_z)));
}
}
}
}
}
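// Editor's note: the voxel update inside tsdf23 is a weighted running average,
//   tsdf_new = (tsdf_prev * w_prev + w_obs * tsdf_obs) / (w_prev + w_obs),
// with the stored weight clamped to Tsdf::MAX_WEIGHT. The helper below simply
// restates that update in isolation for readability; it is illustrative and not
// called by the code above.
__host__ __device__ inline float fuseTsdfSketch (float tsdf_prev, float w_prev,
                                                 float tsdf_obs, float w_obs)
{
  // the caller would also store min (w_prev + w_obs, (float)Tsdf::MAX_WEIGHT)
  return (tsdf_prev * w_prev + w_obs * tsdf_obs) / (w_prev + w_obs);
}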
void
integrateTsdfVolume (const PtrStepSz<unsigned short>& depth, const Intr& intr,
const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr,
float tranc_dist,
PtrStep<short> volume, DeviceArray2D<float>& depthScaled,
const int3 & voxelWrap, PtrStep<uchar4> color_volume, PtrStepSz<uchar3> colors,
const DeviceArray2D<float>& nmap_curr,
bool angleColor)
{
depthScaled.create (depth.rows, depth.cols);
dim3 block_scale (32, 8);
dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y));
//scales depth along ray and converts mm -> meters.
scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr, angleColor);
cudaSafeCall ( cudaGetLastError () );
float3 cell_size;
cell_size.x = volume_size.x / VOLUME_X;
cell_size.y = volume_size.y / VOLUME_Y;
cell_size.z = volume_size.z / VOLUME_Z;
dim3 block (16, 16);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
int rows = nmap_curr.rows () / 3;
tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, voxelWrap, color_volume, colors, nmap_curr, rows, angleColor);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
the_stack
|
KW_GLOBAL_KERNEL void kernelPartialsPartialsGrowing(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // TODO
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix; /* Points to *this* matrix */
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix;
/* Load values into shared memory */
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sMatrix2[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
int y = deltaPartialsByState + deltaPartialsByMatrix;
/* copy PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE lengthed partials */
/* These are all coherent global memory reads; checked in Profiler */
if (pattern < totalPatterns) {
sPartials1[patIdx][state] = partials1[y + state];
sPartials2[patIdx][state] = partials2[y + state];
} else {
sPartials1[patIdx][state] = 0;
sPartials2[patIdx][state] = 0;
}
REAL sum2 = 0;
for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
/* load one row of matrices */
if (patIdx < BLOCK_PEELING_SIZE) {
/* These are all coherent global memory reads. */
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];
/* sMatrix now filled with starting in state and ending in i */
matrix2 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {
FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);
}
KW_LOCAL_FENCE;
}
sPartials1[patIdx][state] *= sum2;
KW_LOCAL_FENCE; // TODO Remove?
REAL sum1 = 0;
for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
/* load one row of matrices */
if (patIdx < BLOCK_PEELING_SIZE) {
/* These are all coherent global memory reads. */
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
/* sMatrix now filled with starting in state and ending in i */
matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {
FMA(sMatrix1[j][state], sPartials1[patIdx][i + j], sum1);
}
KW_LOCAL_FENCE;
}
if (pattern < totalPatterns) {
partials3[u] = sum1;
}
#endif
}
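// Editor's note: stripped of the shared-memory tiling and padding, the
// per-pattern arithmetic of kernelPartialsPartialsGrowing reduces to the
// plain-C reference loop below. The function name is ours and the matrix
// indexing mirrors the sMatrix[j][state] access order used above; this is a
// readability aid, not an alternative implementation.
void partialsPartialsGrowingReference(const REAL* m1, const REAL* m2,
                                      const REAL* p1, const REAL* p2,
                                      REAL* p3, int stateCount) {
    for (int state = 0; state < stateCount; state++) {
        REAL sum1 = 0;
        for (int k = 0; k < stateCount; k++) {
            // sum2 for state k: one matrix-vector sum over the second matrix
            REAL sum2 = 0;
            for (int m = 0; m < stateCount; m++)
                sum2 += m2[m * stateCount + k] * p2[m];
            sum1 += m1[k * stateCount + state] * p1[k] * sum2;
        }
        p3[state] = sum1;
    }
}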
KW_GLOBAL_KERNEL void kernelPartialsStatesGrowing(KW_GLOBAL_VAR REAL* KW_RESTRICT partials1,
KW_GLOBAL_VAR int* KW_RESTRICT states2,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials3,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices1,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices2,
int totalPatterns) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // TODO
#else // GPU implementation
DETERMINE_INDICES_X_GPU();
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix1 = matrices1 + deltaMatrix; /* Points to *this* matrix */
/* Load values into shared memory */
KW_LOCAL_MEM REAL sMatrix1[BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
int y = deltaPartialsByState + deltaPartialsByMatrix;
/* copy PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE lengthed partials */
/* These are all coherent global memory reads; checked in Profiler */
if (pattern < totalPatterns) {
sPartials1[patIdx][state] = partials1[y + state];
} else {
sPartials1[patIdx][state] = 0;
}
REAL sum2 = 1;
if (pattern < totalPatterns) { // Remove padded threads!
int state2 = states2[pattern];
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices2 + deltaMatrix + state2 * PADDED_STATE_COUNT;
if (state2 < PADDED_STATE_COUNT) {
sum2 = matrix2[state];
}
}
sPartials1[patIdx][state] *= sum2;
KW_LOCAL_FENCE; // TODO Remove?
REAL sum1 = 0;
for (int i = 0; i < PADDED_STATE_COUNT; i += BLOCK_PEELING_SIZE) {
/* load one row of matrices */
if (patIdx < BLOCK_PEELING_SIZE) {
/* These are all coherent global memory reads. */
sMatrix1[patIdx][state] = matrix1[patIdx * PADDED_STATE_COUNT + state];
/* sMatrix now filled with starting in state and ending in i */
matrix1 += BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
for(int j = 0; j < BLOCK_PEELING_SIZE; j++) {
FMA(sMatrix1[j][state], sPartials1[patIdx][i + j], sum1);
}
KW_LOCAL_FENCE;
}
if (pattern < totalPatterns) {
partials3[u] = sum1;
}
#endif
}
KW_GLOBAL_KERNEL void kernelPartialsPartialsEdgeFirstDerivatives(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
KW_GLOBAL_VAR REAL* KW_RESTRICT weights,
int skip,
int totalPatterns, int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // TODO
#else // GPU implementation
#define NEW_BLOCK_PEELING_SIZE PATTERN_BLOCK_SIZE
int state = KW_LOCAL_ID_0;
int patIdx = KW_LOCAL_ID_1;
int pattern = KW_GROUP_ID_0 * NEW_BLOCK_PEELING_SIZE + patIdx;
int node = KW_GROUP_ID_1 + skip;
int instructionOffset = node * 3;
unsigned int partials1Offset = instructions[instructionOffset + 0];
unsigned int partials2Offset = instructions[instructionOffset + 1];
unsigned int matrices1Offset = instructions[instructionOffset + 2];
KW_LOCAL_MEM REAL sMatrix2[NEW_BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
    /* TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount */
KW_LOCAL_MEM REAL sWeights[MATRIX_BLOCK_SIZE];
for (int c = 0; c < categoryCount; c += KW_LOCAL_SIZE_0) {
int x = c + KW_LOCAL_ID_0;
if (x < categoryCount) {
sWeights[x] = weights[x];
}
}
KW_LOCAL_FENCE;
REAL numerator = 0;
REAL denominator = 0;
REAL lPartial1;
REAL lPartial2;
for (int c = 0; c < categoryCount; ++c) {
KW_GLOBAL_VAR REAL* KW_RESTRICT partials1 = partials0 + partials1Offset + totalPatterns * PADDED_STATE_COUNT * c;
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2 = partials0 + partials2Offset + totalPatterns * PADDED_STATE_COUNT * c;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices0 + matrices1Offset + PADDED_STATE_COUNT * PADDED_STATE_COUNT * c;
/* copy PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE lengthed partials */
/* These are all coherent global memory reads; checked in Profiler */
if (pattern<totalPatterns) {
lPartial1 = partials1[pattern * PADDED_STATE_COUNT + state];
sPartials2[patIdx][state] = lPartial2 = partials2[pattern * PADDED_STATE_COUNT + state];
} else {
lPartial1 = 0;
sPartials2[patIdx][state] = lPartial2 = 0;
}
FMA(lPartial1, lPartial2 * sWeights[c], denominator);
REAL sum2 = 0;
for (int i = 0; i < PADDED_STATE_COUNT; i += NEW_BLOCK_PEELING_SIZE) {
/* load one row of matrices */
if (patIdx < NEW_BLOCK_PEELING_SIZE) {
/* These are all coherent global memory reads. */
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];
/* sMatrix now filled with starting in state and ending in i */
matrix2 += NEW_BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
            // TODO 2nd check is unnecessary for stateCount >= 16
for (int j = 0; (j < NEW_BLOCK_PEELING_SIZE) && (i + j < PADDED_STATE_COUNT); j++) {
FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);
}
KW_LOCAL_FENCE;
}
FMA(lPartial1, sum2 * sWeights[c], numerator);
// partials1 += totalPatterns * PADDED_STATE_COUNT;
// partials2 += totalPatterns * PADDED_STATE_COUNT;
}
sPartials1[patIdx][state] = numerator;
sPartials2[patIdx][state] = denominator;
KW_LOCAL_FENCE;
#ifdef IS_POWER_OF_TWO
// parallelized reduction *** only works for powers-of-2 ****
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {
if (state < i) {
#else
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {
if (state < i && state + i < PADDED_STATE_COUNT ) {
#endif // IS_POWER_OF_TWO
sPartials1[patIdx][state] += sPartials1[patIdx][state + i];
sPartials2[patIdx][state] += sPartials2[patIdx][state + i];
}
KW_LOCAL_FENCE;
}
// TODO Test this coalesced write code out
int tx = KW_LOCAL_ID_0;
if (tx < PATTERN_BLOCK_SIZE && patIdx == 0) { // Use first PATTERN_BLOCK_SIZE threads to write
int site = KW_GROUP_ID_0 * NEW_BLOCK_PEELING_SIZE + tx;
if (site < totalPatterns) {
REAL numerator = sPartials1[tx][0];
REAL denominator = sPartials2[tx][0];
REAL ratio = 0.0;
if (denominator != 0.0) {
ratio = numerator / denominator;
}
out[totalPatterns * node + site] = ratio;
}
}
// if (pattern < totalPatterns) {
// if (state == 0) {
// out[totalPatterns * node + pattern] = sPartials1[patIdx][0] / sPartials2[patIdx][0]; // pre;
//// out[totalPatterns * node + pattern] = sPartials1[patIdx][0]; // Write numerator
//// out[totalPatterns * (KW_NUM_GROUPS_1 + node) + pattern] = sPartials2[patIdx][0]; // Write denominator
// }
// }
#endif
}
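// Editor's note: the numerator/denominator reduction above is the standard
// shared-memory tree reduction, halving the active stride each step; the
// IS_POWER_OF_TWO branch merely drops the bounds check when PADDED_STATE_COUNT
// is a power of two. The sketch below shows the same idea in plain CUDA (this
// file otherwise uses the KW_* portability macros); every thread of the block
// must call it, and n is assumed to be a power of two.
__device__ void blockTreeReductionSketch(volatile REAL* sdata, int tid, int n) {
    __syncthreads();                      // sdata must be fully written first
    for (int stride = n / 2; stride > 0; stride >>= 1) {
        if (tid < stride)
            sdata[tid] += sdata[tid + stride];
        __syncthreads();                  // finish each level before halving again
    }
    // sdata[0] now holds the sum of the original n entries
}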
KW_GLOBAL_KERNEL void kernelPartialsStatesEdgeFirstDerivatives(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
KW_GLOBAL_VAR int* KW_RESTRICT states0,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
KW_GLOBAL_VAR REAL* KW_RESTRICT weights,
int skip,
int totalPatterns, int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // TODO
#else // GPU implementation
#define NEW_BLOCK_PEELING_SIZE PATTERN_BLOCK_SIZE
int state = KW_LOCAL_ID_0;
int patIdx = KW_LOCAL_ID_1;
int pattern = KW_GROUP_ID_0 * NEW_BLOCK_PEELING_SIZE + patIdx;
int node = KW_GROUP_ID_1 + skip;
int instructionOffset = node * 3;
unsigned int states1Offset = instructions[instructionOffset + 0];
unsigned int partials2Offset = instructions[instructionOffset + 1];
unsigned int matrices1Offset = instructions[instructionOffset + 2];
KW_LOCAL_MEM REAL sMatrix2[NEW_BLOCK_PEELING_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials1[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
KW_LOCAL_MEM REAL sPartials2[PATTERN_BLOCK_SIZE][PADDED_STATE_COUNT];
    /* TODO: Currently assumes MATRIX_BLOCK_SIZE >> matrixCount */
KW_LOCAL_MEM REAL sWeights[MATRIX_BLOCK_SIZE];
for (int c = 0; c < categoryCount; c += KW_LOCAL_SIZE_0) {
int x = c + KW_LOCAL_ID_0;
if (x < categoryCount) {
sWeights[x] = weights[x];
}
}
KW_LOCAL_FENCE;
REAL numerator = 0;
REAL denominator = 0;
int lState1 = (pattern < totalPatterns) ?
states0[states1Offset + pattern] : PADDED_STATE_COUNT;
REAL lPartial1 = (lState1 >= PADDED_STATE_COUNT || state == lState1) ?
1 : 0;
REAL lPartial2;
for (int c = 0; c < categoryCount; ++c) {
KW_GLOBAL_VAR REAL* KW_RESTRICT partials2 = partials0 + partials2Offset + totalPatterns * PADDED_STATE_COUNT * c;
KW_GLOBAL_VAR REAL* KW_RESTRICT matrix2 = matrices0 + matrices1Offset + PADDED_STATE_COUNT * PADDED_STATE_COUNT * c;
/* copy PADDED_STATE_COUNT*PATTERN_BLOCK_SIZE lengthed partials */
/* These are all coherent global memory reads; checked in Profiler */
if (pattern<totalPatterns) {
sPartials2[patIdx][state] = lPartial2 = partials2[pattern * PADDED_STATE_COUNT + state];
} else {
sPartials2[patIdx][state] = lPartial2 = 0;
}
FMA(lPartial1, lPartial2 * sWeights[c], denominator);
REAL sum2 = 0;
for (int i = 0; i < PADDED_STATE_COUNT; i += NEW_BLOCK_PEELING_SIZE) {
/* load one row of matrices */
if (patIdx < NEW_BLOCK_PEELING_SIZE) {
/* These are all coherent global memory reads. */
sMatrix2[patIdx][state] = matrix2[patIdx * PADDED_STATE_COUNT + state];
/* sMatrix now filled with starting in state and ending in i */
matrix2 += NEW_BLOCK_PEELING_SIZE * PADDED_STATE_COUNT;
}
KW_LOCAL_FENCE;
            // TODO 2nd check is unnecessary for stateCount >= 16
for (int j = 0; (j < NEW_BLOCK_PEELING_SIZE) && (i + j < PADDED_STATE_COUNT); j++) {
FMA(sMatrix2[j][state], sPartials2[patIdx][i + j], sum2);
}
KW_LOCAL_FENCE;
}
FMA(lPartial1, sum2 * sWeights[c], numerator);
// partials1 += totalPatterns * PADDED_STATE_COUNT;
// partials2 += totalPatterns * PADDED_STATE_COUNT;
}
sPartials1[patIdx][state] = numerator;
sPartials2[patIdx][state] = denominator;
KW_LOCAL_FENCE;
#ifdef IS_POWER_OF_TWO
// parallelized reduction *** only works for powers-of-2 ****
for (int i = PADDED_STATE_COUNT / 2; i > 0; i >>= 1) {
if (state < i) {
#else
for (int i = SMALLEST_POWER_OF_TWO / 2; i > 0; i >>= 1) {
if (state < i && state + i < PADDED_STATE_COUNT ) {
#endif // IS_POWER_OF_TWO
sPartials1[patIdx][state] += sPartials1[patIdx][state + i];
sPartials2[patIdx][state] += sPartials2[patIdx][state + i];
}
KW_LOCAL_FENCE;
}
// TODO Test this coalesced write code out
int tx = KW_LOCAL_ID_0;
if (tx < PATTERN_BLOCK_SIZE && patIdx == 0) { // Use first PATTERN_BLOCK_SIZE threads to write
int site = KW_GROUP_ID_0 * NEW_BLOCK_PEELING_SIZE + tx;
if (site < totalPatterns) {
REAL numerator = sPartials1[tx][0];
REAL denominator = sPartials2[tx][0];
REAL ratio = 0.0;
if (denominator != 0.0) {
ratio = numerator / denominator;
}
out[totalPatterns * node + site] = ratio;
}
}
#endif
}
KW_GLOBAL_KERNEL void kernelPartialsStatesCrossProducts(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
KW_GLOBAL_VAR int* KW_RESTRICT states0,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
KW_GLOBAL_VAR REAL* KW_RESTRICT weights,
int skip,
int totalPatterns, int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // Not implemented
#else // GPU implementation
#endif
}
KW_GLOBAL_KERNEL void kernelPartialsPartialsCrossProducts(KW_GLOBAL_VAR REAL* KW_RESTRICT out,
KW_GLOBAL_VAR REAL* KW_RESTRICT partials0,
KW_GLOBAL_VAR REAL* KW_RESTRICT matrices0,
KW_GLOBAL_VAR unsigned int* KW_RESTRICT instructions,
KW_GLOBAL_VAR REAL* KW_RESTRICT categoryRates,
KW_GLOBAL_VAR REAL* KW_RESTRICT categoryWeights,
KW_GLOBAL_VAR REAL* KW_RESTRICT patternWeights,
int skip,
int totalPatterns,
int totalNodes,
int categoryCount) {
#ifdef FW_OPENCL_CPU // CPU/MIC implementation
todo(); // Not implemented
#else // GPU implementation
#endif
}
|
the_stack
|
#include "ConsolidateAndIdentifyContours.h"
#include <iostream>
#include <fstream>
#include <cmath>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Define the default thread-block dimensions.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Macro: RED_MAC_COUNT
// Defines how many times edge detection is repeated.
#define RED_MAC_COUNT 4
// Macros: DILATE_TPL_SHAPE and SEARCH_TPL_SHAPE
// Define the neighborhood template shapes used for dilation and for searching
// the reference contour.
#define DILATE_TPL_SHAPE TF_SHAPE_CIRCLE
#define SEARCH_TPL_SHAPE TF_SHAPE_BOX
// Macro: OBJ_IDX_OFFSET
// Defines the label offset added to object indices written into the marked
// contour output.
#define OBJ_IDX_OFFSET 100
// Global variable: _redMacDiffSize
// Detection radii of the different edge detectors.
static unsigned _redMacDiffSize[RED_MAC_COUNT] = { 3, 5, 7, 9 };
// Static member variable: redMachine (edge-detection engine)
RobustEdgeDetection *ConsolidateAndIdentifyContours::redMachine = NULL;
// Kernel function: _searchPrimitiveContourKer (match and label contours)
// Matches contours in the detected contour image against the reference contour
// image. Matched edges are labelled with the corresponding object index;
// unmatched contour points are marked as outliers.
static __global__ void
_searchPrimitiveContourKer(
        ImageCuda inimg,      // input contour image
        ImageCuda outimg,     // labelled output image
        ImageCuda abnorimg,   // outlier image
        ImageCuda prmtcont,   // reference contour image
        ImageCuda prmtreg,    // object region image
        unsigned trackrad     // search radius
);
// Host member method: initRedMachine (initialize the edge-detection engines)
__host__ int ConsolidateAndIdentifyContours::initRedMachine()
{
    // If redMachine is not NULL, it has already been initialized.
    if (redMachine != NULL)
        return NO_ERROR;
    // Allocate the requested number of edge detectors.
    redMachine = new RobustEdgeDetection[RED_MAC_COUNT];
    if (redMachine == NULL)
        return OUT_OF_MEM;
    // Set the detection radius of each edge detector in turn.
    int errcode = NO_ERROR;
    for (int i = 0; i < RED_MAC_COUNT; i++) {
        int curerrcode = redMachine[i].setDiffsize(_redMacDiffSize[i]);
        // The error code finally returned should be the more severe one.
        if (curerrcode < errcode)
            errcode = curerrcode;
    }
    // Initialization done; return the error code accumulated above.
    return errcode;
}
// Host member method: initMorphMachine (initialize the dilation engine)
__host__ int ConsolidateAndIdentifyContours::initMorphMachine()
{
    // Fetch the template currently held by the dilation engine.
    Template *oldtpl = morphMachine.getTemplate();
    // Generate a new template through the template factory; the shape is given
    // by DILATE_TPL_SHAPE.
    int errcode;
    Template *curtpl = NULL;
    size_t boxsize = this->dilationRad * 2 + 1;
    errcode = TemplateFactory::getTemplate(&curtpl, DILATE_TPL_SHAPE, boxsize);
    if (errcode != NO_ERROR)
        return errcode;
    // Install the newly generated template into the dilation engine.
    errcode = morphMachine.setTemplate(curtpl);
    if (errcode != NO_ERROR)
        return errcode;
    // If the original template was not NULL, release our hold on it.
    if (oldtpl != NULL)
        TemplateFactory::putTemplate(oldtpl);
    // Done; return.
    return NO_ERROR;
}
// Host member method: getCsldtContoursImg (obtain the consolidated contour image)
__host__ int ConsolidateAndIdentifyContours::getCsldtContoursImg(
        Image *inimg, Image *outimg)
{
    // Check whether the input and output images are NULL.
    if (inimg == NULL || outimg == NULL)
        return NULL_POINTER;
    // The failure paths below must free the temporary images allocated here, so
    // a macro is defined to simplify the code and ease maintenance.
#define CAIC_GETCONT_ERRFREE(errcode) do { \
        for (int _i_cge = 0; _i_cge < RED_MAC_COUNT; _i_cge++) { \
            if (edgetmpimg[_i_cge] != NULL) \
                ImageBasicOp::deleteImage(edgetmpimg[_i_cge]); \
        } \
        return (errcode); \
    } while (0)
    // This loop does two things: it creates the edge-detection output images,
    // and it invokes the edge-detection algorithm on each of them.
    int errcode = NO_ERROR;
    Image *edgetmpimg[RED_MAC_COUNT] = { NULL };
    for (int i = 0; i < RED_MAC_COUNT; i++) {
        // Create the output image for edge detection.
        errcode = ImageBasicOp::newImage(edgetmpimg + i);
        if (errcode != NO_ERROR)
            CAIC_GETCONT_ERRFREE(errcode);
        //cout << "AA" << i << endl;
        // Call the edge-detection method to obtain the edge image.
        errcode = redMachine[i].detectEdgeSA(inimg, edgetmpimg[i], NULL);
        if (errcode != NO_ERROR)
            CAIC_GETCONT_ERRFREE(errcode);
        //cout << "BB" << i << endl;
    }
    // Combine the edge-detection results obtained under the different parameters.
    errcode = combineMachine.combineImageMax(edgetmpimg, RED_MAC_COUNT, outimg);
    if (errcode != NO_ERROR)
        CAIC_GETCONT_ERRFREE(errcode);
    // Dilate the edges to reconnect broken line segments.
    errcode = morphMachine.dilate(outimg, edgetmpimg[0]);
    if (errcode != NO_ERROR)
        CAIC_GETCONT_ERRFREE(errcode);
    // Thin the dilated edges to restore a single-pixel line width.
    errcode = thinMachine.thinMatlabLike(edgetmpimg[0], outimg);
    if (errcode != NO_ERROR)
        CAIC_GETCONT_ERRFREE(errcode);
    // The edge-detection algorithm already outputs a binary image, so no
    // further binarization is needed.
    //errcode = binMachine.binarize(outimg);
    //if (errcode != NO_ERROR)
    //    CAIC_GETCONT_ERRFREE(errcode);
    // Done; return.
    CAIC_GETCONT_ERRFREE(NO_ERROR);
#undef CAIC_GETCONT_ERRFREE
}
// Kernel function: _searchPrimitiveContourKer (match and label contours)
__global__ void _searchPrimitiveContourKer(
        ImageCuda inimg, ImageCuda outimg, ImageCuda abnorimg,
        ImageCuda prmtcont, ImageCuda prmtreg, unsigned trackrad)
{
    // Compute the output pixel handled by this thread; c and r are the x and y
    // components of the pixel coordinate (c for column, r for row).
    unsigned c = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned r = blockIdx.y * blockDim.y + threadIdx.y;
    // Check whether the pixel is out of bounds; if so, do nothing. This saves
    // computation and prevents crashes caused by out-of-bounds accesses.
    if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height)
        return;
    // Compute the memory offsets into the input and output images.
    unsigned inidx = r * inimg.pitchBytes + c;
    unsigned outidx = r * outimg.pitchBytes + c;
    unsigned abnoridx = r * abnorimg.pitchBytes + c;
    // Read the corresponding pixel value from the input image.
    unsigned char inpixel = inimg.imgMeta.imgData[inidx];
    // If the pixel value is 0, i.e. the current point is not on a detected
    // contour, write 0 directly to the output images and skip the search below.
    if (inpixel == 0) {
        outimg.imgMeta.imgData[outidx] = 0;
        abnorimg.imgMeta.imgData[abnoridx] = 0;
        return;
    }
    // Search outward from the centre for the object label of the current point.
    // First check whether this position already carries a contour label in the
    // reference contour image; if it does, the prmtcontpxl == 0 condition of the
    // loops below skips the entire search.
    unsigned prmtcontidx = r * prmtcont.pitchBytes + c;
    unsigned char prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
    // Search the neighbourhood of the current position from near to far, looking
    // for a hit on a reference contour point.
    int curr, curc;
    // The outer loop iterates over the search radius.
    for (int currad = 1; currad <= trackrad && prmtcontpxl == 0; currad++) {
        // Iterate over the points at each radius, checking from the axis
        // midpoints towards the corners. As soon as a reference contour point is
        // found, exit the loop and stop searching.
        for (int i = 0; i < trackrad && prmtcontpxl == 0; i++) {
            // Check the right-hand point above.
            curc = c + i;
            curr = r - currad;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc < prmtcont.imgMeta.width && curr >= 0)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the right-hand point below.
            curc = c + i;
            curr = r + currad;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc < prmtcont.imgMeta.width && curr < prmtcont.imgMeta.height)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the lower point on the left.
            curc = c - currad;
            curr = r + i;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc >= 0 && curr < prmtcont.imgMeta.height)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the lower point on the right.
            curc = c + currad;
            curr = r + i;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc < prmtcont.imgMeta.width && curr < prmtcont.imgMeta.height)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // By the index formulas, the left-hand (and upper) points lie slightly
            // further out than the right-hand (and lower) points, so the left-hand
            // series is checked afterwards as a group.
            // Check the left-hand point above.
            curc = c - i - 1;
            curr = r - currad;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc >= 0 && curr >= 0)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the left-hand point below.
            curc = c - i - 1;
            curr = r + currad;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc >= 0 && curr < prmtcont.imgMeta.height)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the upper point on the left.
            curc = c - currad;
            curr = r - i - 1;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc >= 0 && curr >= 0)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
            // Check the upper point on the right.
            curc = c + currad;
            curr = r - i - 1;
            prmtcontidx = curr * prmtcont.pitchBytes + curc;
            if (curc < prmtcont.imgMeta.width && curr >= 0)
                prmtcontpxl = prmtcont.imgMeta.imgData[prmtcontidx];
            if (prmtcontpxl != 0)
                break;
}
}
    // Write the output according to whether a reference contour point was found.
    if (prmtcontpxl != 0) {
        // A reference contour point was matched: label the output image and
        // leave the outlier image unmarked.
        outimg.imgMeta.imgData[outidx] = prmtcontpxl + OBJ_IDX_OFFSET;
        abnorimg.imgMeta.imgData[abnoridx] = 0;
    } else {
        // No reference contour point was matched: mark this point as an outlier
        // and write it to the outlier image.
        outimg.imgMeta.imgData[outidx] = 0;
        abnorimg.imgMeta.imgData[abnoridx] =
                prmtreg.imgMeta.imgData[r * prmtreg.pitchBytes + c];
    }
}
// Host member method: searchPrimitiveContour (match and label contours)
__host__ int ConsolidateAndIdentifyContours::searchPrimitiveContour(
        Image *inimg, Image *outimg, Image *abnormalimg)
{
    // If the reference contour and reference region images have not been set,
    // report an error and return.
    if (this->primitiveContour == NULL || this->primitiveRegion == NULL)
        return OP_OVERFLOW;
    // If any of the input parameters is a NULL pointer, report an error.
    if (inimg == NULL || outimg == NULL || abnormalimg == NULL)
        return NULL_POINTER;
    // The following code performs image preprocessing, which mainly means
    // preparing device memory for the input and output images.
    int errcode;  // local variable: error code
    // Local variables: the image size used in this operation.
    size_t imgw = inimg->roiX2 - inimg->roiX1;
    size_t imgh = inimg->roiY2 - inimg->roiY1;
    // Copy the input image into device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(inimg);
    if (errcode != NO_ERROR)
        return errcode;
    // Copy the reference contour image into device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveContour);
    if (errcode != NO_ERROR)
        return errcode;
    // Shrink the working size to the ROI of the reference contour image.
    if (imgw > this->primitiveContour->roiX2 - this->primitiveContour->roiX1)
        imgw = this->primitiveContour->roiX2 - this->primitiveContour->roiX1;
    if (imgh > this->primitiveContour->roiY2 - this->primitiveContour->roiY1)
        imgh = this->primitiveContour->roiY2 - this->primitiveContour->roiY1;
    // Copy the reference region image into device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(this->primitiveRegion);
    if (errcode != NO_ERROR)
        return errcode;
    // Shrink the working size to the ROI of the reference region image.
    if (imgw > this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1)
        imgw = this->primitiveRegion->roiX2 - this->primitiveRegion->roiX1;
    if (imgh > this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1)
        imgh = this->primitiveRegion->roiY2 - this->primitiveRegion->roiY1;
    // Copy the output image into device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(outimg);
    if (errcode != NO_ERROR) {
        // If the output image holds no data (so the copy above fails), create an
        // image with the same size as the ROI sub-image of the input image.
        errcode = ImageBasicOp::makeAtCurrentDevice(outimg, imgw, imgh);
        // If creating the image also fails, the operation has failed completely;
        // report the error and exit.
        if (errcode != NO_ERROR)
            return errcode;
    } else {
        // If the output image already holds data, use its size to update the
        // final working size.
        if (imgw > outimg->roiX2 - outimg->roiX1)
            imgw = outimg->roiX2 - outimg->roiX1;
        if (imgh > outimg->roiY2 - outimg->roiY1)
            imgh = outimg->roiY2 - outimg->roiY1;
    }
    // Copy the outlier image into device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(abnormalimg);
    if (errcode != NO_ERROR) {
        // If the outlier image holds no data (so the copy above fails), create an
        // image with the same size as the ROI sub-image of the input image.
        errcode = ImageBasicOp::makeAtCurrentDevice(abnormalimg, imgw, imgh);
        // If creating the image also fails, the operation has failed completely;
        // report the error and exit.
        if (errcode != NO_ERROR)
            return errcode;
    } else {
        // If the outlier image already holds data, use its size to update the
        // final working size.
        if (imgw > abnormalimg->roiX2 - abnormalimg->roiX1)
            imgw = abnormalimg->roiX2 - abnormalimg->roiX1;
        if (imgh > abnormalimg->roiY2 - abnormalimg->roiY1)
            imgh = abnormalimg->roiY2 - abnormalimg->roiY1;
    }
    // Extract the ROI sub-image of the input image.
    ImageCuda insubimgCud;
    errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the reference contour image.
    ImageCuda prmtcontsubimgCud;
    errcode = ImageBasicOp::roiSubImage(this->primitiveContour, &prmtcontsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the reference region image.
    ImageCuda prmtregsubimgCud;
    errcode = ImageBasicOp::roiSubImage(this->primitiveRegion, &prmtregsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the output image.
    ImageCuda outsubimgCud;
    errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the outlier image.
    ImageCuda abnorsubimgCud;
    errcode = ImageBasicOp::roiSubImage(abnormalimg, &abnorsubimgCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Adjust the sub-image sizes to the working size computed above.
    insubimgCud.imgMeta.width = prmtcontsubimgCud.imgMeta.width =
            prmtregsubimgCud.imgMeta.width =
            outsubimgCud.imgMeta.width =
            abnorsubimgCud.imgMeta.width = imgw;
    insubimgCud.imgMeta.height = prmtcontsubimgCud.imgMeta.height =
            prmtregsubimgCud.imgMeta.height =
            outsubimgCud.imgMeta.height =
            abnorsubimgCud.imgMeta.height = imgh;
    // Compute the thread-block size and the number of blocks for the kernel launch.
    dim3 blocksize, gridsize;
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / blocksize.y;
    // Launch the kernel to match and label the contours.
    _searchPrimitiveContourKer<<<gridsize, blocksize>>>(
            insubimgCud, outsubimgCud, abnorsubimgCud,
            prmtcontsubimgCud, prmtregsubimgCud, this->trackRad);
    // If the CUDA call failed, return an error code.
    if (cudaGetLastError() != cudaSuccess)
        return CUDA_ERROR;
    // Done; exit.
    return NO_ERROR;
}
|
the_stack
|
#include "typedef.h"
#include "cuda_rys_sp.h"
#include "cuda_rys_dp.h"
__device__ void cuda_Roots_dp(int n, double X, double roots[], double weights[]){
if (n <= 3)
cuda_Root123_dp(n,X, roots,weights);
else if (n==4)
cuda_Root4_dp(X, roots,weights);
else if (n==5)
cuda_Root5_dp(X, roots,weights);
else
cuda_Root6_dp(n,X, roots,weights);
return;
}
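// Editor's note: cuda_Roots_dp fills the first n entries of roots[] and
// weights[]; the caller must size both arrays for the order it requests. The
// device-side sketch below only illustrates that calling convention (the value
// of X would normally come from the integral prefactors, which are outside this
// excerpt).
__device__ double cuda_roots_usage_sketch(double X) {
    const int n = 3;                  // quadrature order requested here
    double roots[3], weights[3];      // sized for n entries
    cuda_Roots_dp(n, X, roots, weights);
    double acc = 0.0;
    for (int k = 0; k < n; ++k)
        acc += weights[k];            // e.g. accumulate a weighted sum over the roots
    return acc;
}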
__device__ void cuda_Root123_dp(int n, double X, double roots[], double weights[]){
double R12, PIE4, R22, W22, R13, R23, W23, R33, W33;
double RT1=0,RT2=0,RT3=0,WW1=0,WW2=0,WW3=0;
double F1,F2,E,T1,T2,T3,A1,A2,Y;
R12 = 2.75255128608411E-01;
PIE4 = 7.85398163397448E-01;
R22 = 2.72474487139158E+00;
W22 = 9.17517095361369E-02;
R13 = 1.90163509193487E-01;
R23 = 1.78449274854325E+00;
W23 = 1.77231492083829E-01;
R33 = 5.52534374226326E+00;
W33 = 5.11156880411248E-03;
if (X < 3.e-7){
if (n == 1){
RT1 = 0.5E+00 -X/5.0E+00;
WW1 = 1.0E+00 -X/3.0E+00;
} else if (n == 2) {
RT1 = 1.30693606237085E-01 -2.90430236082028E-02 *X;
RT2 = 2.86930639376291E+00 -6.37623643058102E-01 *X;
WW1 = 6.52145154862545E-01 -1.22713621927067E-01 *X;
WW2 = 3.47854845137453E-01 -2.10619711404725E-01 *X;
} else if (n == 3) {
RT1 = 6.03769246832797E-02 -9.28875764357368E-03 *X;
RT2 = 7.76823355931043E-01 -1.19511285527878E-01 *X;
RT3 = 6.66279971938567E+00 -1.02504611068957E+00 *X;
WW1 = 4.67913934572691E-01 -5.64876917232519E-02 *X;
WW2 = 3.60761573048137E-01 -1.49077186455208E-01 *X;
WW3 = 1.71324492379169E-01 -1.27768455150979E-01 *X;
}
} else if (X < 1.) {
if (n == 1){
F1 = ((((((((-8.36313918003957E-08*X+1.21222603512827E-06 )*X-
1.15662609053481E-05 )*X+9.25197374512647E-05 )*X-
6.40994113129432E-04 )*X+3.78787044215009E-03 )*X-
1.85185172458485E-02 )*X+7.14285713298222E-02 )*X-
1.99999999997023E-01 )*X+3.33333333333318E-01;
WW1 = (X+X)*F1+exp(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((-8.36313918003957E-08*X+1.21222603512827E-06 )*X-
1.15662609053481E-05 )*X+9.25197374512647E-05 )*X-
6.40994113129432E-04 )*X+3.78787044215009E-03 )*X-
1.85185172458485E-02 )*X+7.14285713298222E-02 )*X-
1.99999999997023E-01 )*X+3.33333333333318E-01;
WW1 = (X+X)*F1+exp(-X);
RT1 = (((((((-2.35234358048491E-09*X+2.49173650389842E-08)*X-
4.558315364581E-08)*X-2.447252174587E-06)*X+
4.743292959463E-05)*X-5.33184749432408E-04 )*X+
4.44654947116579E-03 )*X-2.90430236084697E-02 )*X+
1.30693606237085E-01;
RT2 = (((((((-2.47404902329170E-08*X+2.36809910635906E-07)*X+
1.835367736310E-06)*X-2.066168802076E-05)*X-
1.345693393936E-04)*X-5.88154362858038E-05 )*X+
5.32735082098139E-02 )*X-6.37623643056745E-01 )*X+
2.86930639376289E+00;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n==3){
RT1 = ((((((-5.10186691538870E-10*X+2.40134415703450E-08)*X-
5.01081057744427E-07 )*X+7.58291285499256E-06 )*X-
9.55085533670919E-05 )*X+1.02893039315878E-03 )*X-
9.28875764374337E-03 )*X+6.03769246832810E-02;
RT2 = ((((((-1.29646524960555E-08*X+7.74602292865683E-08)*X+
1.56022811158727E-06 )*X-1.58051990661661E-05 )*X-
3.30447806384059E-04 )*X+9.74266885190267E-03 )*X-
1.19511285526388E-01 )*X+7.76823355931033E-01;
RT3 = ((((((-9.28536484109606E-09*X-3.02786290067014E-07)*X-
2.50734477064200E-06 )*X-7.32728109752881E-06 )*X+
2.44217481700129E-04 )*X+4.94758452357327E-02 )*X-
1.02504611065774E+00 )*X+6.66279971938553E+00;
F2 = ((((((((-7.60911486098850E-08*X+1.09552870123182E-06 )*X-
1.03463270693454E-05 )*X+8.16324851790106E-05 )*X-
5.55526624875562E-04 )*X+3.20512054753924E-03 )*X-
1.51515139838540E-02 )*X+5.55555554649585E-02 )*X-
1.42857142854412E-01 )*X+1.99999999999986E-01;
E = exp(-X);
F1 = ((X+X)*F2+E)/3.0E+00;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 3.) {
Y = X-2.0E+00;
if (n == 1) {
F1 = ((((((((((-1.61702782425558E-10*Y+1.96215250865776E-09 )*Y-
2.14234468198419E-08 )*Y+2.17216556336318E-07 )*Y-
1.98850171329371E-06 )*Y+1.62429321438911E-05 )*Y-
1.16740298039895E-04 )*Y+7.24888732052332E-04 )*Y-
3.79490003707156E-03 )*Y+1.61723488664661E-02 )*Y-
5.29428148329736E-02 )*Y+1.15702180856167E-01;
WW1 = (X+X)*F1+exp(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-1.61702782425558E-10*Y+1.96215250865776E-09 )*Y-
2.14234468198419E-08 )*Y+2.17216556336318E-07 )*Y-
1.98850171329371E-06 )*Y+1.62429321438911E-05 )*Y-
1.16740298039895E-04 )*Y+7.24888732052332E-04 )*Y-
3.79490003707156E-03 )*Y+1.61723488664661E-02 )*Y-
5.29428148329736E-02 )*Y+1.15702180856167E-01;
WW1 = (X+X)*F1+exp(-X);
RT1 = (((((((((-6.36859636616415E-12*Y+8.47417064776270E-11)*Y-
5.152207846962E-10)*Y-3.846389873308E-10)*Y+
8.472253388380E-08)*Y-1.85306035634293E-06 )*Y+
2.47191693238413E-05 )*Y-2.49018321709815E-04 )*Y+
2.19173220020161E-03 )*Y-1.63329339286794E-02 )*Y+
8.68085688285261E-02;
RT2 = ((((((((( 1.45331350488343E-10*Y+2.07111465297976E-09)*Y-
1.878920917404E-08)*Y-1.725838516261E-07)*Y+
2.247389642339E-06)*Y+9.76783813082564E-06 )*Y-
1.93160765581969E-04 )*Y-1.58064140671893E-03 )*Y+
4.85928174507904E-02 )*Y-4.30761584997596E-01 )*Y+
1.80400974537950E+00;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = (((((((( 1.44687969563318E-12*Y+4.85300143926755E-12)*Y-
6.55098264095516E-10 )*Y+1.56592951656828E-08 )*Y-
2.60122498274734E-07 )*Y+3.86118485517386E-06 )*Y-
5.13430986707889E-05 )*Y+6.03194524398109E-04 )*Y-
6.11219349825090E-03 )*Y+4.52578254679079E-02;
RT2 = ((((((( 6.95964248788138E-10*Y-5.35281831445517E-09)*Y-
6.745205954533E-08)*Y+1.502366784525E-06)*Y+
9.923326947376E-07)*Y-3.89147469249594E-04 )*Y+
7.51549330892401E-03 )*Y-8.48778120363400E-02 )*Y+
5.73928229597613E-01;
RT3 = ((((((((-2.81496588401439E-10*Y+3.61058041895031E-09)*Y+
4.53631789436255E-08 )*Y-1.40971837780847E-07 )*Y-
6.05865557561067E-06 )*Y-5.15964042227127E-05 )*Y+
3.34761560498171E-05 )*Y+5.04871005319119E-02 )*Y-
8.24708946991557E-01 )*Y+4.81234667357205E+00;
F2 = ((((((((((-1.48044231072140E-10*Y+1.78157031325097E-09 )*Y-
1.92514145088973E-08 )*Y+1.92804632038796E-07 )*Y-
1.73806555021045E-06 )*Y+1.39195169625425E-05 )*Y-
9.74574633246452E-05 )*Y+5.83701488646511E-04 )*Y-
2.89955494844975E-03 )*Y+1.13847001113810E-02 )*Y-
3.23446977320647E-02 )*Y+5.29428148329709E-02;
E = exp(-X);
F1 = ((X+X)*F2+E)/3.0E+00;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 5.){
Y = X-4.0E+00;
if (n == 1){
F1 = ((((((((((-2.62453564772299E-11*Y+3.24031041623823E-10 )*Y-
3.614965656163E-09)*Y+3.760256799971E-08)*Y-
3.553558319675E-07)*Y+3.022556449731E-06)*Y-
2.290098979647E-05)*Y+1.526537461148E-04)*Y-
8.81947375894379E-04 )*Y+4.33207949514611E-03 )*Y-
1.75257821619926E-02 )*Y+5.28406320615584E-02;
WW1 = (X+X)*F1+exp(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-2.62453564772299E-11*Y+3.24031041623823E-10 )*Y-
3.614965656163E-09)*Y+3.760256799971E-08)*Y-
3.553558319675E-07)*Y+3.022556449731E-06)*Y-
2.290098979647E-05)*Y+1.526537461148E-04)*Y-
8.81947375894379E-04 )*Y+4.33207949514611E-03 )*Y-
1.75257821619926E-02 )*Y+5.28406320615584E-02;
WW1 = (X+X)*F1+exp(-X);
RT1 = ((((((((-4.11560117487296E-12*Y+7.10910223886747E-11)*Y-
1.73508862390291E-09 )*Y+5.93066856324744E-08 )*Y-
9.76085576741771E-07 )*Y+1.08484384385679E-05 )*Y-
1.12608004981982E-04 )*Y+1.16210907653515E-03 )*Y-
9.89572595720351E-03 )*Y+6.12589701086408E-02;
RT2 = (((((((((-1.80555625241001E-10*Y+5.44072475994123E-10)*Y+
1.603498045240E-08)*Y-1.497986283037E-07)*Y-
7.017002532106E-07)*Y+1.85882653064034E-05 )*Y-
2.04685420150802E-05 )*Y-2.49327728643089E-03 )*Y+
3.56550690684281E-02 )*Y-2.60417417692375E-01 )*Y+
1.12155283108289E+00;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = ((((((( 1.44265709189601E-11*Y-4.66622033006074E-10)*Y+
7.649155832025E-09)*Y-1.229940017368E-07)*Y+
2.026002142457E-06)*Y-2.87048671521677E-05 )*Y+
3.70326938096287E-04 )*Y-4.21006346373634E-03 )*Y+
3.50898470729044E-02;
RT2 = ((((((((-2.65526039155651E-11*Y+1.97549041402552E-10)*Y+
2.15971131403034E-09 )*Y-7.95045680685193E-08 )*Y+
5.15021914287057E-07 )*Y+1.11788717230514E-05 )*Y-
3.33739312603632E-04 )*Y+5.30601428208358E-03 )*Y-
5.93483267268959E-02 )*Y+4.31180523260239E-01;
RT3 = ((((((((-3.92833750584041E-10*Y-4.16423229782280E-09)*Y+
4.42413039572867E-08 )*Y+6.40574545989551E-07 )*Y-
3.05512456576552E-06 )*Y-1.05296443527943E-04 )*Y-
6.14120969315617E-04 )*Y+4.89665802767005E-02 )*Y-
6.24498381002855E-01 )*Y+3.36412312243724E+00;
F2 = ((((((((((-2.36788772599074E-11*Y+2.89147476459092E-10 )*Y-
3.18111322308846E-09 )*Y+3.25336816562485E-08 )*Y-
3.00873821471489E-07 )*Y+2.48749160874431E-06 )*Y-
1.81353179793672E-05 )*Y+1.14504948737066E-04 )*Y-
6.10614987696677E-04 )*Y+2.64584212770942E-03 )*Y-
8.66415899015349E-03 )*Y+1.75257821619922E-02;
E = exp(-X);
F1 = ((X+X)*F2+E)/3.0E+00;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 10) {
E = exp(-X);
WW1 = (((((( 4.6897511375022E-01/X-6.9955602298985E-01)/X +
5.3689283271887E-01)/X-3.2883030418398E-01)/X +
2.4645596956002E-01)/X-4.9984072848436E-01)/X -
3.1501078774085E-06)*E + sqrt(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
Y = X-7.5E+00;
RT1 = (((((((((((((-1.43632730148572E-16*Y+2.38198922570405E-16)*
Y+1.358319618800E-14)*Y-7.064522786879E-14)*Y-
7.719300212748E-13)*Y+7.802544789997E-12)*Y+
6.628721099436E-11)*Y-1.775564159743E-09)*Y+
1.713828823990E-08)*Y-1.497500187053E-07)*Y+
2.283485114279E-06)*Y-3.76953869614706E-05 )*Y+
4.74791204651451E-04 )*Y-4.60448960876139E-03 )*Y+
3.72458587837249E-02;
RT2 = (((((((((((( 2.48791622798900E-14*Y-1.36113510175724E-13)*Y-
2.224334349799E-12)*Y+4.190559455515E-11)*Y-
2.222722579924E-10)*Y-2.624183464275E-09)*Y+
6.128153450169E-08)*Y-4.383376014528E-07)*Y-
2.49952200232910E-06 )*Y+1.03236647888320E-04 )*Y-
1.44614664924989E-03 )*Y+1.35094294917224E-02 )*Y-
9.53478510453887E-02 )*Y+5.44765245686790E-01;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-7.5E+00;
RT1 = ((((((((((( 5.74429401360115E-16*Y+7.11884203790984E-16)*Y-
6.736701449826E-14)*Y-6.264613873998E-13)*Y+
1.315418927040E-11)*Y-4.23879635610964E-11 )*Y+
1.39032379769474E-09 )*Y-4.65449552856856E-08 )*Y+
7.34609900170759E-07 )*Y-1.08656008854077E-05 )*Y+
1.77930381549953E-04 )*Y-2.39864911618015E-03 )*Y+
2.39112249488821E-02;
RT2 = ((((((((((( 1.13464096209120E-14*Y+6.99375313934242E-15)*Y-
8.595618132088E-13)*Y-5.293620408757E-12)*Y-
2.492175211635E-11)*Y+2.73681574882729E-09 )*Y-
1.06656985608482E-08 )*Y-4.40252529648056E-07 )*Y+
9.68100917793911E-06 )*Y-1.68211091755327E-04 )*Y+
2.69443611274173E-03 )*Y-3.23845035189063E-02 )*Y+
2.75969447451882E-01;
RT3 = (((((((((((( 6.66339416996191E-15*Y+1.84955640200794E-13)*Y-
1.985141104444E-12)*Y-2.309293727603E-11)*Y+
3.917984522103E-10)*Y+1.663165279876E-09)*Y-
6.205591993923E-08)*Y+8.769581622041E-09)*Y+
8.97224398620038E-06 )*Y-3.14232666170796E-05 )*Y-
1.83917335649633E-03 )*Y+3.51246831672571E-02 )*Y-
3.22335051270860E-01 )*Y+1.73582831755430E+00;
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 15) {
E = exp(-X);
WW1 = (((-1.8784686463512E-01/X+2.2991849164985E-01)/X -
4.9893752514047E-01)/X-2.1916512131607E-05)*E
+ sqrt(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2) {
RT1 = ((((-1.01041157064226E-05*X+1.19483054115173E-03)*X -
6.73760231824074E-02)*X+1.25705571069895E+00)*X +
(((-8.57609422987199E+03/X+5.91005939591842E+03)/X -
1.70807677109425E+03)/X+2.64536689959503E+02)/X -
2.38570496490846E+01)*E + R12/(X-R12);
RT2 = ((( 3.39024225137123E-04*X-9.34976436343509E-02)*X -
4.22216483306320E+00)*X +
(((-2.08457050986847E+03/X -
1.04999071905664E+03)/X+3.39891508992661E+02)/X -
1.56184800325063E+02)/X+8.00839033297501E+00)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-12.5E+00;
RT1 = ((((((((((( 4.42133001283090E-16*Y-2.77189767070441E-15)*Y-
4.084026087887E-14)*Y+5.379885121517E-13)*Y+
1.882093066702E-12)*Y-8.67286219861085E-11 )*Y+
7.11372337079797E-10 )*Y-3.55578027040563E-09 )*Y+
1.29454702851936E-07 )*Y-4.14222202791434E-06 )*Y+
8.04427643593792E-05 )*Y-1.18587782909876E-03 )*Y+
1.53435577063174E-02;
RT2 = ((((((((((( 6.85146742119357E-15*Y-1.08257654410279E-14)*Y-
8.579165965128E-13)*Y+6.642452485783E-12)*Y+
4.798806828724E-11)*Y-1.13413908163831E-09 )*Y+
7.08558457182751E-09 )*Y-5.59678576054633E-08 )*Y+
2.51020389884249E-06 )*Y-6.63678914608681E-05 )*Y+
1.11888323089714E-03 )*Y-1.45361636398178E-02 )*Y+
1.65077877454402E-01;
RT3 = (((((((((((( 3.20622388697743E-15*Y-2.73458804864628E-14)*Y-
3.157134329361E-13)*Y+8.654129268056E-12)*Y-
5.625235879301E-11)*Y-7.718080513708E-10)*Y+
2.064664199164E-08)*Y-1.567725007761E-07)*Y-
1.57938204115055E-06 )*Y+6.27436306915967E-05 )*Y-
1.01308723606946E-03 )*Y+1.13901881430697E-02 )*Y-
1.01449652899450E-01 )*Y+7.77203937334739E-01;
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 33) {
E = exp(-X);
WW1 = (( 1.9623264149430E-01/X-4.9695241464490E-01)/X -
6.0156581186481E-05)*E + sqrt(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
RT1 = ((((-1.14906395546354E-06*X+1.76003409708332E-04)*X -
1.71984023644904E-02)*X-1.37292644149838E-01)*X +
(-4.75742064274859E+01/X+9.21005186542857E+00)/X -
2.31080873898939E-02)*E + R12/(X-R12);
RT2 = ((( 3.64921633404158E-04*X-9.71850973831558E-02)*X -
4.02886174850252E+00)*X +
(-1.35831002139173E+02/X -
8.66891724287962E+01)/X+2.98011277766958E+00)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
if (X < 20) {
RT1 = ((((((-2.43270989903742E-06*X+3.57901398988359E-04)*X -
2.34112415981143E-02)*X+7.81425144913975E-01)*X -
1.73209218219175E+01)*X+2.43517435690398E+02)*X +
(-1.97611541576986E+04/X+9.82441363463929E+03)/X -
2.07970687843258E+03)*E + R13/(X-R13);
RT2 = (((((-2.62627010965435E-04*X+3.49187925428138E-02)*X -
3.09337618731880E+00)*X+1.07037141010778E+02)*X -
2.36659637247087E+03)*X +
((-2.91669113681020E+06/X +
1.41129505262758E+06)/X-2.91532335433779E+05)/X +
3.35202872835409E+04)*E + R23/(X-R23);
RT3 = ((((( 9.31856404738601E-05*X-2.87029400759565E-02)*X -
7.83503697918455E-01)*X-1.84338896480695E+01)*X +
4.04996712650414E+02)*X +
(-1.89829509315154E+05/X +
5.11498390849158E+04)/X-6.88145821789955E+03)*E
+ R33/(X-R33);
} else {
RT1 = ((((-4.97561537069643E-04*X-5.00929599665316E-02)*X +
1.31099142238996E+00)*X-1.88336409225481E+01)*X -
6.60344754467191E+02 /X+1.64931462413877E+02)*E
+ R13/(X-R13);
RT2 = ((((-4.48218898474906E-03*X-5.17373211334924E-01)*X +
1.13691058739678E+01)*X-1.65426392885291E+02)*X -
6.30909125686731E+03 /X+1.52231757709236E+03)*E
+ R23/(X-R23);
RT3 = ((((-1.38368602394293E-02*X-1.77293428863008E+00)*X +
1.73639054044562E+01)*X-3.57615122086961E+02)*X -
1.45734701095912E+04 /X+2.69831813951849E+03)*E
+ R33/(X-R33);
}
T1 = RT1/(RT1+1.0E+00);
T2 = RT2/(RT2+1.0E+00);
T3 = RT3/(RT3+1.0E+00);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else {
WW1 = sqrt(PIE4/X);
if (n == 1)
RT1 = 0.5E+00/(X-0.5E+00);
else if (n == 2) {
if (X < 40) {
E = exp(-X);
RT1 = (-8.78947307498880E-01*X+1.09243702330261E+01)*E
+ R12/(X-R12);
RT2 = (-9.28903924275977E+00*X+8.10642367843811E+01)*E
+ R22/(X-R22);
WW2 = ( 4.46857389308400E+00*X-7.79250653461045E+01)*E + W22*WW1;
WW1 = WW1-WW2;
} else {
RT1 = R12/(X-R12);
RT2 = R22/(X-R22);
WW2 = W22*WW1;
WW1 = WW1-WW2;
}
} else if (n == 3) {
if (X < 47) {
E = exp(-X);
RT1 = ((-7.39058467995275E+00*X+3.21318352526305E+02)*X -
3.99433696473658E+03)*E + R13/(X-R13);
RT2 = ((-7.38726243906513E+01*X+3.13569966333873E+03)*X -
3.86862867311321E+04)*E + R23/(X-R23);
RT3 = ((-2.63750565461336E+02*X+1.04412168692352E+04)*X -
1.28094577915394E+05)*E + R33/(X-R33);
WW3 = ((( 1.52258947224714E-01*X-8.30661900042651E+00)*X +
1.92977367967984E+02)*X-1.67787926005344E+03)*E
+ W33*WW1;
WW2 = (( 6.15072615497811E+01*X-2.91980647450269E+03)*X +
3.80794303087338E+04)*E + W23*WW1;
WW1 = WW1-WW2-WW3;
} else {
RT1 = R13/(X-R13);
RT2 = R23/(X-R23);
RT3 = R33/(X-R33);
WW2 = W23*WW1;
WW3 = W33*WW1;
WW1 = WW1-WW2-WW3;
}
}
}
roots[0] = RT1;
weights[0] = WW1;
if (n > 1){
roots[1] = RT2;
weights[1] = WW2;
}
if (n > 2) {
roots[2] = RT3;
weights[2] = WW3;
}
return;
}
__device__ void cuda_Root4_dp(double X, double roots[], double weights[]){
double R14,PIE4,R24,W24,R34,W34,R44,W44;
double RT1=0,RT2=0,RT3=0,RT4=0,WW1=0,WW2=0,WW3=0,WW4=0;
double Y,E;
R14 = 1.45303521503316E-01;
PIE4 = 7.85398163397448E-01;
R24 = 1.33909728812636E+00;
W24 = 2.34479815323517E-01;
R34 = 3.92696350135829E+00;
W34 = 1.92704402415764E-02;
R44 = 8.58863568901199E+00;
W44 = 2.25229076750736E-04;
if (X <= 3.0E-7) {
RT1 = 3.48198973061471E-02 -4.09645850660395E-03 *X;
RT2 = 3.81567185080042E-01 -4.48902570656719E-02 *X;
RT3 = 1.73730726945891E+00 -2.04389090547327E-01 *X;
RT4 = 1.18463056481549E+01 -1.39368301742312E+00 *X;
WW1 = 3.62683783378362E-01 -3.13844305713928E-02 *X;
WW2 = 3.13706645877886E-01 -8.98046242557724E-02 *X;
WW3 = 2.22381034453372E-01 -1.29314370958973E-01 *X;
WW4 = 1.01228536290376E-01 -8.28299075414321E-02 *X;
} else if (X <= 1.0) {
RT1 = ((((((-1.95309614628539E-10*X+5.19765728707592E-09)*X-
1.01756452250573E-07 )*X+1.72365935872131E-06 )*X-
2.61203523522184E-05 )*X+3.52921308769880E-04 )*X-
4.09645850658433E-03 )*X+3.48198973061469E-02;
RT2 = (((((-1.89554881382342E-08*X+3.07583114342365E-07)*X+
1.270981734393E-06)*X-1.417298563884E-04)*X+
3.226979163176E-03)*X-4.48902570678178E-02 )*X+
3.81567185080039E-01;
RT3 = (((((( 1.77280535300416E-09*X+3.36524958870615E-08)*X-
2.58341529013893E-07 )*X-1.13644895662320E-05 )*X-
7.91549618884063E-05 )*X+1.03825827346828E-02 )*X-
2.04389090525137E-01 )*X+1.73730726945889E+00;
RT4 = (((((-5.61188882415248E-08*X-2.49480733072460E-07)*X+
3.428685057114E-06)*X+1.679007454539E-04)*X+
4.722855585715E-02)*X-1.39368301737828E+00 )*X+
1.18463056481543E+01;
WW1 = ((((((-1.14649303201279E-08*X+1.88015570196787E-07)*X-
2.33305875372323E-06 )*X+2.68880044371597E-05 )*X-
2.94268428977387E-04 )*X+3.06548909776613E-03 )*X-
3.13844305680096E-02 )*X+3.62683783378335E-01;
WW2 = ((((((((-4.11720483772634E-09*X+6.54963481852134E-08)*X-
7.20045285129626E-07 )*X+6.93779646721723E-06 )*X-
6.05367572016373E-05 )*X+4.74241566251899E-04 )*X-
3.26956188125316E-03 )*X+1.91883866626681E-02 )*X-
8.98046242565811E-02 )*X+3.13706645877886E-01;
WW3 = ((((((((-3.41688436990215E-08*X+5.07238960340773E-07)*X-
5.01675628408220E-06 )*X+4.20363420922845E-05 )*X-
3.08040221166823E-04 )*X+1.94431864731239E-03 )*X-
1.02477820460278E-02 )*X+4.28670143840073E-02 )*X-
1.29314370962569E-01 )*X+2.22381034453369E-01;
WW4 = ((((((((( 4.99660550769508E-09*X-7.94585963310120E-08)*X+
8.359072409485E-07)*X-7.422369210610E-06)*X+
5.763374308160E-05)*X-3.86645606718233E-04 )*X+
2.18417516259781E-03 )*X-9.99791027771119E-03 )*X+
3.48791097377370E-02 )*X-8.28299075413889E-02 )*X+
1.01228536290376E-01;
} else if (X <= 5.0) {
Y = X-3.0E+00;
RT1 = (((((((((-1.48570633747284E-15*Y-1.33273068108777E-13)*Y+
4.068543696670E-12)*Y-9.163164161821E-11)*Y+
2.046819017845E-09)*Y-4.03076426299031E-08 )*Y+
7.29407420660149E-07 )*Y-1.23118059980833E-05 )*Y+
1.88796581246938E-04 )*Y-2.53262912046853E-03 )*Y+
2.51198234505021E-02;
RT2 = ((((((((( 1.35830583483312E-13*Y-2.29772605964836E-12)*Y-
3.821500128045E-12)*Y+6.844424214735E-10)*Y-
1.048063352259E-08)*Y+1.50083186233363E-08 )*Y+
3.48848942324454E-06 )*Y-1.08694174399193E-04 )*Y+
2.08048885251999E-03 )*Y-2.91205805373793E-02 )*Y+
2.72276489515713E-01;
RT3 = ((((((((( 5.02799392850289E-13*Y+1.07461812944084E-11)*Y-
1.482277886411E-10)*Y-2.153585661215E-09)*Y+
3.654087802817E-08)*Y+5.15929575830120E-07 )*Y-
9.52388379435709E-06 )*Y-2.16552440036426E-04 )*Y+
9.03551469568320E-03 )*Y-1.45505469175613E-01 )*Y+
1.21449092319186E+00;
RT4 = (((((((((-1.08510370291979E-12*Y+6.41492397277798E-11)*Y+
7.542387436125E-10)*Y-2.213111836647E-09)*Y-
1.448228963549E-07)*Y-1.95670833237101E-06 )*Y-
1.07481314670844E-05 )*Y+1.49335941252765E-04 )*Y+
4.87791531990593E-02 )*Y-1.10559909038653E+00 )*Y+
8.09502028611780E+00;
WW1 = ((((((((((-4.65801912689961E-14*Y+7.58669507106800E-13)*Y-
1.186387548048E-11)*Y+1.862334710665E-10)*Y-
2.799399389539E-09)*Y+4.148972684255E-08)*Y-
5.933568079600E-07)*Y+8.168349266115E-06)*Y-
1.08989176177409E-04 )*Y+1.41357961729531E-03 )*Y-
1.87588361833659E-02 )*Y+2.89898651436026E-01;
WW2 = ((((((((((((-1.46345073267549E-14*Y+2.25644205432182E-13)*Y-
3.116258693847E-12)*Y+4.321908756610E-11)*Y-
5.673270062669E-10)*Y+7.006295962960E-09)*Y-
8.120186517000E-08)*Y+8.775294645770E-07)*Y-
8.77829235749024E-06 )*Y+8.04372147732379E-05 )*Y-
6.64149238804153E-04 )*Y+4.81181506827225E-03 )*Y-
2.88982669486183E-02 )*Y+1.56247249979288E-01;
WW3 = ((((((((((((( 9.06812118895365E-15*Y-1.40541322766087E-13)*
Y+1.919270015269E-12)*Y-2.605135739010E-11)*Y+
3.299685839012E-10)*Y-3.86354139348735E-09 )*Y+
4.16265847927498E-08 )*Y-4.09462835471470E-07 )*Y+
3.64018881086111E-06 )*Y-2.88665153269386E-05 )*Y+
2.00515819789028E-04 )*Y-1.18791896897934E-03 )*Y+
5.75223633388589E-03 )*Y-2.09400418772687E-02 )*Y+
4.85368861938873E-02;
WW4 = ((((((((((((((-9.74835552342257E-16*Y+1.57857099317175E-14)*
Y-2.249993780112E-13)*Y+3.173422008953E-12)*Y-
4.161159459680E-11)*Y+5.021343560166E-10)*Y-
5.545047534808E-09)*Y+5.554146993491E-08)*Y-
4.99048696190133E-07 )*Y+3.96650392371311E-06 )*Y-
2.73816413291214E-05 )*Y+1.60106988333186E-04 )*Y-
7.64560567879592E-04 )*Y+2.81330044426892E-03 )*Y-
7.16227030134947E-03 )*Y+9.66077262223353E-03;
} else if (X <= 10.0) {
Y = X-7.5E+00;
RT1 = ((((((((( 4.64217329776215E-15*Y-6.27892383644164E-15)*Y+
3.462236347446E-13)*Y-2.927229355350E-11)*Y+
5.090355371676E-10)*Y-9.97272656345253E-09 )*Y+
2.37835295639281E-07 )*Y-4.60301761310921E-06 )*Y+
8.42824204233222E-05 )*Y-1.37983082233081E-03 )*Y+
1.66630865869375E-02;
RT2 = ((((((((( 2.93981127919047E-14*Y+8.47635639065744E-13)*Y-
1.446314544774E-11)*Y-6.149155555753E-12)*Y+
8.484275604612E-10)*Y-6.10898827887652E-08 )*Y+
2.39156093611106E-06 )*Y-5.35837089462592E-05 )*Y+
1.00967602595557E-03 )*Y-1.57769317127372E-02 )*Y+
1.74853819464285E-01;
RT3 = (((((((((( 2.93523563363000E-14*Y-6.40041776667020E-14)*Y-
2.695740446312E-12)*Y+1.027082960169E-10)*Y-
5.822038656780E-10)*Y-3.159991002539E-08)*Y+
4.327249251331E-07)*Y+4.856768455119E-06)*Y-
2.54617989427762E-04 )*Y+5.54843378106589E-03 )*Y-
7.95013029486684E-02 )*Y+7.20206142703162E-01;
RT4 = (((((((((((-1.62212382394553E-14*Y+7.68943641360593E-13)*Y+
5.764015756615E-12)*Y-1.380635298784E-10)*Y-
1.476849808675E-09)*Y+1.84347052385605E-08 )*Y+
3.34382940759405E-07 )*Y-1.39428366421645E-06 )*Y-
7.50249313713996E-05 )*Y-6.26495899187507E-04 )*Y+
4.69716410901162E-02 )*Y-6.66871297428209E-01 )*Y+
4.11207530217806E+00;
WW1 = ((((((((((-1.65995045235997E-15*Y+6.91838935879598E-14)*Y-
9.131223418888E-13)*Y+1.403341829454E-11)*Y-
3.672235069444E-10)*Y+6.366962546990E-09)*Y-
1.039220021671E-07)*Y+1.959098751715E-06)*Y-
3.33474893152939E-05 )*Y+5.72164211151013E-04 )*Y-
1.05583210553392E-02 )*Y+2.26696066029591E-01;
WW2 = ((((((((((((-3.57248951192047E-16*Y+6.25708409149331E-15)*Y-
9.657033089714E-14)*Y+1.507864898748E-12)*Y-
2.332522256110E-11)*Y+3.428545616603E-10)*Y-
4.698730937661E-09)*Y+6.219977635130E-08)*Y-
7.83008889613661E-07 )*Y+9.08621687041567E-06 )*Y-
9.86368311253873E-05 )*Y+9.69632496710088E-04 )*Y-
8.14594214284187E-03 )*Y+8.50218447733457E-02;
WW3 = ((((((((((((( 1.64742458534277E-16*Y-2.68512265928410E-15)*
Y+3.788890667676E-14)*Y-5.508918529823E-13)*Y+
7.555896810069E-12)*Y-9.69039768312637E-11 )*Y+
1.16034263529672E-09 )*Y-1.28771698573873E-08 )*Y+
1.31949431805798E-07 )*Y-1.23673915616005E-06 )*Y+
1.04189803544936E-05 )*Y-7.79566003744742E-05 )*Y+
5.03162624754434E-04 )*Y-2.55138844587555E-03 )*Y+
1.13250730954014E-02;
WW4 = ((((((((((((((-1.55714130075679E-17*Y+2.57193722698891E-16)*
Y-3.626606654097E-15)*Y+5.234734676175E-14)*Y-
7.067105402134E-13)*Y+8.793512664890E-12)*Y-
1.006088923498E-10)*Y+1.050565098393E-09)*Y-
9.91517881772662E-09 )*Y+8.35835975882941E-08 )*Y-
6.19785782240693E-07 )*Y+3.95841149373135E-06 )*Y-
2.11366761402403E-05 )*Y+9.00474771229507E-05 )*Y-
2.78777909813289E-04 )*Y+5.26543779837487E-04;
} else if (X <= 15.0) {
Y = X-12.5E+00;
RT1 = ((((((((((( 4.94869622744119E-17*Y+8.03568805739160E-16)*Y-
5.599125915431E-15)*Y-1.378685560217E-13)*Y+
7.006511663249E-13)*Y+1.30391406991118E-11 )*Y+
8.06987313467541E-11 )*Y-5.20644072732933E-09 )*Y+
7.72794187755457E-08 )*Y-1.61512612564194E-06 )*Y+
4.15083811185831E-05 )*Y-7.87855975560199E-04 )*Y+
1.14189319050009E-02;
RT2 = ((((((((((( 4.89224285522336E-16*Y+1.06390248099712E-14)*Y-
5.446260182933E-14)*Y-1.613630106295E-12)*Y+
3.910179118937E-12)*Y+1.90712434258806E-10 )*Y+
8.78470199094761E-10 )*Y-5.97332993206797E-08 )*Y+
9.25750831481589E-07 )*Y-2.02362185197088E-05 )*Y+
4.92341968336776E-04 )*Y-8.68438439874703E-03 )*Y+
1.15825965127958E-01;
RT3 = (((((((((( 6.12419396208408E-14*Y+1.12328861406073E-13)*Y-
9.051094103059E-12)*Y-4.781797525341E-11)*Y+
1.660828868694E-09)*Y+4.499058798868E-10)*Y-
2.519549641933E-07)*Y+4.977444040180E-06)*Y-
1.25858350034589E-04 )*Y+2.70279176970044E-03 )*Y-
3.99327850801083E-02 )*Y+4.33467200855434E-01;
RT4 = ((((((((((( 4.63414725924048E-14*Y-4.72757262693062E-14)*Y-
1.001926833832E-11)*Y+6.074107718414E-11)*Y+
1.576976911942E-09)*Y-2.01186401974027E-08 )*Y-
1.84530195217118E-07 )*Y+5.02333087806827E-06 )*Y+
9.66961790843006E-06 )*Y-1.58522208889528E-03 )*Y+
2.80539673938339E-02 )*Y-2.78953904330072E-01 )*Y+
1.82835655238235E+00;
WW4 = ((((((((((((( 2.90401781000996E-18*Y-4.63389683098251E-17)*
Y+6.274018198326E-16)*Y-8.936002188168E-15)*Y+
1.194719074934E-13)*Y-1.45501321259466E-12 )*Y+
1.64090830181013E-11 )*Y-1.71987745310181E-10 )*Y+
1.63738403295718E-09 )*Y-1.39237504892842E-08 )*Y+
1.06527318142151E-07 )*Y-7.27634957230524E-07 )*Y+
4.12159381310339E-06 )*Y-1.74648169719173E-05 )*Y+
8.50290130067818E-05;
WW3 = ((((((((((((-4.19569145459480E-17*Y+5.94344180261644E-16)*Y-
1.148797566469E-14)*Y+1.881303962576E-13)*Y-
2.413554618391E-12)*Y+3.372127423047E-11)*Y-
4.933988617784E-10)*Y+6.116545396281E-09)*Y-
6.69965691739299E-08 )*Y+7.52380085447161E-07 )*Y-
8.08708393262321E-06 )*Y+6.88603417296672E-05 )*Y-
4.67067112993427E-04 )*Y+5.42313365864597E-03;
WW2 = ((((((((((-6.22272689880615E-15*Y+1.04126809657554E-13)*Y-
6.842418230913E-13)*Y+1.576841731919E-11)*Y-
4.203948834175E-10)*Y+6.287255934781E-09)*Y-
8.307159819228E-08)*Y+1.356478091922E-06)*Y-
2.08065576105639E-05 )*Y+2.52396730332340E-04 )*Y-
2.94484050194539E-03 )*Y+6.01396183129168E-02;
WW1 = (((-1.8784686463512E-01/X+2.2991849164985E-01)/X -
4.9893752514047E-01)/X-2.1916512131607E-05)*exp(-X) +
sqrt(PIE4/X)-WW4-WW3-WW2;
} else if (X <= 20.0) {
WW1 = sqrt(PIE4/X);
Y = X-17.5E+00;
RT1 = ((((((((((( 4.36701759531398E-17*Y-1.12860600219889E-16)*Y-
6.149849164164E-15)*Y+5.820231579541E-14)*Y+
4.396602872143E-13)*Y-1.24330365320172E-11 )*Y+
6.71083474044549E-11 )*Y+2.43865205376067E-10 )*Y+
1.67559587099969E-08 )*Y-9.32738632357572E-07 )*Y+
2.39030487004977E-05 )*Y-4.68648206591515E-04 )*Y+
8.34977776583956E-03;
RT2 = ((((((((((( 4.98913142288158E-16*Y-2.60732537093612E-16)*Y-
7.775156445127E-14)*Y+5.766105220086E-13)*Y+
6.432696729600E-12)*Y-1.39571683725792E-10 )*Y+
5.95451479522191E-10 )*Y+2.42471442836205E-09 )*Y+
2.47485710143120E-07 )*Y-1.14710398652091E-05 )*Y+
2.71252453754519E-04 )*Y-4.96812745851408E-03 )*Y+
8.26020602026780E-02;
RT3 = ((((((((((( 1.91498302509009E-15*Y+1.48840394311115E-14)*Y-
4.316925145767E-13)*Y+1.186495793471E-12)*Y+
4.615806713055E-11)*Y-5.54336148667141E-10 )*Y+
3.48789978951367E-10 )*Y-2.79188977451042E-09 )*Y+
2.09563208958551E-06 )*Y-6.76512715080324E-05 )*Y+
1.32129867629062E-03 )*Y-2.05062147771513E-02 )*Y+
2.88068671894324E-01;
RT4 = (((((((((((-5.43697691672942E-15*Y-1.12483395714468E-13)*Y+
2.826607936174E-12)*Y-1.266734493280E-11)*Y-
4.258722866437E-10)*Y+9.45486578503261E-09 )*Y-
5.86635622821309E-08 )*Y-1.28835028104639E-06 )*Y+
4.41413815691885E-05 )*Y-7.61738385590776E-04 )*Y+
9.66090902985550E-03 )*Y-1.01410568057649E-01 )*Y+
9.54714798156712E-01;
WW4 = ((((((((((((-7.56882223582704E-19*Y+7.53541779268175E-18)*Y-
1.157318032236E-16)*Y+2.411195002314E-15)*Y-
3.601794386996E-14)*Y+4.082150659615E-13)*Y-
4.289542980767E-12)*Y+5.086829642731E-11)*Y-
6.35435561050807E-10 )*Y+6.82309323251123E-09 )*Y-
5.63374555753167E-08 )*Y+3.57005361100431E-07 )*Y-
2.40050045173721E-06 )*Y+4.94171300536397E-05;
WW3 = (((((((((((-5.54451040921657E-17*Y+2.68748367250999E-16)*Y+
1.349020069254E-14)*Y-2.507452792892E-13)*Y+
1.944339743818E-12)*Y-1.29816917658823E-11 )*Y+
3.49977768819641E-10 )*Y-8.67270669346398E-09 )*Y+
1.31381116840118E-07 )*Y-1.36790720600822E-06 )*Y+
1.19210697673160E-05 )*Y-1.42181943986587E-04 )*Y+
4.12615396191829E-03;
WW2 = (((((((((((-1.86506057729700E-16*Y+1.16661114435809E-15)*Y+
2.563712856363E-14)*Y-4.498350984631E-13)*Y+
1.765194089338E-12)*Y+9.04483676345625E-12 )*Y+
4.98930345609785E-10 )*Y-2.11964170928181E-08 )*Y+
3.98295476005614E-07 )*Y-5.49390160829409E-06 )*Y+
7.74065155353262E-05 )*Y-1.48201933009105E-03 )*Y+
4.97836392625268E-02;
WW1 = (( 1.9623264149430E-01/X-4.9695241464490E-01)/X -
6.0156581186481E-05)*exp(-X)+WW1-WW2-WW3-WW4;
} else if (X <= 35.0) {
WW1 = sqrt(PIE4/X);
E = exp(-X);
RT1 = ((((((-4.45711399441838E-05*X+1.27267770241379E-03)*X -
2.36954961381262E-01)*X+1.54330657903756E+01)*X -
5.22799159267808E+02)*X+1.05951216669313E+04)*X +
(-2.51177235556236E+06/X+8.72975373557709E+05)/X -
1.29194382386499E+05)*E + R14/(X-R14);
RT2 = (((((-7.85617372254488E-02*X+6.35653573484868E+00)*X -
3.38296938763990E+02)*X+1.25120495802096E+04)*X -
3.16847570511637E+05)*X +
((-1.02427466127427E+09/X +
3.70104713293016E+08)/X-5.87119005093822E+07)/X +
5.38614211391604E+06)*E + R24/(X-R24);
RT3 = (((((-2.37900485051067E-01*X+1.84122184400896E+01)*X -
1.00200731304146E+03)*X+3.75151841595736E+04)*X -
9.50626663390130E+05)*X +
((-2.88139014651985E+09/X +
1.06625915044526E+09)/X-1.72465289687396E+08)/X +
1.60419390230055E+07)*E + R34/(X-R34);
RT4 = ((((((-6.00691586407385E-04*X-3.64479545338439E-01)*X +
1.57496131755179E+01)*X-6.54944248734901E+02)*X +
1.70830039597097E+04)*X-2.90517939780207E+05)*X +
(3.49059698304732E+07/X-1.64944522586065E+07)/X +
2.96817940164703E+06)*E + R44/(X-R44);
if (X <= 25.0)
WW4 = ((((((( 2.33766206773151E-07*X-
3.81542906607063E-05)*X +3.51416601267000E-03)*X-
1.66538571864728E-01)*X +4.80006136831847E+00)*X-
8.73165934223603E+01)*X +9.77683627474638E+02)*X +
1.66000945117640E+04/X -6.14479071209961E+03)*E + W44*WW1;
else
WW4 = (((((( 5.74245945342286E-06*X-
7.58735928102351E-05)*X +2.35072857922892E-04)*X-
3.78812134013125E-03)*X +3.09871652785805E-01)*X-
7.11108633061306E+00)*X +5.55297573149528E+01)*E + W44*WW1;
WW3 = (((((( 2.36392855180768E-04*X-9.16785337967013E-03)*X +
4.62186525041313E-01)*X-1.96943786006540E+01)*X +
4.99169195295559E+02)*X-6.21419845845090E+03)*X +
((+5.21445053212414E+07/X-1.34113464389309E+07)/X +
1.13673298305631E+06)/X-2.81501182042707E+03)*E + W34*WW1;
WW2 = (((((( 7.29841848989391E-04*X-3.53899555749875E-02)*X +
2.07797425718513E+00)*X-1.00464709786287E+02)*X +
3.15206108877819E+03)*X-6.27054715090012E+04)*X +
(+1.54721246264919E+07/X-5.26074391316381E+06)/X +
7.67135400969617E+05)*E + W24*WW1;
WW1 = (( 1.9623264149430E-01/X-4.9695241464490E-01)/X -
6.0156581186481E-05)*E + WW1-WW2-WW3-WW4;
} else if (X <= 53.0) {
WW1 = sqrt(PIE4/X);
E = exp(-X)*pow(X,4.0);
RT4 = ((-2.19135070169653E-03*X-1.19108256987623E-01)*X -
7.50238795695573E-01)*E + R44/(X-R44);
RT3 = ((-9.65842534508637E-04*X-4.49822013469279E-02)*X +
6.08784033347757E-01)*E + R34/(X-R34);
RT2 = ((-3.62569791162153E-04*X-9.09231717268466E-03)*X +
1.84336760556262E-01)*E + R24/(X-R24);
RT1 = ((-4.07557525914600E-05*X-6.88846864931685E-04)*X +
1.74725309199384E-02)*E + R14/(X-R14);
WW4 = (( 5.76631982000990E-06*X-7.89187283804890E-05)*X +
3.28297971853126E-04)*E + W44*WW1;
WW3 = (( 2.08294969857230E-04*X-3.77489954837361E-03)*X +
2.09857151617436E-02)*E + W34*WW1;
WW2 = (( 6.16374517326469E-04*X-1.26711744680092E-02)*X +
8.14504890732155E-02)*E + W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
} else {
WW1 = sqrt(PIE4/X);
RT1 = R14/(X-R14);
RT2 = R24/(X-R24);
RT3 = R34/(X-R34);
RT4 = R44/(X-R44);
WW4 = W44*WW1;
WW3 = W34*WW1;
WW2 = W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
return;
}
__device__ void cuda_Root5_dp(double X, double roots[], double weights[]){
double R15,PIE4,R25,W25,R35,W35,R45,W45,R55,W55;
double RT1=0,RT2=0,RT3=0,RT4=0,RT5=0,
WW1=0,WW2=0,WW3=0,WW4=0,WW5=0;
double Y,E=0,XXX;
R15 = 1.17581320211778E-01;
PIE4 = 7.85398163397448E-01;
R25 = 1.07456201243690E+00;
W25 = 2.70967405960535E-01;
R35 = 3.08593744371754E+00;
W35 = 3.82231610015404E-02;
R45 = 6.41472973366203E+00;
W45 = 1.51614186862443E-03;
R55 = 1.18071894899717E+01;
W55 = 8.62130526143657E-06;
if (X < 3.e-7){
RT1 = 2.26659266316985E-02 -2.15865967920897E-03 *X;
RT2 = 2.31271692140903E-01 -2.20258754389745E-02 *X;
RT3 = 8.57346024118836E-01 -8.16520023025515E-02 *X;
RT4 = 2.97353038120346E+00 -2.83193369647137E-01 *X;
RT5 = 1.84151859759051E+01 -1.75382723579439E+00 *X;
WW1 = 2.95524224714752E-01 -1.96867576909777E-02 *X;
WW2 = 2.69266719309995E-01 -5.61737590184721E-02 *X;
WW3 = 2.19086362515981E-01 -9.71152726793658E-02 *X;
WW4 = 1.49451349150580E-01 -1.02979262193565E-01 *X;
WW5 = 6.66713443086877E-02 -5.73782817488315E-02 *X;
} else if (X < 1.0){
RT1 = ((((((-4.46679165328413E-11*X+1.21879111988031E-09)*X-
2.62975022612104E-08 )*X+5.15106194905897E-07 )*X-
9.27933625824749E-06 )*X+1.51794097682482E-04 )*X-
2.15865967920301E-03 )*X+2.26659266316985E-02;
RT2 = (((((( 1.93117331714174E-10*X-4.57267589660699E-09)*X+
2.48339908218932E-08 )*X+1.50716729438474E-06 )*X-
6.07268757707381E-05 )*X+1.37506939145643E-03 )*X-
2.20258754419939E-02 )*X+2.31271692140905E-01;
RT3 = ((((( 4.84989776180094E-09*X+1.31538893944284E-07)*X-
2.766753852879E-06)*X-7.651163510626E-05)*X+
4.033058545972E-03)*X-8.16520022916145E-02 )*X+
8.57346024118779E-01;
RT4 = ((((-2.48581772214623E-07*X-4.34482635782585E-06)*X-
7.46018257987630E-07 )*X+1.01210776517279E-02 )*X-
2.83193369640005E-01 )*X+2.97353038120345E+00;
RT5 = (((((-8.92432153868554E-09*X+1.77288899268988E-08)*X+
3.040754680666E-06)*X+1.058229325071E-04)*X+
4.596379534985E-02)*X-1.75382723579114E+00 )*X+
1.84151859759049E+01;
WW1 = ((((((-2.03822632771791E-09*X+3.89110229133810E-08)*X-
5.84914787904823E-07 )*X+8.30316168666696E-06 )*X-
1.13218402310546E-04 )*X+1.49128888586790E-03 )*X-
1.96867576904816E-02 )*X+2.95524224714749E-01;
WW2 = ((((((( 8.62848118397570E-09*X-1.38975551148989E-07)*X+
1.602894068228E-06)*X-1.646364300836E-05)*X+
1.538445806778E-04)*X-1.28848868034502E-03 )*X+
9.38866933338584E-03 )*X-5.61737590178812E-02 )*X+
2.69266719309991E-01;
WW3 = ((((((((-9.41953204205665E-09*X+1.47452251067755E-07)*X-
1.57456991199322E-06 )*X+1.45098401798393E-05 )*X-
1.18858834181513E-04 )*X+8.53697675984210E-04 )*X-
5.22877807397165E-03 )*X+2.60854524809786E-02 )*X-
9.71152726809059E-02 )*X+2.19086362515979E-01;
WW4 = ((((((((-3.84961617022042E-08*X+5.66595396544470E-07)*X-
5.52351805403748E-06 )*X+4.53160377546073E-05 )*X-
3.22542784865557E-04 )*X+1.95682017370967E-03 )*X-
9.77232537679229E-03 )*X+3.79455945268632E-02 )*X-
1.02979262192227E-01 )*X+1.49451349150573E-01;
WW5 = ((((((((( 4.09594812521430E-09*X-6.47097874264417E-08)*X+
6.743541482689E-07)*X-5.917993920224E-06)*X+
4.531969237381E-05)*X-2.99102856679638E-04 )*X+
1.65695765202643E-03 )*X-7.40671222520653E-03 )*X+
2.50889946832192E-02 )*X-5.73782817487958E-02 )*X+
6.66713443086877E-02;
} else if (X < 5.0) {
Y = X-3.0E+00;
RT1 = ((((((((-2.58163897135138E-14*Y+8.14127461488273E-13)*Y-
2.11414838976129E-11 )*Y+5.09822003260014E-10 )*Y-
1.16002134438663E-08 )*Y+2.46810694414540E-07 )*Y-
4.92556826124502E-06 )*Y+9.02580687971053E-05 )*Y-
1.45190025120726E-03 )*Y+1.73416786387475E-02;
RT2 = ((((((((( 1.04525287289788E-14*Y+5.44611782010773E-14)*Y-
4.831059411392E-12)*Y+1.136643908832E-10)*Y-
1.104373076913E-09)*Y-2.35346740649916E-08 )*Y+
1.43772622028764E-06 )*Y-4.23405023015273E-05 )*Y+
9.12034574793379E-04 )*Y-1.52479441718739E-02 )*Y+
1.76055265928744E-01;
RT3 = (((((((((-6.89693150857911E-14*Y+5.92064260918861E-13)*Y+
1.847170956043E-11)*Y-3.390752744265E-10)*Y-
2.995532064116E-09)*Y+1.57456141058535E-07 )*Y-
3.95859409711346E-07 )*Y-9.58924580919747E-05 )*Y+
3.23551502557785E-03 )*Y-5.97587007636479E-02 )*Y+
6.46432853383057E-01;
RT4 = ((((((((-3.61293809667763E-12*Y-2.70803518291085E-11)*Y+
8.83758848468769E-10 )*Y+1.59166632851267E-08 )*Y-
1.32581997983422E-07 )*Y-7.60223407443995E-06 )*Y-
7.41019244900952E-05 )*Y+9.81432631743423E-03 )*Y-
2.23055570487771E-01 )*Y+2.21460798080643E+00;
RT5 = ((((((((( 7.12332088345321E-13*Y+3.16578501501894E-12)*Y-
8.776668218053E-11)*Y-2.342817613343E-09)*Y-
3.496962018025E-08)*Y-3.03172870136802E-07 )*Y+
1.50511293969805E-06 )*Y+1.37704919387696E-04 )*Y+
4.70723869619745E-02 )*Y-1.47486623003693E+00 )*Y+
1.35704792175847E+01;
WW1 = ((((((((( 1.04348658616398E-13*Y-1.94147461891055E-12)*Y+
3.485512360993E-11)*Y-6.277497362235E-10)*Y+
1.100758247388E-08)*Y-1.88329804969573E-07 )*Y+
3.12338120839468E-06 )*Y-5.04404167403568E-05 )*Y+
8.00338056610995E-04 )*Y-1.30892406559521E-02 )*Y+
2.47383140241103E-01;
WW2 = ((((((((((( 3.23496149760478E-14*Y-5.24314473469311E-13)*Y+
7.743219385056E-12)*Y-1.146022750992E-10)*Y+
1.615238462197E-09)*Y-2.15479017572233E-08 )*Y+
2.70933462557631E-07 )*Y-3.18750295288531E-06 )*Y+
3.47425221210099E-05 )*Y-3.45558237388223E-04 )*Y+
3.05779768191621E-03 )*Y-2.29118251223003E-02 )*Y+
1.59834227924213E-01;
WW3 = ((((((((((((-3.42790561802876E-14*Y+5.26475736681542E-13)*Y-
7.184330797139E-12)*Y+9.763932908544E-11)*Y-
1.244014559219E-09)*Y+1.472744068942E-08)*Y-
1.611749975234E-07)*Y+1.616487851917E-06)*Y-
1.46852359124154E-05 )*Y+1.18900349101069E-04 )*Y-
8.37562373221756E-04 )*Y+4.93752683045845E-03 )*Y-
2.25514728915673E-02 )*Y+6.95211812453929E-02;
WW4 = ((((((((((((( 1.04072340345039E-14*Y-1.60808044529211E-13)*
Y+2.183534866798E-12)*Y-2.939403008391E-11)*Y+
3.679254029085E-10)*Y-4.23775673047899E-09 )*Y+
4.46559231067006E-08 )*Y-4.26488836563267E-07 )*Y+
3.64721335274973E-06 )*Y-2.74868382777722E-05 )*Y+
1.78586118867488E-04 )*Y-9.68428981886534E-04 )*Y+
4.16002324339929E-03 )*Y-1.28290192663141E-02 )*Y+
2.22353727685016E-02;
WW5 = ((((((((((((((-8.16770412525963E-16*Y+1.31376515047977E-14)*
Y-1.856950818865E-13)*Y+2.596836515749E-12)*Y-
3.372639523006E-11)*Y+4.025371849467E-10)*Y-
4.389453269417E-09)*Y+4.332753856271E-08)*Y-
3.82673275931962E-07 )*Y+2.98006900751543E-06 )*Y-
2.00718990300052E-05 )*Y+1.13876001386361E-04 )*Y-
5.23627942443563E-04 )*Y+1.83524565118203E-03 )*Y-
4.37785737450783E-03 )*Y+5.36963805223095E-03;
} else if (X < 10.0) {
Y = X-7.5E+00;
RT1 = ((((((((-1.13825201010775E-14*Y+1.89737681670375E-13)*Y-
4.81561201185876E-12 )*Y+1.56666512163407E-10 )*Y-
3.73782213255083E-09 )*Y+9.15858355075147E-08 )*Y-
2.13775073585629E-06 )*Y+4.56547356365536E-05 )*Y-
8.68003909323740E-04 )*Y+1.22703754069176E-02;
RT2 = (((((((((-3.67160504428358E-15*Y+1.27876280158297E-14)*Y-
1.296476623788E-12)*Y+1.477175434354E-11)*Y+
5.464102147892E-10)*Y-2.42538340602723E-08 )*Y+
8.20460740637617E-07 )*Y-2.20379304598661E-05 )*Y+
4.90295372978785E-04 )*Y-9.14294111576119E-03 )*Y+
1.22590403403690E-01;
RT3 = ((((((((( 1.39017367502123E-14*Y-6.96391385426890E-13)*Y+
1.176946020731E-12)*Y+1.725627235645E-10)*Y-
3.686383856300E-09)*Y+2.87495324207095E-08 )*Y+
1.71307311000282E-06 )*Y-7.94273603184629E-05 )*Y+
2.00938064965897E-03 )*Y-3.63329491677178E-02 )*Y+
4.34393683888443E-01;
RT4 = ((((((((((-1.27815158195209E-14*Y+1.99910415869821E-14)*Y+
3.753542914426E-12)*Y-2.708018219579E-11)*Y-
1.190574776587E-09)*Y+1.106696436509E-08)*Y+
3.954955671326E-07)*Y-4.398596059588E-06)*Y-
2.01087998907735E-04 )*Y+7.89092425542937E-03 )*Y-
1.42056749162695E-01 )*Y+1.39964149420683E+00;
RT5 = ((((((((((-1.19442341030461E-13*Y-2.34074833275956E-12)*Y+
6.861649627426E-12)*Y+6.082671496226E-10)*Y+
5.381160105420E-09)*Y-6.253297138700E-08)*Y-
2.135966835050E-06)*Y-2.373394341886E-05)*Y+
2.88711171412814E-06 )*Y+4.85221195290753E-02 )*Y-
1.04346091985269E+00 )*Y+7.89901551676692E+00;
WW1 = ((((((((( 7.95526040108997E-15*Y-2.48593096128045E-13)*Y+
4.761246208720E-12)*Y-9.535763686605E-11)*Y+
2.225273630974E-09)*Y-4.49796778054865E-08 )*Y+
9.17812870287386E-07 )*Y-1.86764236490502E-05 )*Y+
3.76807779068053E-04 )*Y-8.10456360143408E-03 )*Y+
2.01097936411496E-01;
WW2 = ((((((((((( 1.25678686624734E-15*Y-2.34266248891173E-14)*Y+
3.973252415832E-13)*Y-6.830539401049E-12)*Y+
1.140771033372E-10)*Y-1.82546185762009E-09 )*Y+
2.77209637550134E-08 )*Y-4.01726946190383E-07 )*Y+
5.48227244014763E-06 )*Y-6.95676245982121E-05 )*Y+
8.05193921815776E-04 )*Y-8.15528438784469E-03 )*Y+
9.71769901268114E-02;
WW3 = ((((((((((((-8.20929494859896E-16*Y+1.37356038393016E-14)*Y-
2.022863065220E-13)*Y+3.058055403795E-12)*Y-
4.387890955243E-11)*Y+5.923946274445E-10)*Y-
7.503659964159E-09)*Y+8.851599803902E-08)*Y-
9.65561998415038E-07 )*Y+9.60884622778092E-06 )*Y-
8.56551787594404E-05 )*Y+6.66057194311179E-04 )*Y-
4.17753183902198E-03 )*Y+2.25443826852447E-02;
WW4 = ((((((((((((((-1.08764612488790E-17*Y+1.85299909689937E-16)*
Y-2.730195628655E-15)*Y+4.127368817265E-14)*Y-
5.881379088074E-13)*Y+7.805245193391E-12)*Y-
9.632707991704E-11)*Y+1.099047050624E-09)*Y-
1.15042731790748E-08 )*Y+1.09415155268932E-07 )*Y-
9.33687124875935E-07 )*Y+7.02338477986218E-06 )*Y-
4.53759748787756E-05 )*Y+2.41722511389146E-04 )*Y-
9.75935943447037E-04 )*Y+2.57520532789644E-03;
WW5 = ((((((((((((((( 7.28996979748849E-19*Y-1.26518146195173E-17)
*Y+1.886145834486E-16)*Y-2.876728287383E-15)*Y+
4.114588668138E-14)*Y-5.44436631413933E-13 )*Y+
6.64976446790959E-12 )*Y-7.44560069974940E-11 )*Y+
7.57553198166848E-10 )*Y-6.92956101109829E-09 )*Y+
5.62222859033624E-08 )*Y-3.97500114084351E-07 )*Y+
2.39039126138140E-06 )*Y-1.18023950002105E-05 )*Y+
4.52254031046244E-05 )*Y-1.21113782150370E-04 )*Y+
1.75013126731224E-04;
} else if (X < 15.0) {
Y = X-12.5E+00;
RT1 = ((((((((((-4.16387977337393E-17*Y+7.20872997373860E-16)*Y+
1.395993802064E-14)*Y+3.660484641252E-14)*Y-
4.154857548139E-12)*Y+2.301379846544E-11)*Y-
1.033307012866E-09)*Y+3.997777641049E-08)*Y-
9.35118186333939E-07 )*Y+2.38589932752937E-05 )*Y-
5.35185183652937E-04 )*Y+8.85218988709735E-03;
RT2 = ((((((((((-4.56279214732217E-16*Y+6.24941647247927E-15)*Y+
1.737896339191E-13)*Y+8.964205979517E-14)*Y-
3.538906780633E-11)*Y+9.561341254948E-11)*Y-
9.772831891310E-09)*Y+4.240340194620E-07)*Y-
1.02384302866534E-05 )*Y+2.57987709704822E-04 )*Y-
5.54735977651677E-03 )*Y+8.68245143991948E-02;
RT3 = ((((((((((-2.52879337929239E-15*Y+2.13925810087833E-14)*Y+
7.884307667104E-13)*Y-9.023398159510E-13)*Y-
5.814101544957E-11)*Y-1.333480437968E-09)*Y-
2.217064940373E-08)*Y+1.643290788086E-06)*Y-
4.39602147345028E-05 )*Y+1.08648982748911E-03 )*Y-
2.13014521653498E-02 )*Y+2.94150684465425E-01;
RT4 = ((((((((((-6.42391438038888E-15*Y+5.37848223438815E-15)*Y+
8.960828117859E-13)*Y+5.214153461337E-11)*Y-
1.106601744067E-10)*Y-2.007890743962E-08)*Y+
1.543764346501E-07)*Y+4.520749076914E-06)*Y-
1.88893338587047E-04 )*Y+4.73264487389288E-03 )*Y-
7.91197893350253E-02 )*Y+8.60057928514554E-01;
RT5 = (((((((((((-2.24366166957225E-14*Y+4.87224967526081E-14)*Y+
5.587369053655E-12)*Y-3.045253104617E-12)*Y-
1.223983883080E-09)*Y-2.05603889396319E-09 )*Y+
2.58604071603561E-07 )*Y+1.34240904266268E-06 )*Y-
5.72877569731162E-05 )*Y-9.56275105032191E-04 )*Y+
4.23367010370921E-02 )*Y-5.76800927133412E-01 )*Y+
3.87328263873381E+00;
WW1 = ((((((((( 8.98007931950169E-15*Y+7.25673623859497E-14)*Y+
5.851494250405E-14)*Y-4.234204823846E-11)*Y+
3.911507312679E-10)*Y-9.65094802088511E-09 )*Y+
3.42197444235714E-07 )*Y-7.51821178144509E-06 )*Y+
1.94218051498662E-04 )*Y-5.38533819142287E-03 )*Y+
1.68122596736809E-01;
WW2 = ((((((((((-1.05490525395105E-15*Y+1.96855386549388E-14)*Y-
5.500330153548E-13)*Y+1.003849567976E-11)*Y-
1.720997242621E-10)*Y+3.533277061402E-09)*Y-
6.389171736029E-08)*Y+1.046236652393E-06)*Y-
1.73148206795827E-05 )*Y+2.57820531617185E-04 )*Y-
3.46188265338350E-03 )*Y+7.03302497508176E-02;
WW3 = ((((((((((( 3.60020423754545E-16*Y-6.24245825017148E-15)*Y+
9.945311467434E-14)*Y-1.749051512721E-12)*Y+
2.768503957853E-11)*Y-4.08688551136506E-10 )*Y+
6.04189063303610E-09 )*Y-8.23540111024147E-08 )*Y+
1.01503783870262E-06 )*Y-1.20490761741576E-05 )*Y+
1.26928442448148E-04 )*Y-1.05539461930597E-03 )*Y+
1.15543698537013E-02;
WW4 = ((((((((((((( 2.51163533058925E-18*Y-4.31723745510697E-17)*
Y+6.557620865832E-16)*Y-1.016528519495E-14)*Y+
1.491302084832E-13)*Y-2.06638666222265E-12 )*Y+
2.67958697789258E-11 )*Y-3.23322654638336E-10 )*Y+
3.63722952167779E-09 )*Y-3.75484943783021E-08 )*Y+
3.49164261987184E-07 )*Y-2.92658670674908E-06 )*Y+
2.12937256719543E-05 )*Y-1.19434130620929E-04 )*Y+
6.45524336158384E-04;
WW5 = ((((((((((((((-1.29043630202811E-19*Y+2.16234952241296E-18)*
Y-3.107631557965E-17)*Y+4.570804313173E-16)*Y-
6.301348858104E-15)*Y+8.031304476153E-14)*Y-
9.446196472547E-13)*Y+1.018245804339E-11)*Y-
9.96995451348129E-11 )*Y+8.77489010276305E-10 )*Y-
6.84655877575364E-09 )*Y+4.64460857084983E-08 )*Y-
2.66924538268397E-07 )*Y+1.24621276265907E-06 )*Y-
4.30868944351523E-06 )*Y+9.94307982432868E-06;
} else if (X < 20.0){
Y = X-17.5E+00;
RT1 = (((((((((( 1.91875764545740E-16*Y+7.8357401095707E-16)*Y-
3.260875931644E-14)*Y-1.186752035569E-13)*Y+
4.275180095653E-12)*Y+3.357056136731E-11)*Y-
1.123776903884E-09)*Y+1.231203269887E-08)*Y-
3.99851421361031E-07 )*Y+1.45418822817771E-05 )*Y-
3.49912254976317E-04 )*Y+6.67768703938812E-03;
RT2 = (((((((((( 2.02778478673555E-15*Y+1.01640716785099E-14)*Y-
3.385363492036E-13)*Y-1.615655871159E-12)*Y+
4.527419140333E-11)*Y+3.853670706486E-10)*Y-
1.184607130107E-08)*Y+1.347873288827E-07)*Y-
4.47788241748377E-06 )*Y+1.54942754358273E-04 )*Y-
3.55524254280266E-03 )*Y+6.44912219301603E-02;
RT3 = (((((((((( 7.79850771456444E-15*Y+6.00464406395001E-14)*Y-
1.249779730869E-12)*Y-1.020720636353E-11)*Y+
1.814709816693E-10)*Y+1.766397336977E-09)*Y-
4.603559449010E-08)*Y+5.863956443581E-07)*Y-
2.03797212506691E-05 )*Y+6.31405161185185E-04 )*Y-
1.30102750145071E-02 )*Y+2.10244289044705E-01;
RT4 = (((((((((((-2.92397030777912E-15*Y+1.94152129078465E-14)*Y+
4.859447665850E-13)*Y-3.217227223463E-12)*Y-
7.484522135512E-11)*Y+7.19101516047753E-10 )*Y+
6.88409355245582E-09 )*Y-1.44374545515769E-07 )*Y+
2.74941013315834E-06 )*Y-1.02790452049013E-04 )*Y+
2.59924221372643E-03 )*Y-4.35712368303551E-02 )*Y+
5.62170709585029E-01;
RT5 = ((((((((((( 1.17976126840060E-14*Y+1.24156229350669E-13)*Y-
3.892741622280E-12)*Y-7.755793199043E-12)*Y+
9.492190032313E-10)*Y-4.98680128123353E-09 )*Y-
1.81502268782664E-07 )*Y+2.69463269394888E-06 )*Y+
2.50032154421640E-05 )*Y-1.33684303917681E-03 )*Y+
2.29121951862538E-02 )*Y-2.45653725061323E-01 )*Y+
1.89999883453047E+00;
WW1 = (((((((((( 1.74841995087592E-15*Y-6.95671892641256E-16)*Y-
3.000659497257E-13)*Y+2.021279817961E-13)*Y+
3.853596935400E-11)*Y+1.461418533652E-10)*Y-
1.014517563435E-08)*Y+1.132736008979E-07)*Y-
2.86605475073259E-06 )*Y+1.21958354908768E-04 )*Y-
3.86293751153466E-03 )*Y+1.45298342081522E-01;
WW2 = ((((((((((-1.11199320525573E-15*Y+1.85007587796671E-15)*Y+
1.220613939709E-13)*Y+1.275068098526E-12)*Y-
5.341838883262E-11)*Y+6.161037256669E-10)*Y-
1.009147879750E-08)*Y+2.907862965346E-07)*Y-
6.12300038720919E-06 )*Y+1.00104454489518E-04 )*Y-
1.80677298502757E-03 )*Y+5.78009914536630E-02;
WW3 = ((((((((((-9.49816486853687E-16*Y+6.67922080354234E-15)*Y+
2.606163540537E-15)*Y+1.983799950150E-12)*Y-
5.400548574357E-11)*Y+6.638043374114E-10)*Y-
8.799518866802E-09)*Y+1.791418482685E-07)*Y-
2.96075397351101E-06 )*Y+3.38028206156144E-05 )*Y-
3.58426847857878E-04 )*Y+8.39213709428516E-03;
WW4 = ((((((((((( 1.33829971060180E-17*Y-3.44841877844140E-16)*Y+
4.745009557656E-15)*Y-6.033814209875E-14)*Y+
1.049256040808E-12)*Y-1.70859789556117E-11 )*Y+
2.15219425727959E-10 )*Y-2.52746574206884E-09 )*Y+
3.27761714422960E-08 )*Y-3.90387662925193E-07 )*Y+
3.46340204593870E-06 )*Y-2.43236345136782E-05 )*Y+
3.54846978585226E-04;
WW5 = ((((((((((((( 2.69412277020887E-20*Y-4.24837886165685E-19)*
Y+6.030500065438E-18)*Y-9.069722758289E-17)*Y+
1.246599177672E-15)*Y-1.56872999797549E-14 )*Y+
1.87305099552692E-13 )*Y-2.09498886675861E-12 )*Y+
2.11630022068394E-11 )*Y-1.92566242323525E-10 )*Y+
1.62012436344069E-09 )*Y-1.23621614171556E-08 )*Y+
7.72165684563049E-08 )*Y-3.59858901591047E-07 )*Y+
2.43682618601000E-06;
} else if (X < 25.0) {
Y = X-22.5E+00;
RT1 = (((((((((-1.13927848238726E-15*Y+7.39404133595713E-15)*Y+
1.445982921243E-13)*Y-2.676703245252E-12)*Y+
5.823521627177E-12)*Y+2.17264723874381E-10 )*Y+
3.56242145897468E-09 )*Y-3.03763737404491E-07 )*Y+
9.46859114120901E-06 )*Y-2.30896753853196E-04 )*Y+
5.24663913001114E-03;
RT2 = (((((((((( 2.89872355524581E-16*Y-1.22296292045864E-14)*Y+
6.184065097200E-14)*Y+1.649846591230E-12)*Y-
2.729713905266E-11)*Y+3.709913790650E-11)*Y+
2.216486288382E-09)*Y+4.616160236414E-08)*Y-
3.32380270861364E-06 )*Y+9.84635072633776E-05 )*Y-
2.30092118015697E-03 )*Y+5.00845183695073E-02;
RT3 = (((((((((( 1.97068646590923E-15*Y-4.89419270626800E-14)*Y+
1.136466605916E-13)*Y+7.546203883874E-12)*Y-
9.635646767455E-11)*Y-8.295965491209E-11)*Y+
7.534109114453E-09)*Y+2.699970652707E-07)*Y-
1.42982334217081E-05 )*Y+3.78290946669264E-04 )*Y-
8.03133015084373E-03 )*Y+1.58689469640791E-01;
RT4 = (((((((((( 1.33642069941389E-14*Y-1.55850612605745E-13)*Y-
7.522712577474E-13)*Y+3.209520801187E-11)*Y-
2.075594313618E-10)*Y-2.070575894402E-09)*Y+
7.323046997451E-09)*Y+1.851491550417E-06)*Y-
6.37524802411383E-05 )*Y+1.36795464918785E-03 )*Y-
2.42051126993146E-02 )*Y+3.97847167557815E-01;
RT5 = ((((((((((-6.07053986130526E-14*Y+1.04447493138843E-12)*Y-
4.286617818951E-13)*Y-2.632066100073E-10)*Y+
4.804518986559E-09)*Y-1.835675889421E-08)*Y-
1.068175391334E-06)*Y+3.292234974141E-05)*Y-
5.94805357558251E-04 )*Y+8.29382168612791E-03 )*Y-
9.93122509049447E-02 )*Y+1.09857804755042E+00;
WW1 = (((((((((-9.10338640266542E-15*Y+1.00438927627833E-13)*Y+
7.817349237071E-13)*Y-2.547619474232E-11)*Y+
1.479321506529E-10)*Y+1.52314028857627E-09 )*Y+
9.20072040917242E-09 )*Y-2.19427111221848E-06 )*Y+
8.65797782880311E-05 )*Y-2.82718629312875E-03 )*Y+
1.28718310443295E-01;
WW2 = ((((((((( 5.52380927618760E-15*Y-6.43424400204124E-14)*Y-
2.358734508092E-13)*Y+8.261326648131E-12)*Y+
9.229645304956E-11)*Y-5.68108973828949E-09 )*Y+
1.22477891136278E-07 )*Y-2.11919643127927E-06 )*Y+
4.23605032368922E-05 )*Y-1.14423444576221E-03 )*Y+
5.06607252890186E-02;
WW3 = ((((((((( 3.99457454087556E-15*Y-5.11826702824182E-14)*Y-
4.157593182747E-14)*Y+4.214670817758E-12)*Y+
6.705582751532E-11)*Y-3.36086411698418E-09 )*Y+
6.07453633298986E-08 )*Y-7.40736211041247E-07 )*Y+
8.84176371665149E-06 )*Y-1.72559275066834E-04 )*Y+
7.16639814253567E-03;
WW4 = (((((((((((-2.14649508112234E-18*Y-2.45525846412281E-18)*Y+
6.126212599772E-16)*Y-8.526651626939E-15)*Y+
4.826636065733E-14)*Y-3.39554163649740E-13 )*Y+
1.67070784862985E-11 )*Y-4.42671979311163E-10 )*Y+
6.77368055908400E-09 )*Y-7.03520999708859E-08 )*Y+
6.04993294708874E-07 )*Y-7.80555094280483E-06 )*Y+
2.85954806605017E-04;
WW5 = ((((((((((((-5.63938733073804E-21*Y+6.92182516324628E-20)*Y-
1.586937691507E-18)*Y+3.357639744582E-17)*Y-
4.810285046442E-16)*Y+5.386312669975E-15)*Y-
6.117895297439E-14)*Y+8.441808227634E-13)*Y-
1.18527596836592E-11 )*Y+1.36296870441445E-10 )*Y-
1.17842611094141E-09 )*Y+7.80430641995926E-09 )*Y-
5.97767417400540E-08 )*Y+1.65186146094969E-06;
} else if (X < 40) {
WW1 = sqrt(PIE4/X);
E = exp(-X);
RT1 = ((((((((-1.73363958895356E-06*X+1.19921331441483E-04)*X -
1.59437614121125E-02)*X+1.13467897349442E+00)*X -
4.47216460864586E+01)*X+1.06251216612604E+03)*X -
1.52073917378512E+04)*X+1.20662887111273E+05)*X -
4.07186366852475E+05)*E + R15/(X-R15);
RT2 = ((((((((-1.60102542621710E-05*X+1.10331262112395E-03)*X -
1.50043662589017E-01)*X+1.05563640866077E+01)*X -
4.10468817024806E+02)*X+9.62604416506819E+03)*X -
1.35888069838270E+05)*X+1.06107577038340E+06)*X -
3.51190792816119E+06)*E + R25/(X-R25);
RT3 = ((((((((-4.48880032128422E-05*X+2.69025112122177E-03)*X -
4.01048115525954E-01)*X+2.78360021977405E+01)*X -
1.04891729356965E+03)*X+2.36985942687423E+04)*X -
3.19504627257548E+05)*X+2.34879693563358E+06)*X -
7.16341568174085E+06)*E + R35/(X-R35);
RT4 = ((((((((-6.38526371092582E-05*X-2.29263585792626E-03)*X -
7.65735935499627E-02)*X+9.12692349152792E+00)*X -
2.32077034386717E+02)*X+2.81839578728845E+02)*X +
9.59529683876419E+04)*X-1.77638956809518E+06)*X +
1.02489759645410E+07)*E + R45/(X-R45);
RT5 = ((((((((-3.59049364231569E-05*X-2.25963977930044E-02)*X +
1.12594870794668E+00)*X-4.56752462103909E+01)*X +
1.05804526830637E+03)*X-1.16003199605875E+04)*X -
4.07297627297272E+04)*X+2.22215528319857E+06)*X -
1.61196455032613E+07)*E + R55/(X-R55);
WW5 = (((((((((-4.61100906133970E-10*X+1.43069932644286E-07)*X -
1.63960915431080E-05)*X+1.15791154612838E-03)*X -
5.30573476742071E-02)*X+1.61156533367153E+00)*X -
3.23248143316007E+01)*X+4.12007318109157E+02)*X -
3.02260070158372E+03)*X+9.71575094154768E+03)*E + W55*WW1;
WW4 = (((((((((-2.40799435809950E-08*X+8.12621667601546E-06)*X -
9.04491430884113E-04)*X+6.37686375770059E-02)*X -
2.96135703135647E+00)*X+9.15142356996330E+01)*X -
1.86971865249111E+03)*X+2.42945528916947E+04)*X -
1.81852473229081E+05)*X+5.96854758661427E+05)*E + W45*WW1;
WW3 = (((((((( 1.83574464457207E-05*X-1.54837969489927E-03)*X +
1.18520453711586E-01)*X-6.69649981309161E+00)*X +
2.44789386487321E+02)*X-5.68832664556359E+03)*X +
8.14507604229357E+04)*X-6.55181056671474E+05)*X +
2.26410896607237E+06)*E + W35*WW1;
WW2 = (((((((( 2.77778345870650E-05*X-2.22835017655890E-03)*X +
1.61077633475573E-01)*X-8.96743743396132E+00)*X +
3.28062687293374E+02)*X-7.65722701219557E+03)*X +
1.10255055017664E+05)*X-8.92528122219324E+05)*X +
3.10638627744347E+06)*E + W25*WW1;
WW1 = WW1-0.01962E+00*E-WW2-WW3-WW4-WW5;
} else if (X < 59.0) {
WW1 = sqrt(PIE4/X);
XXX = pow(X,3.0);
E = XXX*exp(-X);
RT1 = (((-2.43758528330205E-02*X+2.07301567989771E+00)*X -
6.45964225381113E+01)*X+7.14160088655470E+02)*E + R15/(X-R15);
RT2 = (((-2.28861955413636E-01*X+1.93190784733691E+01)*X -
5.99774730340912E+02)*X+6.61844165304871E+03)*E + R25/(X-R25);
RT3 = (((-6.95053039285586E-01*X+5.76874090316016E+01)*X -
1.77704143225520E+03)*X+1.95366082947811E+04)*E + R35/(X-R35);
RT4 = (((-1.58072809087018E+00*X+1.27050801091948E+02)*X -
3.86687350914280E+03)*X+4.23024828121420E+04)*E + R45/(X-R45);
RT5 = (((-3.33963830405396E+00*X+2.51830424600204E+02)*X -
7.57728527654961E+03)*X+8.21966816595690E+04)*E + R55/(X-R55);
E = XXX*E;
WW5 = (( 1.35482430510942E-08*X-3.27722199212781E-07)*X +
2.41522703684296E-06)*E + W55*WW1;
WW4 = (( 1.23464092261605E-06*X-3.55224564275590E-05)*X +
3.03274662192286E-04)*E + W45*WW1;
WW3 = (( 1.34547929260279E-05*X-4.19389884772726E-04)*X +
3.87706687610809E-03)*E + W35*WW1;
WW2 = (( 2.09539509123135E-05*X-6.87646614786982E-04)*X +
6.68743788585688E-03)*E + W25*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
} else {
WW1 = sqrt(PIE4/X);
RT1 = R15/(X-R15);
RT2 = R25/(X-R25);
RT3 = R35/(X-R35);
RT4 = R45/(X-R45);
RT5 = R55/(X-R55);
WW2 = W25*WW1;
WW3 = W35*WW1;
WW4 = W45*WW1;
WW5 = W55*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
roots[4] = RT5;
weights[4] = WW5;
return;
}
__device__ void cuda_Root6_dp(int n,double X, double roots[], double weights[]){
// Root6 not implemented yet
return;
}
__device__ double cuda_Int1d_dp(int i, int j, int k, int l,
double xi, double xj, double xk, double xl,
double alpha_ij_A, double alpha_kl_B, double sqrt_AB,
double A, double B, double Px, double Qx,
double inv_t1, double B00, double B1, double B1p,
double G[][MAXROOTS])
{
// Form G(n,m)=I(n,0,m,0) intermediate values for a Rys polynomial
int n = i+j;
int m = k+l;
double xij = xi-xj;
double xkl = xk-xl;
// RecurFactorsGamess
double C = (Px-xi) * inv_t1 + (B*(Qx-xi)+A*(Px-xi))*B00*2.0;
double Cp = (Qx-xk) * inv_t1 + (B*(Qx-xk)+A*(Px-xk))*B00*2.0;
// ABD eq 11.
G[0][0] = M_PI * exp(-alpha_ij_A*xij*xij -alpha_kl_B*xkl*xkl) / sqrt_AB;
if (n > 0) { G[1][0] = C *G[0][0]; } // ABD eq 15
if (m > 0) { G[0][1] = Cp*G[0][0]; } // ABD eq 16
for (int a = 2; a < n+1; ++ a) { G[a][0] = B1 *(a-1)*G[a-2][0] + C *G[a-1][0]; }
for (int b = 2; b < m+1; ++ b) { G[0][b] = B1p*(b-1)*G[0][b-2] + Cp*G[0][b-1]; }
if ((m>0) && (n>0)){
for (int a=1; a<n+1; ++a){
G[a][1] = a*B00*G[a-1][0] + Cp*G[a][0];
for (int b=2; b<m+1; ++b)
G[a][b] = B1p*(b-1)*G[a][b-2] + a*B00*G[a-1][b-1] + Cp*G[a][b-1];
}
}
// Compute and output I(i,j,k,l) from I(i+j,0,k+l,0) (G)
double ijkl = 0.0;
for (int m=0; m<l+1; ++m){
double ijm0 = 0.0;
for (int n=0; n<j+1; ++n) // I(i,j,m,0)<-I(n,0,m,0)
ijm0 += cuda_binomial(j,n)*pow(xij,(double)(j-n))*G[n+i][m+k];
ijkl += cuda_binomial(l,m)*pow(xkl,(double)(l-m))*ijm0; // I(i,j,k,l)<-I(i,j,m,0)
}
return ijkl;
}
// calculate ERI over 4 primitive basis functions
__device__ double cuda_rys_pbf_dp(const double *ptr_i, const double *ptr_j,
const double *ptr_k, const double *ptr_l)
{
// download xyz, lmn, expon, and coef*norm
double xa = ptr_i[0];
double ya = ptr_i[1];
double za = ptr_i[2];
int la = (int)ptr_i[3];
int ma = (int)ptr_i[4];
int na = (int)ptr_i[5];
double alphaa = ptr_i[6];
double norma = ptr_i[7];
double xb = ptr_j[0];
double yb = ptr_j[1];
double zb = ptr_j[2];
int lb = (int)ptr_j[3];
int mb = (int)ptr_j[4];
int nb = (int)ptr_j[5];
double alphab = ptr_j[6];
double normb = ptr_j[7];
double xc = ptr_k[0];
double yc = ptr_k[1];
double zc = ptr_k[2];
int lc = (int)ptr_k[3];
int mc = (int)ptr_k[4];
int nc = (int)ptr_k[5];
double alphac = ptr_k[6];
double normc = ptr_k[7];
double xd = ptr_l[0];
double yd = ptr_l[1];
double zd = ptr_l[2];
int ld = (int)ptr_l[3];
int md = (int)ptr_l[4];
int nd = (int)ptr_l[5];
double alphad = ptr_l[6];
double normd = ptr_l[7];
// calculate primitive integral [ij|kl]
int norder,i;
double A,B,xp,yp,zp,xq,yq,zq,X,rho,sum,t,Ix,Iy,Iz;
norder = (la+ma+na+lb+nb+mb+lc+mc+nc+ld+md+nd)/2 + 1;
A = alphaa+alphab;
B = alphac+alphad;
xp = (alphaa*xa+alphab*xb)/A;
yp = (alphaa*ya+alphab*yb)/A;
zp = (alphaa*za+alphab*zb)/A;
xq = (alphac*xc+alphad*xd)/B;
yq = (alphac*yc+alphad*yd)/B;
zq = (alphac*zc+alphad*zd)/B;
rho = A*B/(A+B);
X = rho * ((xp-xq)*(xp-xq)+(yp-yq)*(yp-yq)+(zp-zq)*(zp-zq));
double alpha_ab_A = alphaa * alphab / A;
double alpha_cd_B = alphac * alphad / B;
double sqrt_AB = sqrt(A * B);
double roots[MAXROOTS],weights[MAXROOTS];
double G[MAXROOTS][MAXROOTS];
    cuda_Roots_dp(norder,X, roots,weights); // get the correct roots/weights
sum = 0.;
for (i=0; i<norder; ++i){
t = roots[i];
double inv_t1, B00, B1, B1p;
inv_t1 = 1.0 / (1 + t);
B00 = 0.5 * t/(A+B) * inv_t1;
B1 = 0.5 / A * inv_t1 + B00;
B1p = 0.5 / B * inv_t1 + B00;
Ix = cuda_Int1d_dp(la,lb,lc,ld, xa,xb,xc,xd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,xp,xq, inv_t1,B00,B1,B1p, G);
Iy = cuda_Int1d_dp(ma,mb,mc,md, ya,yb,yc,yd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,yp,yq, inv_t1,B00,B1,B1p, G);
Iz = cuda_Int1d_dp(na,nb,nc,nd, za,zb,zc,zd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,zp,zq, inv_t1,B00,B1,B1p, G);
sum = sum + Ix*Iy*Iz*weights[i]; /* ABD eq 5 & 9 */
}
// inv_sqrt_pi_2: 2.0*sqrt(1.0/M_PI) = 1.12837916709551255856
return 1.12837916709551255856 * sqrt(rho)*norma*normb*normc*normd*sum; /* ABD eq 5 & 9 */
}
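// Hedged example (an assumption, not part of the original source): the
// 8-double primitive layout documented above is (x, y, z, l, m, n, exponent,
// coefficient*norm). This small kernel evaluates one [ss|ss] integral over
// four unit-exponent s-type primitives, two at the origin and two at
// (0, 0, 1), which can serve as a sanity check against a CPU reference.
__global__ void cuda_rys_pbf_dp_example(double *out)
{
    double pa[8] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0 };  // s at origin
    double pb[8] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0 };  // s at origin
    double pc[8] = { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0 };  // s at (0,0,1)
    double pd[8] = { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0 };  // s at (0,0,1)
    out[0] = cuda_rys_pbf_dp(pa, pb, pc, pd);
}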
// calculate J matrix using 1-thread-1-primitive-integral scheme
__global__ void cuda_mat_J_PI_dp(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_J_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_J_PI[BLOCKSIZE * BLOCKSIZE];
    // each block scans over [ij|??] and sums up into a primitive J matrix element
int i = blockIdx.x;
int j = blockIdx.y;
// avoid accessing out of bounds elements and make use of i<=>j symmetry
if (i >= n_pbf || j > i) { return; }
int ij = cuda_ij2intindex(i,j);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_j = &pbf_xlec[j * 8];
int a = pbf_to_cbf[i];
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
// initialize shared array
elem_J_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int k = threadIdx.x; k < n_pbf; k += BLOCKSIZE)
{
int c = pbf_to_cbf[k];
const double *ptr_k = &pbf_xlec[k * 8];
// NOTE: make use of k<=>l symmetry
for (int l = threadIdx.y; l <= k; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
            // Schwarz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[cd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf_dp(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: doubling for off-diagonal elements of D due to k<=>l symmetry
elem_J_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] += this_eri * mat_D[cd] * (k == l ? 1.0 : 2.0);
}
}
__syncthreads();
// only update mat_J_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_J_PI[ij] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_J_PI[ij] += elem_J_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
// calculate K matrix using 1-thread-1-primitive-integral scheme
__global__ void cuda_mat_K_PI_dp(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_K_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_K_PI[BLOCKSIZE * BLOCKSIZE];
    // each block scans over [i?|k?] and sums up into a primitive K matrix element
int i = blockIdx.x;
int k = blockIdx.y;
// avoid accessing out of bounds elements and make use of ij<=>kl symmetry
if (i >= n_pbf || k > i) { return; }
int ik = cuda_ij2intindex(i,k);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_k = &pbf_xlec[k * 8];
int a = pbf_to_cbf[i];
int c = pbf_to_cbf[k];
// initialize shared array
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int j = threadIdx.x; j < n_pbf; j += BLOCKSIZE)
{
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
const double *ptr_j = &pbf_xlec[j * 8];
for (int l = threadIdx.y; l < n_pbf; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
int bd = cuda_ij2intindex(b,d);
            // Schwarz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[bd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf_dp(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: no doubling for off-diagonal elements of D
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] += this_eri * mat_D[bd];
}
}
__syncthreads();
// only update mat_K_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_K_PI[ik] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_K_PI[ik] += elem_K_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
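// Hedged host-side launch sketch (an assumption, not part of the original
// source): the indexing in the two kernels above (blockIdx picking the (i,j)
// or (i,k) pair, threads tiling the inner loops in steps of BLOCKSIZE)
// suggests a 2-D grid of n_pbf x n_pbf blocks with BLOCKSIZE x BLOCKSIZE
// threads each. All device pointers are assumed to be allocated and filled by
// the caller; the function name is illustrative only.
static void example_launch_mat_JK_PI(const double *d_pbf_xlec,
                                     const int *d_pbf_to_cbf,
                                     int n_pbf,
                                     const double *d_mat_D,
                                     double *d_mat_J_PI,
                                     double *d_mat_K_PI,
                                     const double *d_mat_Q)
{
    dim3 block(BLOCKSIZE, BLOCKSIZE);  // one thread tile per block
    dim3 grid(n_pbf, n_pbf);           // one block per primitive pair
    cuda_mat_J_PI_dp<<<grid, block>>>(d_pbf_xlec, d_pbf_to_cbf, n_pbf,
                                      d_mat_D, d_mat_J_PI, d_mat_Q);
    cuda_mat_K_PI_dp<<<grid, block>>>(d_pbf_xlec, d_pbf_to_cbf, n_pbf,
                                      d_mat_D, d_mat_K_PI, d_mat_Q);
}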
#include <iostream>
using namespace std;
// Macros: DEF_BLOCK_X and DEF_BLOCK_Y
// Default dimensions of a thread block.
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// Global variable: _hardIplInimgTex (texture reference used for the input image)
// Texture memory can only be declared at global scope, so the input image of the
// hardware-interpolation perspective-transform kernel is placed here.
static texture<unsigned char, 2, cudaReadModeElementType> _hardIplInimgTex;
// Kernel function: _hardPerspectiveKer (perspective transform using hardware
// interpolation)
// Parallel perspective transform implemented with the hardware interpolation
// provided by texture memory. There is no input-image parameter because the
// input image is read through texture memory, which can only be declared as a
// global variable.
static __global__ void          // Kernel functions have no return value.
_hardPerspectiveKer(
    ImageCuda outimg,           // output image
    PerspectiveMatrix pm        // parameters of the perspective transform
);
// Kernel function: _softPerspectiveKer (perspective transform using software
// interpolation)
// Parallel perspective transform implemented with the Fanczos software
// interpolation routine.
static __global__ void          // Kernel functions have no return value.
_softPerspectiveKer(
    ImageCuda inimg,            // input image
    ImageCuda outimg,           // output image
    PerspectiveMatrix pm        // parameters of the perspective transform
);
// Host function: _pointsToPsptMatrix (standard one-sided transform matrix)
// Computes the matrix of the perspective transform that maps the unit rectangle
// onto four given points. A perspective transform taking four given points to
// four new points can be viewed as the composition of two transforms: first map
// the given points onto a unit rectangle, then map the unit rectangle onto the
// new points. For that reason this function is said to compute the standard
// matrix of a one-sided transform; a complete transform matrix is obtained by
// combining two one-sided matrices through matrix multiplication (one of them
// being an inverse).
static __host__ int             // Return value: NO_ERROR if the function
                                // executes correctly.
_pointsToPsptMatrix(
    const float pts[4][2],      // the four given coordinate points
    PerspectiveMatrix *pm       // standard one-sided transform matrix
);
// Host function: _detPsptMatrix (determinant of a perspective matrix)
// Computes the determinant of a perspective transform matrix, which is used to
// check whether the matrix has full rank.
static __host__ float           // Return value: the determinant (0.0f is
                                // returned if the computation fails).
_detPsptMatrix(
    const PerspectiveMatrix &pm // input matrix
);
// Host function: _invPsptMatrix (inverse of a perspective matrix)
// Computes the inverse of a given matrix, which is needed to obtain the matrix
// of the corresponding inverse transform.
static __host__ int             // Return value: NO_ERROR if the function
                                // executes correctly.
_invPsptMatrix(
    const PerspectiveMatrix &inpm,  // input matrix
    PerspectiveMatrix *outpm        // output inverse matrix
);
// Host function: _mulPsptMatrix (perspective matrix multiplication)
// Computes the product of two matrices. This is used to merge two successive
// transforms into a single one.
static __host__ int             // Return value: NO_ERROR if the function
                                // executes correctly.
_mulPsptMatrix(
    const PerspectiveMatrix &inpm1,  // input matrix 1
    const PerspectiveMatrix &inpm2,  // input matrix 2
    PerspectiveMatrix *outpm         // output matrix
);
// Host function: _pointsToPsptMatrix (standard one-sided transform matrix)
static __host__ int _pointsToPsptMatrix(const float pts[4][2],
PerspectiveMatrix *pm)
{
    // Check whether the input parameters are NULL.
if (pts == NULL || pm == NULL)
return NULL_POINTER;
    // Local variable: proportionality coefficient (denominator).
float d = (pts[1][0] - pts[3][0]) * (pts[2][1] - pts[3][1]) -
(pts[2][0] - pts[3][0]) * (pts[1][1] - pts[3][1]);
if (fabs(d) < 1.0e-8f)
return INVALID_DATA;
    // Compute each element of the matrix according to the perspective-transform
    // formulas (these appear in any image-processing or graphics textbook, so
    // they are not repeated here).
pm->elem[2][0] = ((pts[0][0] - pts[1][0] + pts[3][0] - pts[2][0]) *
(pts[2][1] - pts[3][1]) -
(pts[0][1] - pts[1][1] + pts[3][1] - pts[2][1]) *
(pts[2][0] - pts[3][0])) / d;
pm->elem[2][1] = ((pts[0][1] - pts[1][1] + pts[3][1] - pts[2][1]) *
(pts[1][0] - pts[3][0]) -
(pts[0][0] - pts[1][0] + pts[3][0] - pts[2][0]) *
(pts[1][1] - pts[3][1])) / d;
pm->elem[2][2] = 1.0f;
pm->elem[0][0] = pts[1][0] - pts[0][0] + pm->elem[2][0] * pts[1][0];
pm->elem[0][1] = pts[2][0] - pts[0][0] + pm->elem[2][1] * pts[2][0];
pm->elem[0][2] = pts[0][0];
pm->elem[1][0] = pts[1][1] - pts[0][1] + pm->elem[2][0] * pts[1][1];
pm->elem[1][1] = pts[2][1] - pts[0][1] + pm->elem[2][1] * pts[2][1];
pm->elem[1][2] = pts[0][1];
    // Done, return.
return NO_ERROR;
}
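// A small hedged check (an assumption, not part of the original source):
// feeding the corners of the unit rectangle itself, in the pt00/pt10/pt01/pt11
// order also used by setPerspectiveUnitRect below, should yield (numerically)
// the 3x3 identity matrix, because the one-sided matrix maps the unit
// rectangle onto the given quadrilateral. The function name is illustrative.
static __host__ void _examplePsptMatrixIdentityCheck()
{
    float unitRect[4][2] = { { 0.0f, 0.0f }, { 1.0f, 0.0f },
                             { 0.0f, 1.0f }, { 1.0f, 1.0f } };
    PerspectiveMatrix pm;
    int errcode = _pointsToPsptMatrix(unitRect, &pm);
    // On NO_ERROR, pm.elem now holds (approximately) the 3x3 identity matrix.
    (void)errcode;
}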
// Host function: _detPsptMatrix (determinant of a perspective matrix)
static __host__ float _detPsptMatrix(const PerspectiveMatrix &pm)
{
    // Return the determinant according to the standard 3x3 formula (described
    // in detail in any linear-algebra textbook).
return pm.elem[0][0] * pm.elem[1][1] * pm.elem[2][2] +
pm.elem[0][1] * pm.elem[1][2] * pm.elem[2][0] +
pm.elem[0][2] * pm.elem[1][0] * pm.elem[2][1] -
pm.elem[0][0] * pm.elem[1][2] * pm.elem[2][1] -
pm.elem[0][1] * pm.elem[1][0] * pm.elem[2][2] -
pm.elem[0][2] * pm.elem[1][1] * pm.elem[2][0];
}
// Host function: _invPsptMatrix (inverse of a perspective matrix)
static __host__ int _invPsptMatrix(
const PerspectiveMatrix &inpm, PerspectiveMatrix *outpm)
{
    // Check whether the output pointer is NULL.
if (outpm == NULL)
return NULL_POINTER;
    // Compute the determinant of the matrix; it is needed for the inversion and
    // also tells whether the matrix is singular.
float det = _detPsptMatrix(inpm);
if (fabs(det) < 1.0e-8f)
return INVALID_DATA;
    // Compute the inverse with the standard 3x3 inversion formula (adjugate
    // over determinant), which can be found in any linear-algebra textbook.
outpm->elem[0][0] = (inpm.elem[1][1] * inpm.elem[2][2] -
inpm.elem[1][2] * inpm.elem[2][1]) / det;
outpm->elem[0][1] = (inpm.elem[0][2] * inpm.elem[2][1] -
inpm.elem[0][1] * inpm.elem[2][2]) / det;
outpm->elem[0][2] = (inpm.elem[0][1] * inpm.elem[1][2] -
inpm.elem[0][2] * inpm.elem[1][1]) / det;
outpm->elem[1][0] = (inpm.elem[1][2] * inpm.elem[2][0] -
inpm.elem[1][0] * inpm.elem[2][2]) / det;
outpm->elem[1][1] = (inpm.elem[0][0] * inpm.elem[2][2] -
inpm.elem[0][2] * inpm.elem[2][0]) / det;
outpm->elem[1][2] = (inpm.elem[0][2] * inpm.elem[1][0] -
inpm.elem[0][0] * inpm.elem[1][2]) / det;
outpm->elem[2][0] = (inpm.elem[1][0] * inpm.elem[2][1] -
inpm.elem[1][1] * inpm.elem[2][0]) / det;
outpm->elem[2][1] = (inpm.elem[0][1] * inpm.elem[2][0] -
inpm.elem[0][0] * inpm.elem[2][1]) / det;
outpm->elem[2][2] = (inpm.elem[0][0] * inpm.elem[1][1] -
inpm.elem[0][1] * inpm.elem[1][0]) / det;
    // Done, return.
return NO_ERROR;
}
// Host function: _mulPsptMatrix (perspective matrix multiplication)
static __host__ int _mulPsptMatrix(const PerspectiveMatrix &inpm1,
const PerspectiveMatrix &inpm2,
PerspectiveMatrix *outpm)
{
    // Check whether the output pointer is NULL.
if (outpm == NULL)
return NULL_POINTER;
    // Multiply following the usual matrix-multiplication formula (described in
    // any linear-algebra textbook).
outpm->elem[0][0] = inpm1.elem[0][0] * inpm2.elem[0][0] +
inpm1.elem[0][1] * inpm2.elem[1][0] +
inpm1.elem[0][2] * inpm2.elem[2][0];
outpm->elem[0][1] = inpm1.elem[0][0] * inpm2.elem[0][1] +
inpm1.elem[0][1] * inpm2.elem[1][1] +
inpm1.elem[0][2] * inpm2.elem[2][1];
outpm->elem[0][2] = inpm1.elem[0][0] * inpm2.elem[0][2] +
inpm1.elem[0][1] * inpm2.elem[1][2] +
inpm1.elem[0][2] * inpm2.elem[2][2];
outpm->elem[1][0] = inpm1.elem[1][0] * inpm2.elem[0][0] +
inpm1.elem[1][1] * inpm2.elem[1][0] +
inpm1.elem[1][2] * inpm2.elem[2][0];
outpm->elem[1][1] = inpm1.elem[1][0] * inpm2.elem[0][1] +
inpm1.elem[1][1] * inpm2.elem[1][1] +
inpm1.elem[1][2] * inpm2.elem[2][1];
outpm->elem[1][2] = inpm1.elem[1][0] * inpm2.elem[0][2] +
inpm1.elem[1][1] * inpm2.elem[1][2] +
inpm1.elem[1][2] * inpm2.elem[2][2];
outpm->elem[2][0] = inpm1.elem[2][0] * inpm2.elem[0][0] +
inpm1.elem[2][1] * inpm2.elem[1][0] +
inpm1.elem[2][2] * inpm2.elem[2][0];
outpm->elem[2][1] = inpm1.elem[2][0] * inpm2.elem[0][1] +
inpm1.elem[2][1] * inpm2.elem[1][1] +
inpm1.elem[2][2] * inpm2.elem[2][1];
outpm->elem[2][2] = inpm1.elem[2][0] * inpm2.elem[0][2] +
inpm1.elem[2][1] * inpm2.elem[1][2] +
inpm1.elem[2][2] * inpm2.elem[2][2];
    // Done, return.
return NO_ERROR;
}
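// Hedged sanity check (an assumption, not part of the original source):
// composing a matrix with its own inverse through the two helpers above should
// reproduce the 3x3 identity up to floating-point error, which is a quick way
// to validate _invPsptMatrix and _mulPsptMatrix together. The function name is
// illustrative.
static __host__ int _examplePsptInverseRoundTrip(const PerspectiveMatrix &pm)
{
    PerspectiveMatrix invpm, ident;
    int errcode = _invPsptMatrix(pm, &invpm);
    if (errcode != NO_ERROR)
        return errcode;
    errcode = _mulPsptMatrix(pm, invpm, &ident);
    if (errcode != NO_ERROR)
        return errcode;
    // ident.elem is now approximately the identity matrix.
    return NO_ERROR;
}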
// Host member method: setPerspectiveMatrix (set the perspective transform
// matrix)
__host__ int PerspectiveTrans::setPerspectiveMatrix(
const PerspectiveMatrix &newpm)
{
    // If the given matrix is singular (detected by checking whether its
    // determinant is 0), report an error and return, because a singular matrix
    // cannot be used for the mapping.
if (fabs(_detPsptMatrix(newpm)) < 1.0e-8f)
return INVALID_DATA;
    // Assign the new value to the psptMatrix member variable.
this->psptMatrix = newpm;
return NO_ERROR;
}
// Host member method: setPerspectivePoints (set the four-point parameters of
// the perspective transform)
__host__ int PerspectiveTrans::setPerspectivePoints(
const PerspectivePoints &newpp)
{
    // Local variable declarations
    int errcode;
    PerspectiveMatrix a1, a2, inva2;  // Converting the four-point parameters
                                      // into a matrix goes through the unit
                                      // rectangle as an intermediate, so two
                                      // matrices are computed here and then
                                      // combined.
    // First, compute the one-sided matrix relating the source coordinate points
    // and the unit rectangle.
errcode = _pointsToPsptMatrix(newpp.srcPts, &a1);
if (errcode != NO_ERROR)
return errcode;
    // Then compute the one-sided matrix relating the destination coordinate
    // points and the unit rectangle.
errcode = _pointsToPsptMatrix(newpp.dstPts, &a2);
if (errcode != NO_ERROR)
return errcode;
    // The two transforms to be combined go first from the source four points to
    // the unit rectangle and then from the unit rectangle to the destination
    // four points, so the second one-sided matrix has to be inverted for the
    // following step to have a proper physical meaning.
errcode = _invPsptMatrix(a2, &inva2);
if (errcode != NO_ERROR)
return errcode;
    // Merge the two-step transform into one combined matrix via matrix
    // multiplication.
errcode = _mulPsptMatrix(a1, inva2, &this->psptMatrix);
if (errcode != NO_ERROR)
return errcode;
    // Done, return.
return NO_ERROR;
}
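// Hedged usage sketch (an assumption, not part of the original source): build
// a four-point mapping and apply it to an image. The point order is assumed to
// follow the pt00/pt10/pt01/pt11 convention of setPerspectiveUnitRect below,
// srcPts/dstPts are assumed to be float[4][2] fields of PerspectivePoints, and
// the input and output images are assumed to be prepared by the caller. The
// function name and the coordinate values are illustrative only.
static __host__ int _examplePerspectiveFromPoints(
        PerspectiveTrans *pt, Image *inimg, Image *outimg)
{
    PerspectivePoints pp;
    float src[4][2] = { {  10.0f,  10.0f }, { 200.0f,  20.0f },
                        {  15.0f, 180.0f }, { 210.0f, 190.0f } };
    float dst[4][2] = { {   0.0f,   0.0f }, { 255.0f,   0.0f },
                        {   0.0f, 255.0f }, { 255.0f, 255.0f } };
    for (int i = 0; i < 4; i++) {
        pp.srcPts[i][0] = src[i][0];  pp.srcPts[i][1] = src[i][1];
        pp.dstPts[i][0] = dst[i][0];  pp.dstPts[i][1] = dst[i][1];
    }
    int errcode = pt->setPerspectivePoints(pp);
    if (errcode != NO_ERROR)
        return errcode;
    return pt->perspectiveTrans(inimg, outimg);
}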
// Host member method: setPerspectiveUnitRect (set the unit-rectangle
// parameters of the perspective transform)
__host__ int PerspectiveTrans::setPerspectiveUnitRect(
const PerspectiveUnitRect &newur)
{
    // Convert the given unit-rectangle data into the type understood by the
    // internal helper, i.e. convert PerspectiveUnitRect to float[4][2].
float tmppts[4][2];
tmppts[0][0] = newur.pt00[0];
tmppts[0][1] = newur.pt00[1];
tmppts[1][0] = newur.pt10[0];
tmppts[1][1] = newur.pt10[1];
tmppts[2][0] = newur.pt01[0];
tmppts[2][1] = newur.pt01[1];
tmppts[3][0] = newur.pt11[0];
tmppts[3][1] = newur.pt11[1];
    // Call the internal conversion helper to finish the conversion.
return _pointsToPsptMatrix(tmppts, &this->psptMatrix);
}
// Kernel function: _hardPerspectiveKer (perspective transform using hardware
// interpolation)
static __global__ void _hardPerspectiveKer(ImageCuda outimg,
PerspectiveMatrix pm)
{
    // Compute the output position handled by this thread; dstc and dstr are the
    // x and y components of the pixel coordinate (c for column, r for row).
    // Because a parallelism-reduction strategy is used, each thread processes 4
    // output pixels lying in the same column on 4 consecutive rows, so dstr has
    // to be multiplied by 4.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
    // Check whether the first pixel is out of bounds; if it is, skip it to save
    // computation and to avoid crashes caused by out-of-range memory accesses.
if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height)
return;
    // Compute the array index of the first output pixel in the image data.
int dstidx = dstr * outimg.pitchBytes + dstc;
    // Source-image coordinates corresponding to the output pixel; the
    // computation yields fractional values, so they are stored as floats.
float srcc, srcr;
    // Some intermediate variables used during the computation.
    float dstc1 = dstc + outimg.imgMeta.roiX1;  // The kernel receives ROI
    float dstr1 = dstr + outimg.imgMeta.roiY1;  // sub-images, so the pixel
                                                // coordinates must be corrected
                                                // back to coordinates in the
                                                // original image.
    float tmpc, tmpr;  // temporary coordinates not yet divided by the scale
                       // factor
    float hh;          // scale factor; it varies with the coordinate position
    // Compute the source-image coordinates of the first output point. The
    // target coordinate, extended from a 2-D to a 3-D vector, is multiplied by
    // the mapping matrix, and the resulting vector is normalised so that its z
    // component equals 1 (see any image-processing textbook for details). In
    // code this takes three steps: first compute the scale factor, then the
    // un-scaled temporary coordinates, and finally divide those by the scale
    // factor to obtain the final source coordinates.
hh = pm.elem[2][0] * dstc1 + pm.elem[2][1] * dstr1 + pm.elem[2][2];
tmpc = pm.elem[0][0] * dstc1 + pm.elem[0][1] * dstr1 + pm.elem[0][2];
tmpr = pm.elem[1][0] * dstc1 + pm.elem[1][1] * dstr1 + pm.elem[1][2];
srcc = tmpc / hh;
srcr = tmpr / hh;
// The steps above yield the source coordinates for the first output pixel. The
// hardware interpolation provided by texture memory is used here: the source
// image "pixel" value is read directly at the floating-point coordinates and
// assigned to the destination image. No bounds check is performed on the source
// read, because texture hardware interpolation handles out-of-bounds accesses:
// such accesses return a reasonable pixel value according to the configured
// addressing mode and do not cause an error.
outimg.imgMeta.imgData[dstidx] = tex2D(_hardIplInimgTex, srcc, srcr);
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
// Each of these three pixels lies one row below the previous one while the x
// component stays the same, so only the y component needs an out-of-bounds
// check; the x component does not change between the points and needs no check.
if (++dstr >= outimg.imgMeta.height)
return;
// Compute the destination-image index of the current pixel from the previous
// one. Since only the y component increases by 1, the index simply advances by
// one pitch; no multiplication is needed.
dstidx += outimg.pitchBytes;
// Compute the current source coordinates from the previous ones. Since only
// the y component increases by 1, only the terms involving dstr change, so a
// series of multiplications is replaced by simple additions of the differences.
hh += pm.elem[2][1];
tmpc += pm.elem[0][1];
tmpr += pm.elem[1][1];
srcc = tmpc / hh;
srcr = tmpr / hh;
// Write the interpolated pixel at the corresponding source position into the
// current pixel of the destination image.
outimg.imgMeta.imgData[dstidx] = tex2D(_hardIplInimgTex, srcc, srcr);
}
}
// Kernel function: _softPerspectiveKer (perspective transformation using software interpolation)
static __global__ void _softPerspectiveKer(
ImageCuda inimg, ImageCuda outimg, PerspectiveMatrix pm)
{
// Compute the position of the output pixel handled by this thread, where dstc
// and dstr are the x and y components of the pixel coordinate (c stands for
// column, r for row). Because a parallelism-reduction strategy is used, each
// thread processes 4 output pixels that lie in the same column on 4 adjacent
// rows, so dstr must be multiplied by 4.
int dstc = blockIdx.x * blockDim.x + threadIdx.x;
int dstr = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
// Check whether the first pixel is out of bounds. If so, do nothing: this both
// saves computation and prevents a crash caused by out-of-bounds memory access.
if (dstc >= outimg.imgMeta.width || dstr >= outimg.imgMeta.height)
return;
// Compute the array index in the image data corresponding to the first output
// coordinate.
int dstidx = dstr * outimg.pitchBytes + dstc;
// Declare the source-image coordinates corresponding to the output pixel.
// Since the computation yields fractional results, floating-point variables are
// used to store them.
float srcc, srcr;
// Intermediate variables used during the computation.
float dstc1 = dstc + outimg.imgMeta.roiX1; // The Kernel receives the ROI
float dstr1 = dstr + outimg.imgMeta.roiY1; // sub-image, so the pixel coordinates
// must be corrected back to coordinates
// in the original image.
float tmpc, tmpr; // Temporary coordinates before division by the scale factor.
float hh; // Scale factor; it varies with the coordinate position.
// Compute the source-image coordinates corresponding to the first output
// coordinate. The computation extends the 2-D target coordinate to a 3-D vector,
// multiplies it by the mapping matrix, and then normalizes the z component to 1
// to obtain the new coordinate (see any image-processing textbook for details).
// In code this takes three steps: first compute the scale factor, then compute
// the temporary coordinates without the scale factor, and finally divide the
// temporary coordinates by the scale factor to obtain the source coordinates.
hh = pm.elem[2][0] * dstc1 + pm.elem[2][1] * dstr1 + pm.elem[2][2];
tmpc = pm.elem[0][0] * dstc1 + pm.elem[0][1] * dstr1 + pm.elem[0][2];
tmpr = pm.elem[1][0] * dstc1 + pm.elem[1][1] * dstr1 + pm.elem[1][2];
srcc = tmpc / hh;
srcr = tmpr / hh;
// The steps above yield the source coordinates for the first output pixel.
// Here the software interpolation function is used: it computes the
// interpolated source "pixel" value at the floating-point coordinates, and the
// result is assigned to the destination image.
outimg.imgMeta.imgData[dstidx] = _fanczosInterpoDev(inimg, srcc, srcr);
// Process the remaining three pixels.
for (int i = 0; i < 3; i++) {
// Each of these three pixels lies one row below the previous one while the x
// component stays the same, so only the y component needs an out-of-bounds
// check; the x component does not change between the points and needs no check.
if (++dstr >= outimg.imgMeta.height)
return;
// Compute the destination-image index of the current pixel from the previous
// one. Since only the y component increases by 1, the index simply advances by
// one pitch; no multiplication is needed.
dstidx += outimg.pitchBytes;
// Compute the current source coordinates from the previous ones. Since only
// the y component increases by 1, only the terms involving dstr change, so a
// series of multiplications is replaced by simple additions of the differences.
hh += pm.elem[2][1];
tmpc += pm.elem[0][1];
tmpr += pm.elem[1][1];
srcc = tmpc / hh;
srcr = tmpr / hh;
// Write the interpolated pixel at the corresponding source position into the
// current pixel of the destination image.
outimg.imgMeta.imgData[dstidx] = _fanczosInterpoDev(inimg,
srcc, srcr);
}
}
// Host member method: perspectiveTrans (perspective transformation)
__host__ int PerspectiveTrans::perspectiveTrans(Image *inimg, Image *outimg)
{
// This block performs the image preprocessing, which mainly prepares memory on
// the Device for the input and output images so that they can hold the data.
int errcode; // Local variable: error code.
// Copy the input image to Device memory.
errcode = ImageBasicOp::copyToCurrentDevice(inimg);
if (errcode != NO_ERROR)
return errcode;
// Copy the output image to Device memory.
errcode = ImageBasicOp::copyToCurrentDevice(outimg);
if (errcode != NO_ERROR) {
// If the output image has no data (so the copy above failed), create an image
// with the same size as the input image.
errcode = ImageBasicOp::makeAtCurrentDevice(
outimg, inimg->width, inimg->height);
// If creating the image also fails, the operation fails completely; report the
// error and return.
if (errcode != NO_ERROR)
return errcode;
}
// Obtain the ImageCuda structure corresponding to the input image. The ROI
// sub-image is not extracted because the input image's ROI is meaningless with
// respect to the output image: rather than treating the region outside the ROI
// as out of bounds in the output image, it is more convenient to include it in
// the computation, and doing so is also less constrained by hardware alignment.
ImageCuda *inimgCud = IMAGE_CUDA(inimg);
// Extract the ROI sub-image of the output image.
ImageCuda outsubimgCud;
errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud);
if (errcode != NO_ERROR)
return errcode;
// Compute the block size and grid size for launching the Kernel function.
dim3 blocksize, gridsize;
blocksize.x = DEF_BLOCK_X;
blocksize.y = DEF_BLOCK_Y;
gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x;
gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y * 4 - 1) /
(blocksize.y * 4);
// Choose the processing path according to the implementation type.
cudaError_t cuerrcode;
switch (this->impType) {
// Perspective transformation using hardware interpolation:
case PERSPECT_HARD_IPL:
// Set up the channel format descriptor. Since there is only one color channel
// (grayscale image), only the first component of the descriptor holds data.
// The channel descriptor is used for the texture-memory binding operation.
struct cudaChannelFormatDesc chndesc;
chndesc = cudaCreateChannelDesc(sizeof (unsigned char) * 8, 0, 0, 0,
cudaChannelFormatKindUnsigned);
// Bind the input image to texture memory.
cuerrcode = cudaBindTexture2D(
NULL, &_hardIplInimgTex, inimg->imgData, &chndesc,
inimg->width, inimg->height, inimgCud->pitchBytes);
if (cuerrcode != cudaSuccess)
return CUDA_ERROR;
// Launch the Kernel function to perform the actual perspective transformation.
_hardPerspectiveKer<<<gridsize, blocksize>>>(
outsubimgCud, this->psptMatrix);
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
break;
// Perspective transformation using software interpolation:
case PERSPECT_SOFT_IPL:
// Launch the Kernel function to perform the actual perspective transformation.
_softPerspectiveKer<<<gridsize, blocksize>>>(
*inimgCud, outsubimgCud, this->psptMatrix);
if (cudaGetLastError() != cudaSuccess)
return CUDA_ERROR;
break;
// For any other value, return an invalid-data error directly. Since the NPP
// implementation has already branched into its own function earlier, this
// switch-case does not handle the NPP implementation.
default:
return INVALID_DATA;
}
// Processing finished; return.
return NO_ERROR;
}
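// A minimal usage sketch (hedged: Image, ImageBasicOp and the PerspectivePoints
// type are declared elsewhere in this project; the variable names below are
// illustrative only):
//
//     PerspectiveTrans pt;
//     pt.setPerspectivePoints(pts);        // four-point correspondence
//     pt.perspectiveTrans(inimg, outimg);  // warp inimg into outimg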
#include "k2/csrc/ragged.h"
#include "k2/python/csrc/torch/torch_util.h"
#include "k2/python/csrc/torch/v2/any.h"
#include "k2/python/csrc/torch/v2/doc/any.h"
#include "k2/python/csrc/torch/v2/doc/doc.h"
#include "k2/python/csrc/torch/v2/ragged_any.h"
namespace k2 {
void PybindRaggedAny(py::module &m) {
py::class_<RaggedAny> any(m, "RaggedTensor");
//==================================================
// k2.ragged.Tensor methods
//--------------------------------------------------
any.def(py::init<py::list, py::object, torch::Device>(), py::arg("data"),
py::arg("dtype") = py::none(),
py::arg("device") = torch::Device(torch::kCPU),
kRaggedAnyInitDataDeviceDoc);
any.def(py::init<py::list, py::object, const std::string &>(),
py::arg("data"), py::arg("dtype") = py::none(),
py::arg("device") = "cpu", kRaggedAnyInitDataDeviceDoc);
any.def(py::init<const std::string &, py::object, torch::Device>(),
py::arg("s"), py::arg("dtype") = py::none(),
py::arg("device") = torch::Device(torch::kCPU),
kRaggedAnyInitStrDeviceDoc);
any.def(py::init<const std::string &, py::object, const std::string &>(),
py::arg("s"), py::arg("dtype") = py::none(),
py::arg("device") = torch::Device(torch::kCPU),
kRaggedAnyInitStrDeviceDoc);
any.def(py::init<const RaggedShape &, torch::Tensor>(), py::arg("shape"),
py::arg("value"), kRaggedInitFromShapeAndTensorDoc);
any.def(py::init<torch::Tensor>(), py::arg("tensor"),
kRaggedAnyInitTensorDoc);
any.def(
"__str__",
[](const RaggedAny &self) -> std::string { return self.ToString(); },
kRaggedAnyStrDoc);
any.def(
"to_str_simple",
[](const RaggedAny &self) -> std::string {
return self.ToString(/*compact*/ true);
},
kRaggedAnyToStrSimpleDoc);
any.def(
"__repr__",
[](const RaggedAny &self) -> std::string { return self.ToString(); },
kRaggedAnyStrDoc);
any.def(
"__getitem__",
[](RaggedAny &self, int32_t i) -> py::object {
if (self.any.NumAxes() > 2) {
RaggedAny ragged = self.Index(/*axis*/ 0, i);
return py::cast(ragged);
} else {
DeviceGuard guard(self.any.Context());
K2_CHECK_EQ(self.any.NumAxes(), 2);
Array1<int32_t> row_split = self.any.RowSplits(1).To(GetCpuContext());
const int32_t *row_split_data = row_split.Data();
int32_t begin = row_split_data[i], end = row_split_data[i + 1];
Dtype t = self.any.GetDtype();
FOR_REAL_AND_INT32_TYPES(t, T, {
Array1<T> array =
self.any.Specialize<T>().values.Arange(begin, end);
torch::Tensor tensor = ToTorch(array);
return py::cast(tensor);
});
}
// Unreachable code
return py::none();
},
py::arg("i"), kRaggedAnyGetItemDoc);
any.def(
"__getitem__",
[](RaggedAny &self, const py::slice &slice) -> RaggedAny {
py::ssize_t start = 0, stop = 0, step = 0, slicelength = 0;
if (!slice.compute(self.any.Dim0(), &start, &stop, &step, &slicelength))
throw py::error_already_set();
int32_t istart = static_cast<int32_t>(start);
int32_t istop = static_cast<int32_t>(stop);
int32_t istep = static_cast<int32_t>(step);
K2_CHECK_EQ(istep, 1)
<< "Only support slicing with step 1, given : " << istep;
return self.Arange(/*axis*/ 0, istart, istop);
},
py::arg("key"), kRaggedAnyGetItemSliceDoc);
any.def(
"__getitem__",
[](RaggedAny &self, torch::Tensor key) -> RaggedAny {
// key is a 1-d torch tensor with dtype torch.int32
DeviceGuard guard(self.any.Context());
Array1<int32_t> indexes = FromTorch<int32_t>(key);
Dtype t = self.any.GetDtype();
FOR_REAL_AND_INT32_TYPES(t, T, {
Ragged<T> ans =
k2::Index<T>(self.any.Specialize<T>(), /*axis*/ 0, indexes,
/*value_indexes*/ nullptr);
return RaggedAny(ans.Generic());
});
// Unreachable code
return {};
},
py::arg("key"), kRaggedAnyGetItem1DTensorDoc);
any.def("index",
static_cast<RaggedAny (RaggedAny::*)(RaggedAny &)>(&RaggedAny::Index),
py::arg("indexes"), kRaggedAnyRaggedIndexDoc);
any.def("index",
static_cast<std::pair<RaggedAny, torch::optional<torch::Tensor>> (
RaggedAny::*)(torch::Tensor, int32_t, bool)>(&RaggedAny::Index),
py::arg("indexes"), py::arg("axis"),
py::arg("need_value_indexes") = false, kRaggedAnyTensorIndexDoc);
m.def(
"index",
[](torch::Tensor src, RaggedAny &indexes,
py::object default_value = py::none()) -> RaggedAny {
return indexes.Index(src, default_value);
},
py::arg("src"), py::arg("indexes"), py::arg("default_value") = py::none(),
kRaggedAnyIndexTensorWithRaggedDoc);
m.def(
"index_and_sum",
[](torch::Tensor src, RaggedAny &indexes) -> torch::Tensor {
return indexes.IndexAndSum(src);
},
py::arg("src"), py::arg("indexes"), kRaggedAnyIndexAndSumDoc);
any.def("to",
static_cast<RaggedAny (RaggedAny::*)(torch::Device) const>(
&RaggedAny::To),
py::arg("device"), kRaggedAnyToDeviceDoc);
any.def("to",
static_cast<RaggedAny (RaggedAny::*)(const std::string &) const>(
&RaggedAny::To),
py::arg("device"), kRaggedAnyToDeviceStrDoc);
any.def("to",
static_cast<RaggedAny (RaggedAny::*)(torch::ScalarType) const>(
&RaggedAny::To),
py::arg("dtype"), kRaggedAnyToDtypeDoc);
any.def(
"clone",
[](const RaggedAny &self) -> RaggedAny {
DeviceGuard guard(self.any.Context());
return self.Clone();
},
kRaggedAnyCloneDoc);
any.def(
"__eq__",
[](const RaggedAny &self, const RaggedAny &other) -> bool {
DeviceGuard guard(self.any.Context());
Dtype t = self.any.GetDtype();
bool ans = false;
FOR_REAL_AND_INT32_TYPES(t, T, {
ans = Equal<T>(self.any.Specialize<T>(), other.any.Specialize<T>());
});
return ans;
},
py::arg("other"), kRaggedAnyEqDoc);
any.def(
"__ne__",
[](const RaggedAny &self, const RaggedAny &other) -> bool {
DeviceGuard guard(self.any.Context());
Dtype t = self.any.GetDtype();
bool ans = false;
FOR_REAL_AND_INT32_TYPES(t, T, {
ans = !Equal<T>(self.any.Specialize<T>(), other.any.Specialize<T>());
});
return ans;
},
py::arg("other"), kRaggedAnyNeDoc);
any.def("requires_grad_", &RaggedAny::SetRequiresGrad,
py::arg("requires_grad") = true, kRaggedAnyRequiresGradMethodDoc);
any.def("sum", &RaggedAny::Sum, py::arg("initial_value") = 0,
kRaggedAnySumDoc);
any.def(
"numel",
[](RaggedAny &self) -> int32_t {
DeviceGuard guard(self.any.Context());
return self.any.NumElements();
},
kRaggedAnyNumelDoc);
any.def(
"tot_size",
[](const RaggedAny &self, int32_t axis) -> int32_t {
DeviceGuard guard(self.any.Context());
return self.any.TotSize(axis);
},
py::arg("axis"), kRaggedAnyTotSizeDoc);
any.def(py::pickle(
[](const RaggedAny &self) -> py::tuple {
DeviceGuard guard(self.any.Context());
K2_CHECK(self.any.NumAxes() == 2 || self.any.NumAxes() == 3)
<< "Only support Ragged with NumAxes() == 2 or 3 for now, given "
<< self.any.NumAxes();
Array1<int32_t> row_splits1 = self.any.RowSplits(1);
Dtype t = self.any.GetDtype();
FOR_REAL_AND_INT32_TYPES(t, T, {
auto values = self.any.Specialize<T>().values;
// We use "row_ids" placeholder here to make it compatible for the
// old format file.
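// Resulting tuple layout (mirrors the two branches below):
//   2 axes: (row_splits1, "row_ids1", values)
//   3 axes: (row_splits1, "row_ids1", row_splits2, "row_ids2", values)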
if (self.any.NumAxes() == 2) {
return py::make_tuple(ToTorch(row_splits1), "row_ids1",
ToTorch(values));
} else {
Array1<int32_t> row_splits2 = self.any.RowSplits(2);
return py::make_tuple(ToTorch(row_splits1), "row_ids1",
ToTorch(row_splits2), "row_ids2",
ToTorch(values));
}
});
// Unreachable code
return py::none();
},
[](const py::tuple &t) -> RaggedAny {
K2_CHECK(t.size() == 3 || t.size() == 5)
<< "Invalid state. "
<< "Expect a size of 3 or 5. Given: " << t.size();
torch::Tensor row_splits1_tensor = t[0].cast<torch::Tensor>();
DeviceGuard guard(GetContext(row_splits1_tensor));
Array1<int32_t> row_splits1 = FromTorch<int32_t>(row_splits1_tensor);
RaggedShape shape;
if (t.size() == 3) {
auto values_tensor = t[2].cast<torch::Tensor>();
Dtype t = ScalarTypeToDtype(values_tensor.scalar_type());
FOR_REAL_AND_INT32_TYPES(t, T, {
auto values = FromTorch<T>(values_tensor);
shape = RaggedShape2(&row_splits1, nullptr, values.Dim());
Ragged<T> any(shape, values);
return RaggedAny(any.Generic());
});
} else if (t.size() == 5) {
torch::Tensor row_splits2_tensor = t[2].cast<torch::Tensor>();
Array1<int32_t> row_splits2 = FromTorch<int32_t>(row_splits2_tensor);
auto values_tensor = t[4].cast<torch::Tensor>();
Dtype t = ScalarTypeToDtype(values_tensor.scalar_type());
FOR_REAL_AND_INT32_TYPES(t, T, {
auto values = FromTorch<T>(values_tensor);
shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2,
nullptr, values.Dim());
Ragged<T> any(shape, values);
return RaggedAny(any.Generic());
});
} else {
K2_LOG(FATAL) << "Invalid size : " << t.size();
}
// Unreachable code
return {};
}));
SetMethodDoc(&any, "__getstate__", kRaggedAnyGetStateDoc);
SetMethodDoc(&any, "__setstate__", kRaggedAnySetStateDoc);
any.def("remove_axis", &RaggedAny::RemoveAxis, py::arg("axis"),
kRaggedAnyRemoveAxisDoc);
any.def("arange", &RaggedAny::Arange, py::arg("axis"), py::arg("begin"),
py::arg("end"), kRaggedAnyArangeDoc);
any.def("remove_values_leq", &RaggedAny::RemoveValuesLeq, py::arg("cutoff"),
kRaggedAnyRemoveValuesLeqDoc);
any.def("remove_values_eq", &RaggedAny::RemoveValuesEq, py::arg("target"),
kRaggedAnyRemoveValuesEqDoc);
any.def("argmax", &RaggedAny::ArgMax, py::arg("initial_value") = py::none(),
kRaggedAnyArgMaxDoc);
any.def("max", &RaggedAny::Max, py::arg("initial_value") = py::none(),
kRaggedAnyMaxDoc);
any.def("min", &RaggedAny::Min, py::arg("initial_value") = py::none(),
kRaggedAnyMinDoc);
any.def_static("cat", &RaggedAny::Cat, py::arg("srcs"), py::arg("axis"),
kRaggedCatDoc);
m.attr("cat") = any.attr("cat");
any.def("unique", &RaggedAny::Unique, py::arg("need_num_repeats") = false,
py::arg("need_new2old_indexes") = false, kRaggedAnyUniqueDoc);
any.def("normalize", &RaggedAny::Normalize, py::arg("use_log"),
kRaggedAnyNormalizeDoc);
any.def("pad", &RaggedAny::Pad, py::arg("mode"), py::arg("padding_value"),
kRaggedAnyPadDoc);
any.def("tolist", &RaggedAny::ToList, kRaggedAnyToListDoc);
any.def("sort_", &RaggedAny::Sort, py::arg("descending") = false,
py::arg("need_new2old_indexes") = false, kRaggedAnySortDoc);
//==================================================
// k2.ragged.Tensor properties
//--------------------------------------------------
any.def_property_readonly(
"dtype",
[](const RaggedAny &self) -> py::object {
Dtype t = self.any.GetDtype();
auto torch = py::module::import("torch");
switch (t) {
case kFloatDtype:
return torch.attr("float32");
case kDoubleDtype:
return torch.attr("float64");
case kInt32Dtype:
return torch.attr("int32");
default:
K2_LOG(FATAL) << "Unsupported dtype: " << TraitsOf(t).Name();
}
// Unreachable code
return py::none();
},
kRaggedAnyDtypeDoc);
any.def_property_readonly(
"device",
[](const RaggedAny &self) -> py::object {
DeviceType d = self.any.Context()->GetDeviceType();
torch::DeviceType device_type = ToTorchDeviceType(d);
torch::Device device(device_type, self.any.Context()->GetDeviceId());
PyObject *ptr = THPDevice_New(device);
// takes ownership
return py::reinterpret_steal<py::object>(ptr);
},
kRaggedAnyDeviceDoc);
// Return the underlying memory of this tensor.
// No data is copied. Memory is shared.
any.def_property_readonly(
"values", [](RaggedAny &self) -> torch::Tensor { return self.Data(); },
kRaggedAnyValuesDoc);
any.def_property_readonly(
"shape", [](RaggedAny &self) -> RaggedShape { return self.any.shape; },
kRaggedAnyShapeDoc);
any.def_property_readonly(
"grad",
[](RaggedAny &self) -> torch::optional<torch::Tensor> {
if (!self.data.defined()) return {};
return self.Data().grad();
},
kRaggedAnyGradPropDoc);
any.def_property(
"requires_grad",
[](RaggedAny &self) -> bool {
if (!self.data.defined()) return false;
return self.Data().requires_grad();
},
[](RaggedAny &self, bool requires_grad) -> void {
self.SetRequiresGrad(requires_grad);
},
kRaggedAnyRequiresGradPropDoc);
any.def_property_readonly(
"is_cuda",
[](RaggedAny &self) -> bool {
return self.any.Context()->GetDeviceType() == kCuda;
},
kRaggedAnyIsCudaDoc);
// NumAxes() does not access GPU memory
any.def_property_readonly(
"num_axes",
[](const RaggedAny &self) -> int32_t { return self.any.NumAxes(); },
kRaggedAnyNumAxesDoc);
// Dim0() does not access GPU memory
any.def_property_readonly(
"dim0", [](const RaggedAny &self) -> int32_t { return self.any.Dim0(); },
kRaggedAnyDim0Doc);
//==================================================
// _k2.ragged.functions
//--------------------------------------------------
m.def(
"create_ragged_tensor",
[](py::list data, py::object dtype = py::none(),
torch::Device device = torch::kCPU) -> RaggedAny {
return RaggedAny(data, dtype, device);
},
py::arg("data"), py::arg("dtype") = py::none(),
py::arg("device") = torch::Device(torch::kCPU),
kCreateRaggedTensorDataDoc);
m.def(
"create_ragged_tensor",
[](py::list data, py::object dtype = py::none(),
const std::string &device = "cpu") -> RaggedAny {
return RaggedAny(data, dtype, device);
},
py::arg("data"), py::arg("dtype") = py::none(), py::arg("device") = "cpu",
kCreateRaggedTensorDataDoc);
m.def(
"create_ragged_tensor",
[](const std::string &s, py::object dtype = py::none(),
torch::Device device = torch::kCPU) -> RaggedAny {
return RaggedAny(s, dtype, device);
},
py::arg("s"), py::arg("dtype") = py::none(),
py::arg("device") = torch::Device(torch::kCPU),
kCreateRaggedTensorStrDoc);
m.def(
"create_ragged_tensor",
[](const std::string &s, py::object dtype = py::none(),
const std::string &device = "cpu") -> RaggedAny {
return RaggedAny(s, dtype, device);
},
py::arg("s"), py::arg("dtype") = py::none(), py::arg("device") = "cpu",
kCreateRaggedTensorStrDoc);
m.def(
"create_ragged_tensor",
[](torch::Tensor tensor) -> RaggedAny { return RaggedAny(tensor); },
py::arg("tensor"), kCreateRaggedTensorTensorDoc);
}
} // namespace k2
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/device_helper.h"
#include "Fast.h"
namespace Saiga
{
namespace CUDA
{
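// Precomputed lookup table used by the FAST corner test further below
// (presumably a bit-packed table indexed by the pattern of brighter/darker
// circle pixels; the values are kept verbatim from the original source).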
__constant__ unsigned char c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80,
0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0,
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80,
0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
__device__ void calcMask(const uint32_t C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0) return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
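// Note on the masks built above (interpretation of the code): bit i of mask1 is set
// when circle pixel i is brighter than the center v by more than th, and bit i of
// mask2 when it is darker. Bits 0..7 come from C[0]/C[1] and bits 8..15 from
// C[2]/C[3], so bits i and i + 8 correspond to diametrically opposite circle pixels
// (see how C[] is filled in isKeyPoint2 below). The early returns fire once such an
// opposite pair lies entirely inside the threshold band: every arc of 9 contiguous
// circle pixels contains at least one pixel of each opposite pair, so no all-brighter
// or all-darker arc of 9 can remain.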
// A candidate is a keypoint if more than 8 of the 16 circle pixels are brighter
// than the center (mask1) or darker than the center (mask2) by more than the
// threshold, and those pixels pass the contiguity check encoded in c_table.
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
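// Interpretation of the c_table lookup above: the table appears to be a precomputed
// bitset over all 16-bit masks, where bit (mask & 7) of byte ((mask >> 3) - 63)
// records whether the set bits of `mask` form a circular run of at least 9
// consecutive positions. The __popc(mask) > 8 guard cheaply rejects most candidates
// and also guarantees the table index is non-negative, since popcount > 8 implies
// mask >= 0x1ff and hence (mask >> 3) >= 63.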
__device__ int cornerScore(const uint32_t C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
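// Note on the loop above: the min/max updates are a branch-free form of the usual
// binary search for the largest threshold at which the pixel still qualifies,
// i.e. equivalent to
//   if (isKp) min = mid + 1;   // still a keypoint -> try a higher threshold
//   else      max = mid - 1;   // no longer a keypoint -> lower the threshold
// so the returned score, min - 1, is the largest threshold that still passes.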
__device__ int isKeyPoint2(Saiga::ImageView<unsigned char> img, const int i, const int j, const int threshold)
{
int v;
uint32_t C[4] = {0, 0, 0, 0};
C[2] |= static_cast<uint32_t>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint32_t>(img(i - 3, j));
C[1] |= static_cast<uint32_t>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint32_t>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint32_t>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint32_t>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint32_t>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint32_t>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint32_t>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0)
{
return 0;
}
C[3] |= static_cast<uint32_t>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint32_t>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint32_t>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint32_t>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint32_t>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint32_t>(img(i + 3, j));
C[0] |= static_cast<uint32_t>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2))
{
return cornerScore(C, v, threshold);
}
return 0;
}
__device__ bool isMax(int2 loc, Saiga::ImageView<int> scoreMat)
{
int score = scoreMat(loc.y, loc.x);
bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y, loc.x - 1) && score > scoreMat(loc.y, loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x) &&
score > scoreMat(loc.y + 1, loc.x + 1);
return ismax;
}
template <int TILE_SIZE_X, int TILE_SIZE_Y>
__global__ void tileCalcKeypoints_kernel(Saiga::ImageView<unsigned char> img_, short2* kpLoc, float* kpScore,
const unsigned int maxKeypoints, const int highThreshold,
const int lowThreshold, unsigned int* counter_ptr)
{
int max_kps_high = 50;
int max_kps_low = 50;
const int required_border = 4;
const int local_image_w = TILE_SIZE_X + 2 * required_border;
const int local_image_h = TILE_SIZE_Y + 2 * required_border;
static_assert(local_image_w % 4 == 0, "local image width must be a multiple of 4");
static_assert(local_image_h % 4 == 0, "local image height must be a multiple of 4");
CUDA_ASSERT(img_.pitchBytes % 4 == 0);
__shared__ int local_image_i[local_image_h][local_image_w / 4];
__shared__ int local_score[local_image_h][local_image_w];
__shared__ unsigned int num_kps;
const int2 global_inner_start = {int(blockIdx.x * blockDim.x), int((blockIdx.y * blockDim.y) * 4)};
const int2 global_outer_start = {global_inner_start.x - 4, global_inner_start.y - 4};
const int block_start_x = blockIdx.x * blockDim.x;
const int block_start_y = (blockIdx.y * blockDim.y) * 4;
const int linear_local_tid = threadIdx.y * blockDim.x + threadIdx.x;
for (int t = linear_local_tid; t < (local_image_w / 4) * local_image_h; t += blockDim.x * blockDim.y)
{
int local_x = t % (local_image_w / 4);
int local_y = t / (local_image_w / 4);
int x = global_outer_start.x + local_x * 4;
int y = global_outer_start.y + local_y;
CUDA_ASSERT(x % 4 == 0);
// clamp to border is better than conditional reads
x = max(0, min(x, (int)img_.pitchBytes - 4));
y = max(0, min(y, img_.rows - 1));
CUDA_ASSERT(x % 4 == 0);
reinterpret_cast<int*>(&local_image_i[local_y][local_x])[0] = reinterpret_cast<const int*>(&img_(y, x))[0];
}
__syncthreads();
Saiga::ImageView<unsigned char> img;
img.w = local_image_w;
img.h = local_image_h;
img.pitchBytes = local_image_w;
img.dataT = reinterpret_cast<unsigned char*>(&local_image_i[0][0]);
Saiga::ImageView<int> scoreMat;
scoreMat.w = local_image_w;
scoreMat.h = local_image_h;
scoreMat.pitchBytes = local_image_w * 4;
scoreMat.dataT = reinterpret_cast<int*>(&local_score[0][0]);
if (linear_local_tid == 0)
{
num_kps = 0;
}
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);
int x = local_x + 3;
int y = local_y + 3;
scoreMat(y, x) = isKeyPoint2(img, y, x, highThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_high)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
__syncthreads();
if (num_kps > 0) return;
// compute score
for (int t = linear_local_tid; t < (TILE_SIZE_X + 2) * (TILE_SIZE_Y + 2); t += blockDim.x * blockDim.y)
{
int local_x = t % (TILE_SIZE_X + 2);
int local_y = t / (TILE_SIZE_X + 2);
int x = local_x + 3;
int y = local_y + 3;
bool in_bounds = (block_start_y + y < img_.rows - 3) && (block_start_x + x < img_.cols - 3);
scoreMat(y, x) = in_bounds * isKeyPoint2(img, y, x, lowThreshold);
}
__syncthreads();
for (int t = 0; t < 4; ++t)
{
int inner_x = threadIdx.x;
int inner_y = threadIdx.y + t * 8;
int x = inner_x + 4;
int y = inner_y + 4;
int global_x = inner_x + global_inner_start.x;
int global_y = inner_y + global_inner_start.y;
int score = scoreMat(y, x);
if (score == 0) continue;
if (global_y >= img_.rows - 4 || global_x >= img_.cols - 4 || global_x < 4 || global_y < 4)
{
continue;
}
if (isMax(make_int2(x, y), scoreMat))
{
auto local_index = atomicInc(&num_kps, (unsigned int)(-1));
if (local_index < max_kps_low)
{
auto global_index = atomicInc(counter_ptr, (unsigned int)(-1));
if (global_index < maxKeypoints)
{
short2 loc;
loc.x = global_x;
loc.y = global_y;
kpLoc[global_index] = loc;
kpScore[global_index] = static_cast<float>(score);
}
}
}
}
}
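// Summary of the kernel above: each block caches one TILE_SIZE_X x TILE_SIZE_Y tile
// plus a 4-pixel apron in shared memory (loaded 4 bytes at a time, hence the
// pitch/width alignment asserts), scores it with the high threshold first, and only
// if the tile yielded no keypoints at all does it rescore with the low threshold.
// Local maxima (isMax, 3x3 neighbourhood) are appended to the global keypoint list
// with atomics, capped per tile by max_kps_high / max_kps_low and globally by
// maxKeypoints.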
__global__ void createKps(Saiga::ArrayView<Saiga::KeyPoint<float>> kps, short2* kpLoc, float* kpScore)
{
Saiga::CUDA::ThreadInfo<> ti;
int i = ti.thread_id;
if (i >= kps.size())
{
return;
}
kps[i] = Saiga::KeyPoint<float>(kpLoc[i].x, kpLoc[i].y, 7, -1, kpScore[i]);
}
Fast::Fast(int highThreshold, int lowThreshold, int maxKeypoints)
: highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints)
{
counter_keypoint_location.resize(maxKeypoints + 1);
keypoint_score.resize(maxKeypoints);
h_counter_keypoint_location.resize(maxKeypoints + 1);
h_keypoint_score.resize(maxKeypoints);
}
Fast::~Fast() {}
void Fast::Detect(Saiga::ImageView<unsigned char> d_image, cudaStream_t stream)
{
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto d_counter = (unsigned int*)counter_keypoint_location.data().get();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
{
CHECK_CUDA_ERROR(cudaMemsetAsync(d_counter, 0, sizeof(unsigned int), stream));
dim3 dimBlock(32, 8);
dim3 dimGrid(Saiga::iDivUp(d_image.cols, 32), Saiga::iDivUp(d_image.rows, 32));
tileCalcKeypoints_kernel<32, 32><<<dimGrid, dimBlock, 0, stream>>>(d_image, keypoint_location,
keypoint_score.data().get(), maxKeypoints,
highThreshold, lowThreshold, d_counter);
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data(), counter_keypoint_location.data().get(),
sizeof(short2) * (actual_max_keypoints + 1), cudaMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data(), keypoint_score.data().get(),
sizeof(float) * actual_max_keypoints, cudaMemcpyDeviceToHost, stream));
detection_finished.record(stream);
}
}
int Fast::Download(Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints, cudaStream_t stream)
{
detection_finished.synchronize();
auto h_counter = (unsigned int*)h_counter_keypoint_location.data();
auto keypoint_location = counter_keypoint_location.data().get() + 1;
auto count = h_counter[0];
if (count > actual_max_keypoints)
{
auto remaining_points = count - actual_max_keypoints;
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_counter_keypoint_location.data() + actual_max_keypoints + 1,
keypoint_location + actual_max_keypoints, sizeof(short2) * remaining_points,
cudaMemcpyDeviceToHost, stream));
CHECK_CUDA_ERROR(cudaMemcpyAsync(h_keypoint_score.data() + actual_max_keypoints,
keypoint_score.data().get() + actual_max_keypoints,
sizeof(float) * remaining_points, cudaMemcpyDeviceToHost, stream));
actual_max_keypoints = count * 1.05;
CHECK_CUDA_ERROR(cudaStreamSynchronize(stream));
}
SAIGA_ASSERT(keypoints.size() >= count);
for (int i = 0; i < count; ++i)
{
Saiga::KeyPoint<float> kp(h_counter_keypoint_location[i + 1].x, h_counter_keypoint_location[i + 1].y, 0, -1,
h_keypoint_score[i]);
keypoints[i] = kp;
}
return count;
}
} // namespace CUDA
} // namespace Saiga
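// Minimal usage sketch (illustrative, not part of the original file; threshold
// values and buffer sizes are placeholders, and it assumes Saiga::ArrayView can
// wrap a std::vector):
//
//   Saiga::CUDA::Fast fast(/*highThreshold=*/20, /*lowThreshold=*/7, /*maxKeypoints=*/4096);
//   Saiga::ImageView<unsigned char> d_gray = ...;   // grayscale image already on the device
//   std::vector<Saiga::KeyPoint<float>> h_kps(4096);
//   fast.Detect(d_gray, stream);                    // launches tileCalcKeypoints_kernel
//   int n = fast.Download(h_kps, stream);           // waits for detection, returns keypoint count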
// input dimension: (B x N x 1 x H x W)
// output dimension: (B x N x 1 x H x W)
__global__ void DepthColorAngleReprojectionNeighbours_forward_depth_kernel(
float *input,
float *output,
float *cameras,
float *invKRs,
float *camlocs,
int batch_size,
int nrcams,
int input_height,
int input_width)
{
int colstep = 1;
int rowstep = colstep * input_width;
int camstep = rowstep * input_height;
int btcstep = camstep * nrcams;
int clocs_camstep = 3;
int clocs_btcstep = clocs_camstep * nrcams;
int invKRs_camstep = 9;
int invKRs_btcstep = invKRs_camstep * nrcams;
int proj[2];
float w[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < batch_size; b += blockDim.x * gridDim.x) {
float *camera0 = cameras + b * nrcams * 12;
for (int n = 0; n < nrcams; n++) {
for (int in_row = blockIdx.y * blockDim.y + threadIdx.y; in_row < input_height; in_row += blockDim.y * gridDim.y) {
for (int in_col = blockIdx.z * blockDim.z + threadIdx.z; in_col < input_width; in_col += blockDim.z * gridDim.z) {
float depth_n = input[b * btcstep + n * camstep + in_row * rowstep + in_col * colstep];
if(n == 0) {
// simply copy the first camera's depth map
output[b * btcstep + n * camstep + in_row * rowstep + in_col * colstep] = depth_n;
}
else if(depth_n > 0) {
// cast this point into space
float *camloc = camlocs + b * clocs_btcstep + n * clocs_camstep;
float *invKR = invKRs + b * invKRs_btcstep + n * invKRs_camstep;
MYTH_unproject_point(camloc, invKR, in_col, in_row, depth_n, w);
// project it onto the first camera again
if(MYTH_project_point(camera0, w, proj, input_width, input_height)) {
MYTH_atomicMinf(
output + b * btcstep + n * camstep + proj[1] * rowstep + proj[0] * colstep,
MYTH_get_point_depth(camera0, w)
);
}
}
}
}
}
}
}
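// Illustrative host-side helper (not part of the original file): the kernels in
// this file flatten a (B x N x 1 x H x W) tensor with colstep = 1, rowstep = W,
// camstep = W * H and btcstep = W * H * N; this sketch reproduces that mapping.
static inline int flat_index_bnhw(int b, int n, int row, int col,
                                  int nrcams, int height, int width)
{
    const int colstep = 1;
    const int rowstep = colstep * width;
    const int camstep = rowstep * height;
    const int btcstep = camstep * nrcams;
    return b * btcstep + n * camstep + row * rowstep + col * colstep;
}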
__global__ void DepthColorAngleReprojectionNeighbours_forward_colorangle_kernel(
float *input_depth,
float *output_depth,
float *input_color,
float *output_color,
float *output_angle,
float *cameras,
float *invKRs,
float *camlocs,
int batch_size,
int nrcams,
int nrchans,
int input_height,
int input_width)
{
int colstep = 1;
int rowstep = colstep * input_width;
int camstep_d = rowstep * input_height;
int btcstep_d = camstep_d * nrcams;
int chnstep_c = rowstep * input_height;
int camstep_c = chnstep_c * nrchans;
int btcstep_c = camstep_c * nrcams;
int clocs_camstep = 3;
int clocs_btcstep = clocs_camstep * nrcams;
int invKRs_camstep = 9;
int invKRs_btcstep = invKRs_camstep * nrcams;
int proj[2];
float w[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < batch_size; b += blockDim.x * gridDim.x) {
float *camera0 = cameras + b * nrcams * 12;
float *camloc0 = camlocs + b * clocs_btcstep;
for (int n = 0; n < nrcams; n++) {
for (int in_row = blockIdx.y * blockDim.y + threadIdx.y; in_row < input_height; in_row += blockDim.y * gridDim.y) {
for (int in_col = blockIdx.z * blockDim.z + threadIdx.z; in_col < input_width; in_col += blockDim.z * gridDim.z) {
float depth_n = input_depth[b * btcstep_d + n * camstep_d + in_row * rowstep + in_col * colstep];
if(n == 0) {
// simply copy the first camera's color
for(int c = 0; c < nrchans; c++) {
float color_n = input_color[b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep];
output_color[b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep] = color_n;
}
output_angle[b * btcstep_d + n * camstep_d + in_row * rowstep + in_col * colstep] = 1.0f;
}
else if(depth_n > 0) {
// cast this point into space
float *camloc = camloc0 + n * clocs_camstep;
float *invKR = invKRs + b * invKRs_btcstep + n * invKRs_camstep;
MYTH_unproject_point(camloc, invKR, in_col, in_row, depth_n, w);
// project it onto the first camera again
if(MYTH_project_point(camera0, w, proj, input_width, input_height)) {
float zbuffer = output_depth[b * btcstep_d + n * camstep_d + proj[1] * rowstep + proj[0] * colstep];
float this_z = MYTH_get_point_depth(camera0, w);
if(this_z <= zbuffer) {
for(int c = 0; c < nrchans; c++) {
float color_n = input_color[b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep];
output_color[b * btcstep_c + n * camstep_c + c * chnstep_c + proj[1] * rowstep + proj[0] * colstep] = color_n;
}
// also pass the cosine of the angle between the viewing lines as a feature
float angle = 0.0f;
float norm = 0.0f, norm0 = 0.0f;
for(int i = 0; i < 3; i++) {
angle += (camloc[i] - w[i]) * (camloc0[i] - w[i]);
norm += (camloc[i] - w[i]) * (camloc[i] - w[i]);
norm0 += (camloc0[i] - w[i]) * (camloc0[i] - w[i]);
}
output_angle[b * btcstep_d + n * camstep_d + proj[1] * rowstep + proj[0] * colstep] = angle / sqrt(norm * norm0);
}
}
}
}
}
}
}
}
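// Illustrative host-side mirror (not part of the original file) of the angle
// feature computed above: the cosine of the angle between the rays from the
// unprojected point w to camera n's center (camloc) and to the reference camera's
// center (camloc0).
#include <cmath>
static inline float viewing_angle_cosine(const float camloc[3], const float camloc0[3], const float w[3])
{
    float dot = 0.0f, norm = 0.0f, norm0 = 0.0f;
    for (int i = 0; i < 3; i++) {
        dot   += (camloc[i] - w[i]) * (camloc0[i] - w[i]);
        norm  += (camloc[i] - w[i]) * (camloc[i] - w[i]);
        norm0 += (camloc0[i] - w[i]) * (camloc0[i] - w[i]);
    }
    return dot / std::sqrt(norm * norm0);
}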
//the input dimension is (B x N x 1 x H x W)
//the output dimension is (B x N x 1 x H x W)
extern "C" void DepthColorAngleReprojectionNeighbours_updateOutput_gpu(
THCudaTensor *input_depth,
THCudaTensor *output_depth,
THCudaTensor *input_color,
THCudaTensor *output_color,
THCudaTensor *output_angle,
THCudaTensor *cameras,
THCudaTensor *invKRs,
THCudaTensor *camlocs)
{
int blkdim = 16;
int batch_size = THCudaTensor_size(state, input_depth,0);
int nrviews = THCudaTensor_size(state, input_depth,1);
int color_channels = THCudaTensor_size(state, input_color,2);
int input_height = THCudaTensor_size(state, input_depth,3);
int input_width = THCudaTensor_size(state, input_depth,4);
// we will use one thread for all depth hypotheses, to save some calculations regarding the directions and matrix inversions
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1,ceil(THCudaTensor_size(state, output_depth, 3)*1.0f/blkdim), ceil(THCudaTensor_size(state, output_depth, 4)*1.0f/blkdim));
float *input_depth_p = THCudaTensor_data(state, input_depth);
float *output_depth_p = THCudaTensor_data(state, output_depth);
float *input_color_p = THCudaTensor_data(state, input_color);
float *output_color_p = THCudaTensor_data(state, output_color);
float *output_angle_p = THCudaTensor_data(state, output_angle);
float *cameras_p = THCudaTensor_data(state, cameras);
float *invKRs_p = THCudaTensor_data(state, invKRs);
float *camlocs_p = THCudaTensor_data(state, camlocs);
cudaStream_t stream = THCState_getCurrentStream(state);
DepthColorAngleReprojectionNeighbours_forward_depth_kernel<<<grid, block, 0, stream>>>(input_depth_p, output_depth_p, cameras_p, invKRs_p, camlocs_p, batch_size, nrviews, input_height, input_width);
cudaStreamSynchronize(stream);
DepthColorAngleReprojectionNeighbours_forward_colorangle_kernel<<<grid, block, 0, stream>>>(input_depth_p, output_depth_p, input_color_p, output_color_p, output_angle_p, cameras_p, invKRs_p, camlocs_p, batch_size, nrviews, color_channels, input_height, input_width);
THCudaCheck(cudaGetLastError());
}
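// Worked example of the launch configuration above (illustrative sizes): with
// blkdim = 16 and a 480 x 640 depth map, block = (1, 16, 16) and
// grid = (1, ceil(480/16), ceil(640/16)) = (1, 30, 40), so each thread handles one
// (row, col) position and loops over all batch items and camera views itself.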
__global__ void DepthColorAngleReprojectionNeighbours_backward_color_kernel(
float *input_depth,
float *output_depth,
float *dloss_input_color,
float *dloss_output_color,
float *cameras,
float *invKRs,
float *camlocs,
int batch_size,
int nrcams,
int nrchans,
int input_height,
int input_width)
{
int colstep = 1;
int rowstep = colstep * input_width;
int camstep_d = rowstep * input_height;
int btcstep_d = camstep_d * nrcams;
int chnstep_c = rowstep * input_height;
int camstep_c = chnstep_c * nrchans;
int btcstep_c = camstep_c * nrcams;
int clocs_camstep = 3;
int clocs_btcstep = clocs_camstep * nrcams;
int invKRs_camstep = 9;
int invKRs_btcstep = invKRs_camstep * nrcams;
int proj[2];
float w[3];
for (int b = blockIdx.x * blockDim.x + threadIdx.x; b < batch_size; b += blockDim.x * gridDim.x) {
float *camera0 = cameras + b * nrcams * 12;
float *camloc0 = camlocs + b * clocs_btcstep;
for (int n = 0; n < nrcams; n++) {
for (int in_row = blockIdx.y * blockDim.y + threadIdx.y; in_row < input_height; in_row += blockDim.y * gridDim.y) {
for (int in_col = blockIdx.z * blockDim.z + threadIdx.z; in_col < input_width; in_col += blockDim.z * gridDim.z) {
float depth_n = input_depth[b * btcstep_d + n * camstep_d + in_row * rowstep + in_col * colstep];
if(n == 0) {
// simply copy the first camera's color
for(int c = 0; c < nrchans; c++) {
float dloss_output_n = dloss_output_color[b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep];
atomicAdd(
dloss_input_color + b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep,
dloss_output_n
);
}
}
else if(depth_n > 0) {
// cast this point into space
float *camloc = camloc0 + n * clocs_camstep;
float *invKR = invKRs + b * invKRs_btcstep + n * invKRs_camstep;
MYTH_unproject_point(camloc, invKR, in_col, in_row, depth_n, w);
// project it onto the first camera again
if(MYTH_project_point(camera0, w, proj, input_width, input_height)) {
float zbuffer = output_depth[b * btcstep_d + n * camstep_d + proj[1] * rowstep + proj[0] * colstep];
float this_z = MYTH_get_point_depth(camera0, w);
if(this_z <= zbuffer) {
for(int c = 0; c < nrchans; c++) {
float dloss_output_n = dloss_output_color[b * btcstep_c + n * camstep_c + c * chnstep_c + proj[1] * rowstep + proj[0] * colstep];
atomicAdd(
dloss_input_color + b * btcstep_c + n * camstep_c + c * chnstep_c + in_row * rowstep + in_col * colstep,
dloss_output_n
);
}
}
}
}
}
}
}
}
}
//the input dimension is (B x N x 1 x H x W)
//the output dimension is (B x N x 1 x H x W)
extern "C" void DepthColorAngleReprojectionNeighbours_updateGradInput_gpu(
THCudaTensor *input_depth,
THCudaTensor *output_depth,
THCudaTensor *dloss_input_color,
THCudaTensor *dloss_output_color,
THCudaTensor *cameras,
THCudaTensor *invKRs,
THCudaTensor *camlocs)
{
int blkdim = 16;
int batch_size = THCudaTensor_size(state, input_depth,0);
int nrviews = THCudaTensor_size(state, input_depth,1);
int color_channels = THCudaTensor_size(state, dloss_output_color,2);
int input_height = THCudaTensor_size(state, input_depth,3);
int input_width = THCudaTensor_size(state, input_depth,4);
// we will use one thread for all depth hypotheses, to save some calculations regarding the directions and matrix inversions
const dim3 block = dim3(1, blkdim, blkdim);
const dim3 grid = dim3(1,ceil(THCudaTensor_size(state, output_depth, 3)*1.0f/blkdim), ceil(THCudaTensor_size(state, output_depth, 4)*1.0f/blkdim));
float *input_depth_p = THCudaTensor_data(state, input_depth);
float *output_depth_p = THCudaTensor_data(state, output_depth);
float *dloss_input_color_p = THCudaTensor_data(state, dloss_input_color);
float *dloss_output_color_p = THCudaTensor_data(state, dloss_output_color);
float *cameras_p = THCudaTensor_data(state, cameras);
float *invKRs_p = THCudaTensor_data(state, invKRs);
float *camlocs_p = THCudaTensor_data(state, camlocs);
cudaStream_t stream = THCState_getCurrentStream(state);
DepthColorAngleReprojectionNeighbours_backward_color_kernel<<<grid, block, 0, stream>>>(input_depth_p, output_depth_p, dloss_input_color_p, dloss_output_color_p, cameras_p, invKRs_p, camlocs_p, batch_size, nrviews, color_channels, input_height, input_width);
THCudaCheck(cudaGetLastError());
}
// For internal OP use, not user facing
template <typename T>
__global__ void Pad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / padded_width / padded_height;
const int padded_w = pos % padded_width;
const int padded_h = pos / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[(block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
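// Illustrative host-side reference (not part of the original file) for the index
// arithmetic in Pad above: a flat NCHW output index `pos` decomposes into
// (block = n * channels + c, padded_h, padded_w) and, when not in the padded
// border, maps back to the corresponding unpadded input element.
template <typename T>
T PadReferenceAt(const T* input, size_t pos, int old_height, int old_width,
                 int padded_height, int padded_width, int pad_top, int pad_left,
                 T pad_value) {
  const int block_num = static_cast<int>(pos / padded_width / padded_height);
  const int padded_w = static_cast<int>(pos % padded_width);
  const int padded_h = static_cast<int>(pos / padded_width % padded_height);
  const int h = padded_h - pad_top;
  const int w = padded_w - pad_left;
  if (h < 0 || w < 0 || h >= old_height || w >= old_width) {
    return pad_value;
  }
  return input[(block_num * old_height + h) * old_width + w];
}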
// For internal OP use, not user facing
template <typename T>
__global__ void PadNHWC(const size_t size, const T* input, const int num, const int old_height, const int old_width,
const int channels, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T* output) {
T pad_value_ = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / channels / padded_width / padded_height;
const int padded_w = pos / channels % padded_width;
const int padded_h = pos / channels / padded_width % padded_height;
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width) {
output[pos] = pad_value_;
} else {
output[pos] = input[((block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left)
*channels + pos % channels];
}
}
return;
}
// Used by user facing 'Pad' API
template <typename T>
__global__ void PadGeneral(const size_t size, const T *input, const int num, const int channels_orig,
const int pad_channel_before, const int pad_channel_after, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, float pad_value, T *output) {
T pad_value_template = static_cast<T>(pad_value);
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
int block_num = (pos / padded_width) / padded_height; // total blocks = (batch * channels)
const int padded_w = pos % padded_width; // x coordinate referred to by the current 'pos'
const int padded_h = (pos / padded_width) % padded_height; // y coordinate referred to by the current 'pos'
int channels_new = channels_orig + pad_channel_after + pad_channel_before; // new number of channels from padding
int channel_num = block_num % channels_new; // current channel
int batch_item = block_num / channels_new; // current item in batch
int equiv_block_num = 0; // equivalent input block to copy data from
if (padded_h - pad_top < 0 || padded_w - pad_left < 0 || padded_h - pad_top >= old_height ||
padded_w - pad_left >= old_width || channel_num <= pad_channel_before - 1 ||
channel_num > channels_orig + pad_channel_before - 1) {
output[pos] = pad_value_template;
} else {
// on a block/x,y position that isn't padding, copy data from the corresponding block/x,y position in the input,
// accounting for the number of padding blocks (due to channel padding) inserted before it
equiv_block_num = block_num - (batch_item * (pad_channel_before + pad_channel_after)) - pad_channel_before;
output[pos] = input[(equiv_block_num * old_height + padded_h - pad_top) * old_width + padded_w - pad_left];
}
}
return;
}
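// Worked example for the channel mapping above (illustrative numbers): with
// channels_orig = 3, pad_channel_before = 1 and pad_channel_after = 2,
// channels_new = 6. For batch_item = 1 and channel_num = 2 (i.e. block_num = 8),
// the block is not padding (1 <= channel_num <= 3), and
// equiv_block_num = 8 - 1 * (1 + 2) - 1 = 4, which is input channel 1 of batch item 1.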
template <typename T>
__global__ void PadGradNHWC(const size_t size, const T* dy, const int num, const int old_height, const int old_width,
const int channels, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / channels / old_width / old_height;
const int padded_w = pos / channels % old_width + pad_left;
const int padded_h = pos / channels / old_width % old_height + pad_top;
dx[pos] = dy[((block_num * padded_height + padded_h) * padded_width + padded_w)*channels+pos%channels];
}
return;
}
template <typename T>
__global__ void PadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
int block_num = pos / old_width / old_height;
const int padded_w = pos % old_width + pad_left;
const int padded_h = pos / old_width % old_height + pad_top;
dx[pos] = dy[(block_num * padded_height + padded_h) * padded_width + padded_w];
}
return;
}
template <typename T>
void CalPad(const size_t size, const T* input, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top, const int pad_left,
const float pad_value, T* output, cudaStream_t cuda_stream) {
Pad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, pad_value,
output);
return;
}
template <typename T>
void CalPadNHWC(const size_t size, const T* input, const int num, const int old_height, const int old_width,
const int channels, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, const float pad_value, T* output, cudaStream_t cuda_stream) {
PadNHWC<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, num, old_height, old_width, channels,
padded_height, padded_width, pad_top, pad_left, pad_value, output);
return;
}
template <typename T>
void CalPadGeneral(const size_t size, const T *input, const int num, const int channels_orig,
const int pad_channel_before, const int pad_channel_after, const int old_height, const int old_width,
const int padded_height, const int padded_width, const int pad_top, const int pad_left,
float pad_value, T *output, cudaStream_t cuda_stream) {
PadGeneral<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input, num, channels_orig, pad_channel_before,
pad_channel_after, old_height, old_width, padded_height,
padded_width, pad_top, pad_left, pad_value, output);
return;
}
template <typename T>
void CalPadGradNHWC(const size_t size, const T* dy, const int num, const int old_height, const int old_width,
const int channels, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx, cudaStream_t cuda_stream) {
PadGradNHWC<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy, num, old_height, old_width, channels,
padded_height, padded_width, pad_top, pad_left, dx);
return;
}
template <typename T>
void CalPadGrad(const size_t size, const T* dy, const int num, const int channels, const int old_height,
const int old_width, const int padded_height, const int padded_width, const int pad_top,
const int pad_left, T* dx, cudaStream_t cuda_stream) {
PadGrad<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy, num, channels, old_height, old_width,
padded_height, padded_width, pad_top, pad_left, dx);
return;
}
template void CalPad<float>(const size_t size, const float* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, float* output,
cudaStream_t cuda_stream);
template void CalPadGrad<float>(const size_t size, const float* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float* dx,
cudaStream_t cuda_stream);
template void CalPad<half>(const size_t size, const half* input, const int num, const int channels,
const int old_height, const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, half* output,
cudaStream_t cuda_stream);
template void CalPadGrad<half>(const size_t size, const half* dy, const int num, const int channels,
const int old_height, const int old_width, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, half* dx,
cudaStream_t cuda_stream);
template void CalPadNHWC<float>(const size_t size, const float* input, const int num, const int old_height,
const int old_width, const int channels, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float pad_value,
float* output, cudaStream_t cuda_stream);
template void CalPadNHWC<half>(const size_t size, const half* input, const int num, const int old_height,
const int old_width, const int channels, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float pad_value,
half* output, cudaStream_t cuda_stream);
template void CalPadGradNHWC<float>(const size_t size, const float* dy, const int num, const int old_height,
const int old_width, const int channels, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, float* dx,
cudaStream_t cuda_stream);
template void CalPadGradNHWC<half>(const size_t size, const half* dy, const int num, const int old_height,
const int old_width, const int channels, const int padded_height,
const int padded_width, const int pad_top, const int pad_left, half* dx,
cudaStream_t cuda_stream);
template void CalPadGeneral<float>(const size_t size, const float *input, const int num, const int channels_orig,
const int pad_channel_before, const int pad_channel_after, const int old_height,
const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, float *output,
cudaStream_t cuda_stream);
template void CalPadGeneral<half>(const size_t size, const half *input, const int num, const int channels_orig,
const int pad_channel_before, const int pad_channel_after, const int old_height,
const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, half *output,
cudaStream_t cuda_stream);
template void CalPadGeneral<int>(const size_t size, const int *input, const int num, const int channels_orig,
const int pad_channel_before, const int pad_channel_after, const int old_height,
const int old_width, const int padded_height, const int padded_width,
const int pad_top, const int pad_left, float pad_value, int *output,
cudaStream_t cuda_stream);
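// Minimal usage sketch (illustrative, not part of the original file; assumes the
// surrounding framework headers providing GET_BLOCKS/GET_THREADS are available and
// that d_in/d_out are correctly sized device buffers): pad a (num=1, channels=3,
// 4x5) float tensor by 1 row on top and 2 columns on the left, filling with 0.f.
void ExamplePadLaunch(const float* d_in, float* d_out, cudaStream_t stream) {
  const int num = 1, channels = 3, old_h = 4, old_w = 5;
  const int pad_top = 1, pad_left = 2;
  const int padded_h = old_h + pad_top;    // no bottom padding in this example
  const int padded_w = old_w + pad_left;   // no right padding in this example
  const size_t size = static_cast<size_t>(num) * channels * padded_h * padded_w;
  CalPad<float>(size, d_in, num, channels, old_h, old_w, padded_h, padded_w,
                pad_top, pad_left, 0.f, d_out, stream);
}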
#ifdef WIN32
# ifndef strncasecmp
# define strncasecmp strnicmp
# endif
#endif
#define GET_LINE() if (!fgets(buf, 1024, f)) return false
#define COND_READ(cond, where, len) if ((cond) && !fread((void *)&(where), (len), 1, f)) return false
#define LINE_IS(text) !strncasecmp(buf, text, strlen(text))
#define BIGNUM 1.0e10
// Forward declarations
static bool read_ply(FILE *f, TriMesh *mesh);
static bool read_3ds(FILE *f, TriMesh *mesh);
static bool read_vvd(FILE *f, TriMesh *mesh);
static bool read_ray(FILE *f, TriMesh *mesh);
static bool read_obj(FILE *f, TriMesh *mesh);
static bool read_off(FILE *f, TriMesh *mesh);
static bool read_sm( FILE *f, TriMesh *mesh);
static bool read_verts_bin(FILE *f, TriMesh *mesh, bool &need_swap,
int nverts, int vert_len, int vert_pos, int vert_norm,
int vert_color, bool float_color, int vert_conf);
static bool slurp_verts_bin(FILE *f, TriMesh *mesh, bool need_swap,
int nverts);
static bool read_verts_asc(FILE *f, TriMesh *mesh,
int nverts, int vert_len, int vert_pos, int vert_norm,
int vert_color, bool float_color, int vert_conf);
static bool read_faces_bin(FILE *f, TriMesh *mesh, bool need_swap,
int nfaces, int face_len, int face_count, int face_idx);
static bool read_faces_asc(FILE *f, TriMesh *mesh, int nfaces,
int face_len, int face_count, int face_idx, bool read_to_eol = false);
static bool read_strips_bin(FILE *f, TriMesh *mesh, bool need_swap);
static bool read_strips_asc(FILE *f, TriMesh *mesh);
static bool read_grid_bin(FILE *f, TriMesh *mesh, bool need_swap);
static bool read_grid_asc(FILE *f, TriMesh *mesh);
static bool ply_property(const char *buf, int &len, bool binary);
static bool we_are_little_endian();
static void check_need_swap(const point &p, bool &need_swap);
static void check_ind_range(TriMesh *mesh);
static void skip_comments(FILE *f);
static void tess(const std::vector<point> &verts, const std::vector<int> &thisface,
std::vector<TriMesh::Face> &tris);
static void write_ply_ascii(TriMesh *mesh, FILE *f,
bool write_norm, bool float_color);
static void write_ply_binary(TriMesh *mesh, FILE *f,
bool need_swap, bool write_norm, bool float_color);
static void write_ray(TriMesh *mesh, FILE *f);
static void write_obj(TriMesh *mesh, FILE *f);
static void write_off(TriMesh *mesh, FILE *f);
static void write_sm(TriMesh *mesh, FILE *f);
static void write_cc(TriMesh *mesh, FILE *f, const char *filename,
bool write_norm, bool float_color);
static void write_verts_asc(TriMesh *mesh, FILE *f,
const char *before_vert,
const char *before_norm,
const char *before_color,
bool float_color,
const char *before_conf,
const char *after_line);
static void write_verts_bin(TriMesh *mesh, FILE *f, bool need_swap,
bool write_norm, bool write_color,
bool float_color, bool write_conf);
static void write_faces_asc(TriMesh *mesh, FILE *f,
const char *before_face, const char *after_line);
static void write_faces_bin(TriMesh *mesh, FILE *f, bool need_swap,
int before_face_len, const char *before_face,
int after_face_len, const char *after_face);
static void write_strips_asc(TriMesh *mesh, FILE *f);
static void write_strips_bin(TriMesh *mesh, FILE *f, bool need_swap);
static void write_grid_asc(TriMesh *mesh, FILE *f);
static void write_grid_bin(TriMesh *mesh, FILE *f, bool need_swap);
// Byte swap uints, ints, and floats
static inline void swap_unsigned(volatile unsigned &x)
{
x = (x << 24u) |
((x << 8u) & 0x00ff0000u) |
((x >> 8u) & 0x0000ff00u) |
(x >> 24u);
}
static inline void swap_int(int &x)
{
swap_unsigned(* (unsigned *)(&x));
}
static inline void swap_double(double &x)
{
unsigned char buf[8];
memcpy(buf, &x, 8);
std::swap(buf[0], buf[7]);
std::swap(buf[1], buf[6]);
std::swap(buf[2], buf[5]);
std::swap(buf[3], buf[4]);
memcpy(&x, buf, 8);
}
static inline void swap_ushort(volatile unsigned short &x)
{
x = (x << 8u) | (x >> 8u);
}
static inline void swap_short(signed short &x)
{
swap_ushort(* (unsigned short *)(&x));
}
// unget a whole string of characters
static void pushback(const char *buf, FILE *f)
{
const char *c = buf;
while (*c)
c++;
while ((--c) >= buf)
ungetc(*c, f);
}
// Read a TriMesh from a file. Defined to use a helper function to make
// subclassing easier.
TriMesh *TriMesh::read(const char *filename)
{
TriMesh *mesh = new TriMesh();
if (read_helper(filename, mesh))
return mesh;
delete mesh;
return NULL;
}
// Actually read a mesh. Tries to figure out type of file from first
// few bytes. Filename can be "-" for stdin
bool TriMesh::read_helper(const char *filename, TriMesh *mesh)
{
if (!filename || *filename == '\0')
return false;
FILE *f = NULL;
bool ok = false;
int c;
if (strcmp(filename, "-") == 0) {
f = stdin;
filename = "standard input";
} else {
f = fopen(filename, "rb");
if (!f) {
perror("fopen");
goto out;
}
}
dprintf("Reading %s... ", filename);
c = fgetc(f);
if (c == EOF) {
fprintf(stderr, "Can't read header\n");
goto out;
}
if (c == 'p') {
// See if it's a ply file
char buf[4];
if (!fgets(buf, 4, f)) {
fprintf(stderr, "Can't read header\n");
goto out;
}
if (strncmp(buf, "ly", 2) == 0)
ok = read_ply(f, mesh);
} else if (c == 0x4d) {
int c2 = fgetc(f);
ungetc(c2, f);
ungetc(c, f);
if (c2 == 0x4d)
ok = read_3ds(f, mesh);
} else if (c == 'V') {
char buf[5];
if (!fgets(buf, 5, f)) {
fprintf(stderr, "Can't read header\n");
goto out;
}
if (strncmp(buf, "IVID", 4) == 0)
ok = read_vvd(f, mesh);
} else if (c == '#') {
char buf[1024];
fscanf(f, "%1024s", buf);
if (LINE_IS("material") || LINE_IS("vertex") ||
LINE_IS("shape_")) {
// Assume a ray file
pushback(buf, f);
ungetc(c, f);
ok = read_ray(f, mesh);
} else {
// Assume an obj file
ok = read_obj(f, mesh);
}
} else if (c == 'v' || c == 'u' || c == 'f' || c == 'g' || c == 's' || c == 'o') {
// Assume an obj file
ungetc(c, f);
ok = read_obj(f, mesh);
} else if (c == 'O') {
// Assume an OFF file
char buf[3];
if (!fgets(buf, 3, f)) {
fprintf(stderr, "Can't read header\n");
goto out;
}
if (strncmp(buf, "FF", 2) == 0)
ok = read_off(f, mesh);
} else if (isdigit(c)) {
// Assume an old-style sm file
ungetc(c, f);
ok = read_sm(f, mesh);
} else {
fprintf(stderr, "Unknown file type\n");
}
out:
if (f)
fclose(f);
if (!ok || (mesh->vertices.empty() && mesh->faces.empty())) {
fprintf(stderr, "\nError reading file [%s]\n", filename);
return false;
}
dprintf("Done.\n");
check_ind_range(mesh);
return true;
}
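// Summary of the type sniffing above:
//   'p' followed by "ly"                     -> PLY
//   0x4d 0x4d ("MM")                         -> 3DS
//   'V' followed by "IVID"                   -> VVD
//   '#' + material/vertex/shape_ keyword     -> ray file; any other '#...' -> OBJ
//   'v', 'u', 'f', 'g', 's', 'o'             -> OBJ
//   'O' followed by "FF"                     -> OFF
//   leading digit                            -> old-style SM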
// Read a ply file
static bool read_ply(FILE *f, TriMesh *mesh)
{
char buf[1024];
bool binary = false, need_swap = false, float_color = false;
int result, nverts = 0, nfaces = 0, nstrips = 0, ngrid = 0;
int vert_len = 0, vert_pos = -1, vert_norm = -1;
int vert_color = -1, vert_conf = -1;
int face_len = 0, face_count = -1, face_idx = -1;
// Read file format
GET_LINE();
while (buf[0] && isspace(buf[0]))
GET_LINE();
if (LINE_IS("format binary_big_endian 1.0")) {
binary = true;
need_swap = we_are_little_endian();
} else if (LINE_IS("format binary_little_endian 1.0")) {
binary = true;
need_swap = !we_are_little_endian();
} else if (LINE_IS("format ascii 1.0")) {
binary = false;
} else {
fprintf(stderr, "Unknown ply format or version\n");
return false;
}
// Skip comments and unknown obj_info lines
GET_LINE();
while (LINE_IS("obj_info") || LINE_IS("comment")) {
if (LINE_IS("obj_info num_cols"))
sscanf(buf, "obj_info num_cols %d", &mesh->grid_width);
if (LINE_IS("obj_info num_rows"))
sscanf(buf, "obj_info num_rows %d", &mesh->grid_height);
GET_LINE();
}
// Skip until we find vertices
int skip1 = 0;
while (!LINE_IS("end_header") && !LINE_IS("element vertex")) {
char elem_name[1024];
int nelem = 0, elem_len = 0;
sscanf(buf, "element %s %d", elem_name, &nelem);
GET_LINE();
while (LINE_IS("property")) {
if (!ply_property(buf, elem_len, binary))
return false;
GET_LINE();
}
skip1 += nelem * elem_len;
}
// Find number of vertices
result = sscanf(buf, "element vertex %d\n", &nverts);
if (result != 1) {
fprintf(stderr, "Expected \"element vertex\"\n");
return false;
}
// Parse vertex properties
GET_LINE();
while (LINE_IS("property")) {
if (LINE_IS("property float x") ||
LINE_IS("property float32 x"))
vert_pos = vert_len;
if (LINE_IS("property float nx") ||
LINE_IS("property float32 nx"))
vert_norm = vert_len;
if (LINE_IS("property uchar diffuse_red") ||
LINE_IS("property uint8 diffuse_red") ||
LINE_IS("property uchar red") ||
LINE_IS("property uint8 red"))
vert_color = vert_len;
if (LINE_IS("property float diffuse_red") ||
LINE_IS("property float32 diffuse_red") ||
LINE_IS("property float red") ||
LINE_IS("property float32 red"))
vert_color = vert_len, float_color = true;
if (LINE_IS("property float confidence") ||
LINE_IS("property float32 confidence"))
vert_conf = vert_len;
if (!ply_property(buf, vert_len, binary))
return false;
GET_LINE();
}
// Skip until we find faces
int skip2 = 0;
while (!LINE_IS("end_header") && !LINE_IS("element face") &&
!LINE_IS("element tristrips") && !LINE_IS("element range_grid")) {
char elem_name[1024];
int nelem = 0, elem_len = 0;
sscanf(buf, "element %s %d", elem_name, &nelem);
GET_LINE();
while (LINE_IS("property")) {
if (!ply_property(buf, elem_len, binary))
return false;
GET_LINE();
}
skip2 += nelem * elem_len;
}
// Look for faces, tristrips, or range grid
if (LINE_IS("element face")) {
if (sscanf(buf, "element face %d\n", &nfaces) != 1)
return false;
GET_LINE();
while (LINE_IS("property")) {
if (LINE_IS("property list uchar int vertex_indices") ||
LINE_IS("property list uint8 int32 vertex_indices") ||
LINE_IS("property list char int vertex_indices") ||
LINE_IS("property list int8 int32 vertex_indices") ||
LINE_IS("property list uchar int vertex_index") ||
LINE_IS("property list uint8 int32 vertex_index") ||
LINE_IS("property list char int vertex_index") ||
LINE_IS("property list int8 int32 vertex_index")) {
face_count = face_len;
face_idx = face_len + 1;
face_len += 1;
} else if
(LINE_IS("property list uint int vertex_indices") ||
LINE_IS("property list uint32 int32 vertex_indices") ||
LINE_IS("property list int int vertex_indices") ||
LINE_IS("property list int32 int32 vertex_indices") ||
LINE_IS("property list uint int vertex_index") ||
LINE_IS("property list uint32 int32 vertex_index") ||
LINE_IS("property list int int vertex_index") ||
LINE_IS("property list int32 int32 vertex_index")) {
face_count = face_len;
face_idx = face_len + (binary ? 4 : 1);
face_len += (binary ? 4 : 1);
} else if (!ply_property(buf, face_len, binary))
return false;
GET_LINE();
}
} else if (LINE_IS("element tristrips")) {
nstrips = 1;
GET_LINE();
if (!LINE_IS("property list int int vertex_indices") &&
!LINE_IS("property list int32 int32 vertex_indices"))
return false;
GET_LINE();
} else if (LINE_IS("element range_grid")) {
if (sscanf(buf, "element range_grid %d\n", &ngrid) != 1)
return false;
if (ngrid != mesh->grid_width*mesh->grid_height) {
fprintf(stderr, "Range grid size does not equal num_rows*num_cols\n");
return false;
}
GET_LINE();
if (!LINE_IS("property list uchar int vertex_indices") &&
!LINE_IS("property list uint8 int32 vertex_indices") &&
!LINE_IS("property list char int vertex_indices") &&
!LINE_IS("property list int8 int32 vertex_indices"))
return false;
GET_LINE();
}
while (LINE_IS("property")) {
if (!ply_property(buf, face_len, binary))
return false;
GET_LINE();
}
// Skip to the end of the header
while (!LINE_IS("end_header"))
GET_LINE();
if (binary && buf[10] == '\r') {
fprintf(stderr, "Warning! Possibly corrupt file\n");
fprintf(stderr, " If things don't work, make sure this file was transferred in BINARY, not ASCII mode\n");
}
// Actually read everything in
if (skip1) {
if (binary)
fseek(f, skip1, SEEK_CUR);
else
for (int i = 0; i < skip1; i++)
fscanf(f, "%s", buf);
}
if (binary) {
if (!read_verts_bin(f, mesh, need_swap, nverts, vert_len,
vert_pos, vert_norm, vert_color,
float_color, vert_conf))
return false;
} else {
if (!read_verts_asc(f, mesh, nverts, vert_len,
vert_pos, vert_norm, vert_color,
float_color, vert_conf))
return false;
}
if (skip2) {
if (binary)
fseek(f, skip2, SEEK_CUR);
else
for (int i = 0; i < skip2; i++)
fscanf(f, "%s", buf);
}
if (ngrid) {
if (binary) {
if (!read_grid_bin(f, mesh, need_swap))
return false;
} else {
if (!read_grid_asc(f, mesh))
return false;
}
} else if (nstrips) {
if (binary) {
if (!read_strips_bin(f, mesh, need_swap))
return false;
} else {
if (!read_strips_asc(f, mesh))
return false;
}
// mesh->convert_strips(TriMesh::TSTRIP_LENGTH);
} else if (nfaces) {
if (binary) {
if (!read_faces_bin(f, mesh, need_swap, nfaces,
face_len, face_count, face_idx))
return false;
} else {
if (!read_faces_asc(f, mesh, nfaces,
face_len, face_count, face_idx))
return false;
}
}
return true;
}
#define CHUNK_3DS_MAIN 0x4d4d
#define CHUNK_3DS_MODEL 0x3d3d
#define CHUNK_3DS_OBJ 0x4000
#define CHUNK_3DS_MESH 0x4100
#define CHUNK_3DS_VERT 0x4110
#define CHUNK_3DS_FACE 0x4120
// Read a 3DS file.
static bool read_3ds(FILE *f, TriMesh *mesh)
{
bool need_swap = !we_are_little_endian();
int mstart = 0;
while (!feof(f)) {
short chunkid;
int chunklen;
if (!fread(&chunkid, 2, 1, f) ||
!fread(&chunklen, 4, 1, f))
return false;
if (need_swap) {
swap_short(chunkid);
swap_int(chunklen);
}
//TriMesh::dprintf("Found chunk %x of length %d\n", chunkid, chunklen);
switch (chunkid) {
case CHUNK_3DS_MAIN:
case CHUNK_3DS_MODEL:
// Just recurse into this chunk
break;
case CHUNK_3DS_OBJ:
// Skip name, then recurse
while (!feof(f) && fgetc(f))
;
break;
case CHUNK_3DS_MESH:
mstart = mesh->vertices.size();
break;
case CHUNK_3DS_VERT: {
unsigned short nverts;
if (!fread(&nverts, 2, 1, f))
return false;
if (need_swap)
swap_ushort(nverts);
read_verts_bin(f, mesh, need_swap,
nverts, 12, 0, -1, -1, false, -1);
break;
}
case CHUNK_3DS_FACE: {
unsigned short nfaces;
if (!fread(&nfaces, 2, 1, f))
return false;
if (need_swap)
swap_ushort(nfaces);
TriMesh::dprintf("\n Reading %d faces... ", nfaces);
int old_nfaces = mesh->faces.size();
int new_nfaces = old_nfaces + nfaces;
mesh->faces.resize(new_nfaces);
for (int i = old_nfaces; i < new_nfaces; i++) {
unsigned short buf[4];
COND_READ(true, buf[0], 8);
if (need_swap) {
swap_ushort(buf[0]);
swap_ushort(buf[1]);
swap_ushort(buf[2]);
}
mesh->faces[i][0] = mstart + buf[0];
mesh->faces[i][1] = mstart + buf[1];
mesh->faces[i][2] = mstart + buf[2];
}
break;
}
default: {
// Skip over this chunk
fseek(f, chunklen-6, SEEK_CUR);
}
}
}
return true;
}
// Read a VVD file.
static bool read_vvd(FILE *f, TriMesh *mesh)
{
bool need_swap = we_are_little_endian();
const int skip = 127;
char buf[skip];
fread(buf, skip, 1, f);
int nverts;
if (fread(&nverts, 4, 1, f) != 1) {
fprintf(stderr, "Couldn't read vertex count\n");
return false;
}
if (need_swap)
swap_int(nverts);
mesh->vertices.resize(nverts);
TriMesh::dprintf("\n Reading %d vertices... ", nverts);
for (int i = 0; i < nverts; i++) {
double v[3];
if (fread(&v[0], 24, 1, f) != 1) {
fprintf(stderr, "Couldn't read vertex\n");
return false;
}
if (need_swap) {
swap_double(v[0]);
swap_double(v[1]);
swap_double(v[2]);
}
mesh->vertices[i] = point(double(v[0]), double(v[1]), double(v[2]));
}
int nfaces;
if (fread(&nfaces, 4, 1, f) != 1) {
fprintf(stderr, "Couldn't read face count\n");
return false;
}
if (need_swap)
swap_int(nfaces);
read_faces_bin(f, mesh, need_swap, nfaces, 4, 0, 4);
return true;
}
// Read a ray file
static bool read_ray(FILE *f, TriMesh *mesh)
{
while (!feof(f)) {
char buf[1024];
buf[0] = '\0';
if (fscanf(f, " %1024s", buf) == 0)
return true;
if (LINE_IS("#vertex")) {
double x, y, z;
if (fscanf(f, "%f %f %f", &x, &y, &z) != 3) {
return false;
}
mesh->vertices.push_back(point(x,y,z));
} else if (LINE_IS("#shape_triangle")) {
int f1, f2, f3, m;
if (fscanf(f, "%d %d %d %d", &m, &f1, &f2, &f3) != 4) {
return false;
}
mesh->faces.push_back(TriMesh::Face(f1,f2,f3));
}
}
return true;
}
// Read an obj file
static bool read_obj(FILE *f, TriMesh *mesh)
{
std::vector<int> thisface;
while (1) {
skip_comments(f);
if (feof(f))
break;
char buf[1024];
GET_LINE();
if (LINE_IS("v ") || LINE_IS("v\t")) {
double x, y, z;
if (sscanf(buf+1, "%lf %lf %lf", &x, &y, &z) != 3) {
return false;
}
mesh->vertices.push_back(point(x,y,z));
} else if (LINE_IS("f ") || LINE_IS("f\t") ||
LINE_IS("t ") || LINE_IS("t\t")) {
thisface.clear();
char *c = buf;
while (1) {
while (*c && *c != '\n' && !isspace(*c))
c++;
while (*c && isspace(*c))
c++;
int thisf;
if (sscanf(c, " %d", &thisf) != 1)
break;
if (thisf < 0)
thisf += mesh->vertices.size();
else
thisf--;
thisface.push_back(thisf);
}
tess(mesh->vertices, thisface, mesh->faces);
}
}
return true;
}
// Read an off file
static bool read_off(FILE *f, TriMesh *mesh)
{
skip_comments(f);
char buf[1024];
GET_LINE();
int nverts, nfaces, unused;
if (sscanf(buf, "%d %d %d", &nverts, &nfaces, &unused) < 2)
return false;
if (!read_verts_asc(f, mesh, nverts, 3, 0, -1, -1, false, -1))
return false;
if (!read_faces_asc(f, mesh, nfaces, 1, 0, 1, true))
return false;
return true;
}
// Read an sm file
static bool read_sm(FILE *f, TriMesh *mesh)
{
int nverts, nfaces;
if (fscanf(f, "%d", &nverts) != 1)
return false;
if (!read_verts_asc(f, mesh, nverts, 3, 0, -1, -1, false, -1))
return false;
skip_comments(f);
if (fscanf(f, "%d", &nfaces) != 1)
return true;
if (!read_faces_asc(f, mesh, nfaces, 0, -1, 0))
return false;
return true;
}
// Read nverts vertices from a binary file.
// vert_len = total length of a vertex record in bytes
// vert_pos, vert_norm, vert_color, vert_conf =
// position of vertex coordinates / normals / color / confidence in record
// need_swap = swap for opposite endianness
// float_color = colors are 4-byte float * 3, vs 1-byte uchar * 3
static bool read_verts_bin(FILE *f, TriMesh *mesh, bool &need_swap,
int nverts, int vert_len, int vert_pos, int vert_norm,
int vert_color, bool float_color, int vert_conf)
{
const int vert_size = 12;
const int norm_size = 12;
const int color_size = float_color ? 12 : 3;
const int conf_size = 4;
if (nverts <= 0 || vert_len < 12 || vert_pos < 0)
return false;
int old_nverts = mesh->vertices.size();
int new_nverts = old_nverts + nverts;
mesh->vertices.resize(new_nverts);
bool have_norm = (vert_norm >= 0);
bool have_color = (vert_color >= 0);
bool have_conf = (vert_conf >= 0);
if (have_norm)
mesh->normals.resize(new_nverts);
if (have_color)
mesh->colors.resize(new_nverts);
if (have_conf)
mesh->confidences.resize(new_nverts);
unsigned char *buf = new unsigned char[vert_len];
COND_READ(true, buf[0], vert_len);
int i = old_nverts;
memcpy(&mesh->vertices[i][0], &buf[vert_pos], vert_size);
if (have_norm)
memcpy(&mesh->normals[i][0], &buf[vert_norm], norm_size);
if (have_color && float_color)
memcpy(&mesh->colors[i][0], &buf[vert_color], color_size);
if (have_color && !float_color)
mesh->colors[i] = Color(&buf[vert_color]);
if (have_conf)
memcpy(&mesh->confidences[i], &buf[vert_conf], conf_size);
check_need_swap(mesh->vertices[i], need_swap);
if (need_swap) {
swap_double(mesh->vertices[i][0]);
swap_double(mesh->vertices[i][1]);
swap_double(mesh->vertices[i][2]);
if (have_norm) {
swap_double(mesh->normals[i][0]);
swap_double(mesh->normals[i][1]);
swap_double(mesh->normals[i][2]);
}
if (have_color && float_color) {
swap_double(mesh->colors[i][0]);
swap_double(mesh->colors[i][1]);
swap_double(mesh->colors[i][2]);
}
if (have_conf)
swap_double(mesh->confidences[i]);
}
TriMesh::dprintf("\n Reading %d vertices... ", nverts);
if (vert_len == 12 && sizeof(point) == 12 && nverts > 1)
return slurp_verts_bin(f, mesh, need_swap, nverts);
while (++i < new_nverts) {
COND_READ(true, buf[0], vert_len);
memcpy(&mesh->vertices[i][0], &buf[vert_pos], vert_size);
if (have_norm)
memcpy(&mesh->normals[i][0], &buf[vert_norm], norm_size);
if (have_color && float_color)
memcpy(&mesh->colors[i][0], &buf[vert_color], color_size);
if (have_color && !float_color)
mesh->colors[i] = Color(&buf[vert_color]);
if (have_conf)
memcpy(&mesh->confidences[i], &buf[vert_conf], conf_size);
if (need_swap) {
swap_double(mesh->vertices[i][0]);
swap_double(mesh->vertices[i][1]);
swap_double(mesh->vertices[i][2]);
if (have_norm) {
swap_double(mesh->normals[i][0]);
swap_double(mesh->normals[i][1]);
swap_double(mesh->normals[i][2]);
}
if (have_color && float_color) {
swap_double(mesh->colors[i][0]);
swap_double(mesh->colors[i][1]);
swap_double(mesh->colors[i][2]);
}
if (have_conf)
swap_double(mesh->confidences[i]);
}
}
return true;
}
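// Worked example (illustrative, not from the original source): a binary vertex
// record declared as three 4-byte position floats, three 4-byte normal floats,
// and three 1-byte color channels occupies 12 + 12 + 3 = 27 bytes, so a caller
// would pass vert_len=27, vert_pos=0, vert_norm=12, vert_color=24,
// float_color=false, vert_conf=-1, and the memcpy/Color() branches above pull
// each field out of buf[] at those offsets.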
// Optimized reader for the simple case of just vertices w/o other properties
static bool slurp_verts_bin(FILE *f, TriMesh *mesh, bool need_swap, int nverts)
{
int first = mesh->vertices.size() - nverts + 1;
COND_READ(true, mesh->vertices[first][0], (nverts-1)*12);
if (need_swap) {
for (int i = first; i < mesh->vertices.size(); i++) {
swap_double(mesh->vertices[i][0]);
swap_double(mesh->vertices[i][1]);
swap_double(mesh->vertices[i][2]);
}
}
return true;
}
// Read a bunch of vertices from an ASCII file.
// Parameters are as in read_verts_bin, but offsets are in
// (white-space-separated) words, rather than in bytes
static bool read_verts_asc(FILE *f, TriMesh *mesh,
int nverts, int vert_len, int vert_pos, int vert_norm,
int vert_color, bool float_color, int vert_conf)
{
if (nverts <= 0 || vert_len < 3 || vert_pos < 0)
return false;
int old_nverts = mesh->vertices.size();
int new_nverts = old_nverts + nverts;
mesh->vertices.resize(new_nverts);
if (vert_norm > 0)
mesh->normals.resize(new_nverts);
if (vert_color > 0)
mesh->colors.resize(new_nverts);
if (vert_conf > 0)
mesh->confidences.resize(new_nverts);
char buf[1024];
skip_comments(f);
TriMesh::dprintf("\n Reading %d vertices... ", nverts);
for (int i = old_nverts; i < new_nverts; i++) {
for (int j = 0; j < vert_len; j++) {
if (j == vert_pos) {
if (fscanf(f, "%lf %lf %lf",
&mesh->vertices[i][0],
&mesh->vertices[i][1],
&mesh->vertices[i][2]) != 3)
return false;
j += 2;
} else if (j == vert_norm) {
if (fscanf(f, "%lf %lf %lf",
&mesh->normals[i][0],
&mesh->normals[i][1],
&mesh->normals[i][2]) != 3)
return false;
j += 2;
} else if (j == vert_color && float_color) {
double r, g, b;
if (fscanf(f, "%lf %lf %lf", &r, &g, &b) != 3)
return false;
mesh->colors[i] = Color(r,g,b);
j += 2;
} else if (j == vert_color && !float_color) {
int r, g, b;
if (fscanf(f, "%d %d %d", &r, &g, &b) != 3)
return false;
mesh->colors[i] = Color(r,g,b);
j += 2;
} else if (j == vert_conf) {
if (fscanf(f, "%lf", &mesh->confidences[i]) != 1)
return false;
} else {
fscanf(f, " %1024s", buf);
}
}
}
return true;
}
// Read nfaces faces from a binary file.
// face_len = total length of face record, *not counting the indices*
// (Yes, this is bizarre, but there is potentially a variable # of indices...)
// face_count = offset within record of the count of indices in this face
// (If this is -1, does not read a count and assumes triangles)
// face_idx = offset within record of the indices themselves
static bool read_faces_bin(FILE *f, TriMesh *mesh, bool need_swap,
int nfaces, int face_len, int face_count, int face_idx)
{
if (nfaces < 0 || face_idx < 0)
return false;
if (nfaces == 0)
return true;
TriMesh::dprintf("\n Reading %d faces... ", nfaces);
int old_nfaces = mesh->faces.size();
int new_nfaces = old_nfaces + nfaces;
mesh->faces.reserve(new_nfaces);
// face_len doesn't include the indices themselves, since that's
// potentially variable-length
int face_skip = face_len - face_idx;
std::vector<unsigned char> buf(max(face_idx, face_skip));
std::vector<int> thisface;
for (int i = 0; i < nfaces; i++) {
COND_READ(face_idx > 0, buf[0], face_idx);
unsigned this_ninds = 3;
if (face_count >= 0) {
// Read count - either 1 or 4 bytes
if (face_idx - face_count == 4) {
this_ninds = * (unsigned *) &(buf[face_count]);
if (need_swap)
swap_unsigned(this_ninds);
} else {
this_ninds = buf[face_count];
}
}
thisface.resize(this_ninds);
COND_READ(true, thisface[0], 4*this_ninds);
if (need_swap) {
for (size_t j = 0; j < thisface.size(); j++)
swap_int(thisface[j]);
}
tess(mesh->vertices, thisface, mesh->faces);
COND_READ(face_skip > 0, buf[0], face_skip);
}
return true;
}
// Read a bunch of faces from an ASCII file
static bool read_faces_asc(FILE *f, TriMesh *mesh, int nfaces,
int face_len, int face_count, int face_idx, bool read_to_eol /* = false */)
{
if (nfaces < 0 || face_idx < 0)
return false;
if (nfaces == 0)
return true;
int old_nfaces = mesh->faces.size();
int new_nfaces = old_nfaces + nfaces;
mesh->faces.reserve(new_nfaces);
char buf[1024];
skip_comments(f);
TriMesh::dprintf("\n Reading %d faces... ", nfaces);
std::vector<int> thisface;
for (int i = 0; i < nfaces; i++) {
thisface.clear();
int this_face_count = 3;
for (int j = 0; j < face_len + this_face_count; j++) {
if (j >= face_idx && j < face_idx + this_face_count) {
thisface.push_back(0);
if (!fscanf(f, " %d", &(thisface.back()))) {
TriMesh::dprintf("Couldn't read vertex index %d for face %d\n",
j - face_idx, i);
return false;
}
} else if (j == face_count) {
if (!fscanf(f, " %d", &this_face_count)) {
TriMesh::dprintf("Couldn't read vertex count for face %d\n", i);
return false;
}
} else {
fscanf(f, " %s", buf);
}
}
tess(mesh->vertices, thisface, mesh->faces);
if (read_to_eol) {
while (1) {
int c = fgetc(f);
if (c == EOF || c == '\n')
break;
}
}
}
return true;
}
// Read triangle strips from a binary file
static bool read_strips_bin(FILE *f, TriMesh *mesh, bool need_swap)
{
int striplen;
COND_READ(true, striplen, 4);
if (need_swap)
swap_int(striplen);
int old_striplen = mesh->tstrips.size();
int new_striplen = old_striplen + striplen;
mesh->tstrips.resize(new_striplen);
TriMesh::dprintf("\n Reading triangle strips... ");
COND_READ(true, mesh->tstrips[old_striplen], 4*striplen);
if (need_swap) {
for (int i = old_striplen; i < new_striplen; i++)
swap_int(mesh->tstrips[i]);
}
return true;
}
// Read triangle strips from an ASCII file
static bool read_strips_asc(FILE *f, TriMesh *mesh)
{
skip_comments(f);
int striplen;
if (fscanf(f, "%d", &striplen) != 1)
return false;
int old_striplen = mesh->tstrips.size();
int new_striplen = old_striplen + striplen;
mesh->tstrips.resize(new_striplen);
TriMesh::dprintf("\n Reading triangle strips... ");
skip_comments(f);
for (int i = old_striplen; i < new_striplen; i++)
if (fscanf(f, "%d", &mesh->tstrips[i]) != 1)
return false;
return true;
}
// Read range grid data from a binary file
static bool read_grid_bin(FILE *f, TriMesh *mesh, bool need_swap)
{
// TriMesh::dprintf("\n Reading range grid... ");
// int ngrid = mesh->grid_width * mesh->grid_height;
// mesh->grid.resize(ngrid, TriMesh::GRID_INVALID);
// for (int i = 0; i < ngrid; i++) {
// int n = fgetc(f);
// if (n == EOF)
// return false;
// while (n--) {
// if (!fread((void *)&(mesh->grid[i]), 4, 1, f))
// return false;
// if (need_swap)
// swap_int(mesh->grid[i]);
// }
// }
//
// mesh->triangulate_grid();
return true;
}
// Read range grid data from an ASCII file
static bool read_grid_asc(FILE *f, TriMesh *mesh)
{
// TriMesh::dprintf("\n Reading range grid... ");
// int ngrid = mesh->grid_width * mesh->grid_height;
// mesh->grid.resize(ngrid, TriMesh::GRID_INVALID);
// for (int i = 0; i < ngrid; i++) {
// int n;
// if (fscanf(f, "%d", &n) != 1)
// return false;
// while (n--) {
// if (fscanf(f, "%d", &(mesh->grid[i])) != 1)
// return false;
// }
// }
// mesh->triangulate_grid();
return true;
}
// Parse a PLY property line, and figure how many bytes it represents
// Increments "len" by the number of bytes, or by 1 if !binary
static bool ply_property(const char *buf, int &len, bool binary)
{
if (LINE_IS("property char") ||
LINE_IS("property uchar") ||
LINE_IS("property int8") ||
LINE_IS("property uint8")) {
len += 1;
} else if (LINE_IS("property short") ||
LINE_IS("property ushort") ||
LINE_IS("property int16") ||
LINE_IS("property uint16")) {
len += (binary ? 2 : 1);
} else if (LINE_IS("property int") ||
LINE_IS("property uint") ||
LINE_IS("property double") ||
LINE_IS("property int32") ||
LINE_IS("property uint32") ||
LINE_IS("property float32") ||
LINE_IS("property float")) {
len += (binary ? 4 : 1);
} else if (LINE_IS("property double") ||
LINE_IS("property float64")) {
len += (binary ? 8 : 1);
} else {
fprintf(stderr, "Unsupported vertex property: %s\n", buf);
return false;
}
return true;
}
// Figure out whether this machine is little- or big-endian
static bool we_are_little_endian()
{
char buf[4];
*(int *)(&buf[0]) = 1;
return (buf[0] == 1);
}
// Figure out whether the need_swap setting makes sense, or whether this
// file incorrectly declares its endianness
static void check_need_swap(const point &p, bool &need_swap)
{
double p0 = p[0], p1 = p[1], p2 = p[2];
if (need_swap) {
swap_double(p0);
swap_double(p1);
swap_double(p2);
}
bool makes_sense = (p0 > -BIGNUM && p0 < BIGNUM &&
p1 > -BIGNUM && p1 < BIGNUM &&
p2 > -BIGNUM && p2 < BIGNUM);
if (makes_sense)
return;
swap_double(p0);
swap_double(p1);
swap_double(p2);
bool makes_sense_swapped = (p0 > -BIGNUM && p0 < BIGNUM &&
p1 > -BIGNUM && p1 < BIGNUM &&
p2 > -BIGNUM && p2 < BIGNUM);
if (makes_sense_swapped) {
fprintf(stderr, "Compensating for bogus endianness.\n");
need_swap = !need_swap;
}
}
// Check whether the indices in the file mistakenly go
// from 1..N instead of 0..N-1
static void check_ind_range(TriMesh *mesh)
{
if (mesh->faces.empty())
return;
int min_ind = mesh->faces[0][0];
int max_ind = mesh->faces[0][0];
for (int i = 0; i < mesh->faces.size(); i++) {
for (int j = 0; j < 3; j++) {
min_ind = min(min_ind, mesh->faces[i][j]);
max_ind = max(max_ind, mesh->faces[i][j]);
}
}
int nv = mesh->vertices.size();
// All good
if (min_ind == 0 && max_ind == nv-1)
return;
// Simple fix: offset everything
if (max_ind - min_ind == nv-1) {
TriMesh::dprintf("Found indices ranging from %d through %d\n",
min_ind, max_ind);
TriMesh::dprintf("Remapping to %d through %d\n", 0, nv-1);
for (int i = 0; i < mesh->faces.size(); i++)
for (int j = 0; j < 3; j++)
mesh->faces[i][j] -= min_ind;
return;
}
// Else can't do anything...
}
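// Worked example (illustrative): a file whose faces index vertices 1..nv gives
// min_ind=1 and max_ind=nv, so max_ind - min_ind == nv-1 and every index gets
// shifted down by min_ind. An already 0-based file takes the "All good" early
// return above; any other range is left untouched.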
// Skip comments in an ASCII file (lines beginning with #)
static void skip_comments(FILE *f)
{
int c;
bool in_comment = false;
while (1) {
c = fgetc(f);
if (c == EOF)
return;
if (in_comment) {
if (c == '\n')
in_comment = false;
} else if (c == '#') {
in_comment = true;
} else if (!isspace(c)) {
break;
}
}
ungetc(c, f);
}
// Tesselate an arbitrary n-gon. Appends triangles to "tris".
static void tess(const std::vector<point> &verts, const std::vector<int> &thisface,
std::vector<TriMesh::Face> &tris)
{
if (thisface.size() < 3)
return;
if (thisface.size() == 3) {
tris.push_back(TriMesh::Face(thisface[0],
thisface[1],
thisface[2]));
return;
}
if (thisface.size() == 4) {
// Triangulate in the direction that
// gives the shorter diagonal
const point &p0 = verts[thisface[0]], &p1 = verts[thisface[1]];
const point &p2 = verts[thisface[2]], &p3 = verts[thisface[3]];
double d02 = dist2(p0, p2);
double d13 = dist2(p1, p3);
int i = (d02 < d13) ? 0 : 1;
tris.push_back(TriMesh::Face(thisface[i],
thisface[(i+1)%4],
thisface[(i+2)%4]));
tris.push_back(TriMesh::Face(thisface[i],
thisface[(i+2)%4],
thisface[(i+3)%4]));
return;
}
// 5-gon or higher - just tesselate arbitrarily...
for (int i = 2; i < thisface.size(); i++)
tris.push_back(TriMesh::Face(thisface[0],
thisface[i-1],
thisface[i]));
}
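// Worked example (illustrative): for a quad with corners p0=(0,0,0), p1=(1,0,0),
// p2=(1,1,0), p3=(0,3,0), dist2(p0,p2)=2 and dist2(p1,p3)=10, so d02 < d13,
// i=0, and the quad is split along the shorter diagonal p0-p2 into the
// triangles (0,1,2) and (0,2,3).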
// Write mesh to a file
void TriMesh::write(const char *filename)
{
if (!filename || *filename == '\0')
return;
if (vertices.empty()) {
fprintf(stderr, "Empty mesh - nothing to write!\n");
return;
}
enum { PLY_ASCII, PLY_BINARY_BE, PLY_BINARY_LE,
RAY, OBJ, OFF, SM, CC } filetype;
// Default file type (binary ply selection is disabled here, so use ASCII ply)
filetype = PLY_ASCII; //we_are_little_endian() ? PLY_BINARY_LE : PLY_BINARY_BE;
bool write_norm = false;
bool float_color = false;
// Infer file type from file extension
//const char *c = strrchr(filename, '.');
//if (c) {
// if (!strncasecmp(c, ".ply", 4))
// filetype = we_are_little_endian() ?
// PLY_BINARY_LE :
// PLY_BINARY_BE;
// else if (!strncasecmp(c, ".ray", 4))
// filetype = RAY;
// else if (!strncasecmp(c, ".obj", 4))
// filetype = OBJ;
// else if (!strncasecmp(c, ".off", 4))
// filetype = OFF;
// else if (!strncasecmp(c, ".sm", 3))
// filetype = SM;
// else if (!strncasecmp(c, ".cc", 3))
// filetype = CC;
// else if (!strncasecmp(c, ".c++", 4))
// filetype = CC;
// else if (!strncasecmp(c, ".cpp", 4))
// filetype = CC;
// else if (!strncasecmp(c, ".C", 2))
// filetype = CC;
//}
// Handle filetype:filename.foo constructs
//while (1) {
// if (!strncasecmp(filename, "norm:", 5)) {
// filename += 5;
// write_norm = true;
// } else if (!strncasecmp(filename, "cflt:", 5)) {
// filename += 5;
// float_color = true;
// } else if (!strncasecmp(filename, "ply:", 4)) {
// filename += 4;
// filetype = we_are_little_endian() ?
// PLY_BINARY_LE :
// PLY_BINARY_BE;
// } else if (!strncasecmp(filename, "ply_binary:", 11)) {
// filename += 11;
// filetype = we_are_little_endian() ?
// PLY_BINARY_LE :
// PLY_BINARY_BE;
// } else if (!strncasecmp(filename, "ply_binary_be:", 14)) {
// filename += 14;
// filetype = PLY_BINARY_BE;
// } else if (!strncasecmp(filename, "ply_binary_le:", 14)) {
// filename += 14;
// filetype = PLY_BINARY_LE;
// } else if (!strncasecmp(filename, "ply_ascii:", 10)) {
// filename += 10;
// filetype = PLY_ASCII;
// } else if (!strncasecmp(filename, "ply_asc:", 8)) {
// filename += 8;
// filetype = PLY_ASCII;
// } else if (!strncasecmp(filename, "ascii:", 6)) {
// filename += 6;
// filetype = PLY_ASCII;
// } else if (!strncasecmp(filename, "asc:", 4)) {
// filename += 4;
// filetype = PLY_ASCII;
// } else if (!strncasecmp(filename, "be:", 3)) {
// filename += 3;
// filetype = PLY_BINARY_BE;
// } else if (!strncasecmp(filename, "le:", 3)) {
// filename += 3;
// filetype = PLY_BINARY_LE;
// } else if (!strncasecmp(filename, "ray:", 4)) {
// filename += 4;
// filetype = RAY;
// } else if (!strncasecmp(filename, "obj:", 4)) {
// filename += 4;
// filetype = OBJ;
// } else if (!strncasecmp(filename, "off:", 4)) {
// filename += 4;
// filetype = OFF;
// } else if (!strncasecmp(filename, "sm:", 3)) {
// filename += 3;
// filetype = SM;
// } else {
// break;
// }
//}
FILE *f = NULL;
if (strcmp(filename, "-") == 0) {
f = stdout;
filename = "standard output";
} else {
f = fopen(filename, "wb");
if (!f) {
perror("fopen");
fprintf(stderr, "Error opening %s for writing.\n", filename);
return;
}
}
dprintf("Writing %s... ", filename);
switch (filetype) {
case PLY_ASCII:
write_ply_ascii(this, f, write_norm, float_color);
break;
case PLY_BINARY_BE:
write_ply_binary(this, f,
we_are_little_endian(), write_norm, float_color);
break;
case PLY_BINARY_LE:
write_ply_binary(this, f,
!we_are_little_endian(), write_norm, float_color);
break;
case RAY:
write_ray(this, f);
break;
case OBJ:
write_obj(this, f);
break;
case OFF:
write_off(this, f);
break;
case SM:
write_sm(this, f);
break;
case CC:
write_cc(this, f, filename, write_norm, float_color);
break;
}
fclose(f);
dprintf("Done.\n");
}
// Write a ply header
static void write_ply_header(TriMesh *mesh, FILE *f, const char *format,
bool write_grid, bool write_tstrips,
bool write_norm, bool float_color)
{
fprintf(f, "ply\nformat %s 1.0\n", format);
if (write_grid) {
fprintf(f, "obj_info num_cols %d\n", mesh->grid_width);
fprintf(f, "obj_info num_rows %d\n", mesh->grid_height);
}
fprintf(f, "element vertex %lu\n",
(unsigned long) mesh->vertices.size());
fprintf(f, "property float x\n");
fprintf(f, "property float y\n");
fprintf(f, "property float z\n");
if (write_norm && !mesh->normals.empty()) {
fprintf(f, "property float nx\n");
fprintf(f, "property float ny\n");
fprintf(f, "property float nz\n");
}
if (!mesh->colors.empty() && float_color) {
fprintf(f, "property float diffuse_red\n");
fprintf(f, "property float diffuse_green\n");
fprintf(f, "property float diffuse_blue\n");
}
if (!mesh->colors.empty() && !float_color) {
fprintf(f, "property uchar diffuse_red\n");
fprintf(f, "property uchar diffuse_green\n");
fprintf(f, "property uchar diffuse_blue\n");
}
if (!mesh->confidences.empty()) {
fprintf(f, "property float confidence\n");
}
if (write_grid) {
int ngrid = mesh->grid_width * mesh->grid_height;
fprintf(f, "element range_grid %d\n", ngrid);
fprintf(f, "property list uchar int vertex_indices\n");
} else if (write_tstrips) {
fprintf(f, "element tristrips 1\n");
fprintf(f, "property list int int vertex_indices\n");
} else {
fprintf(f, "element face %lu\n",
(unsigned long) mesh->faces.size());
fprintf(f, "property list uchar int vertex_indices\n");
}
fprintf(f, "end_header\n");
}
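// Example output (illustrative): for a mesh with 8 vertices, 12 faces, and no
// normals, colors, confidences, grid, or tstrips, the call
// write_ply_header(mesh, f, "ascii", false, false, false, false) emits:
//   ply
//   format ascii 1.0
//   element vertex 8
//   property float x
//   property float y
//   property float z
//   element face 12
//   property list uchar int vertex_indices
//   end_header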
// Write an ASCII ply file
static void write_ply_ascii(TriMesh *mesh, FILE *f, bool write_norm,
bool float_color)
{
// if (write_norm)
// mesh->need_normals();
bool write_grid = !mesh->grid.empty();
bool write_tstrips = !write_grid && !mesh->tstrips.empty();
write_ply_header(mesh, f, "ascii", write_grid, write_tstrips,
write_norm, float_color);
write_verts_asc(mesh, f, "", write_norm ? " " : 0, " ", float_color,
" ", "");
if (write_grid) {
write_grid_asc(mesh, f);
} else if (write_tstrips) {
fprintf(f, "%lu ", (unsigned long) mesh->tstrips.size());
// mesh->convert_strips(TriMesh::TSTRIP_TERM);
write_strips_asc(mesh, f);
// mesh->convert_strips(TriMesh::TSTRIP_LENGTH);
} else {
write_faces_asc(mesh, f, "3 ", "");
}
}
// Write a binary ply file
static void write_ply_binary(TriMesh *mesh, FILE *f,
bool need_swap, bool write_norm, bool float_color)
{
// if (write_norm)
// mesh->need_normals();
const char *format = (need_swap ^ we_are_little_endian()) ?
"binary_little_endian" : "binary_big_endian";
bool write_grid = !mesh->grid.empty();
bool write_tstrips = !write_grid && !mesh->tstrips.empty();
write_ply_header(mesh, f, format, write_grid, write_tstrips,
write_norm, float_color);
write_verts_bin(mesh, f, need_swap, write_norm, true, float_color, true);
if (write_grid) {
write_grid_bin(mesh, f, need_swap);
} else if (write_tstrips) {
int s = mesh->tstrips.size();
if (need_swap)
swap_int(s);
fwrite(&s, 4, 1, f);
// mesh->convert_strips(TriMesh::TSTRIP_TERM);
write_strips_bin(mesh, f, need_swap);
// mesh->convert_strips(TriMesh::TSTRIP_LENGTH);
} else {
char buf[1] = { 3 };
write_faces_bin(mesh, f, need_swap, 1, buf, 0, 0);
}
}
// Write a ray file
static void write_ray(TriMesh *mesh, FILE *f)
{
// fprintf(f, "#camera 0 0 1 0 0 -1 0 1 0 0.2\n");
// fprintf(f, "#background 0 0 0\n");
// fprintf(f, "#ambient 0 0 0\n");
// fprintf(f, "#material_num 1\n");
// fprintf(f, "#material 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 -1 !!\n");
// fprintf(f, "#vertex_num %lu\n", (unsigned long) mesh->vertices.size());
// mesh->need_normals();
// write_verts_asc(mesh, f, "#vertex ", " ", 0, false, 0, " 0 0");
// mesh->need_faces();
// write_faces_asc(mesh, f, "#shape_triangle 0 ", "");
}
// Write an obj file
static void write_obj(TriMesh *mesh, FILE *f)
{
fprintf(f, "# OBJ\n");
write_verts_asc(mesh, f, "v ", 0, 0, false, 0, "");
// mesh->need_faces();
for (int i = 0; i < mesh->faces.size(); i++) {
mesh->faces[i][0]++;
mesh->faces[i][1]++;
mesh->faces[i][2]++;
}
write_faces_asc(mesh, f, "f ", "");
for (int i = 0; i < mesh->faces.size(); i++) {
mesh->faces[i][0]--;
mesh->faces[i][1]--;
mesh->faces[i][2]--;
}
}
// Write an off file
static void write_off(TriMesh *mesh, FILE *f)
{
fprintf(f, "OFF\n");
// mesh->need_faces();
fprintf(f, "%lu %lu 0\n", (unsigned long) mesh->vertices.size(),
(unsigned long) mesh->faces.size());
write_verts_asc(mesh, f, "", 0, 0, false, 0, "");
write_faces_asc(mesh, f, "3 ", "");
}
// Write an SM file
static void write_sm(TriMesh *mesh, FILE *f)
{
fprintf(f, "%lu\n", (unsigned long) mesh->vertices.size());
write_verts_asc(mesh, f, "", 0, 0, false, 0, "");
// mesh->need_faces();
fprintf(f, "%lu\n", (unsigned long) mesh->faces.size());
write_faces_asc(mesh, f, "", "");
fprintf(f, "0 0\n");
}
// Convert colors double -> uchar
static unsigned char color2uchar(double p)
{
return min(max(int(255.0f * p + 0.5f), 0), 255);
}
// Write C++ code
static void write_cc(TriMesh *mesh, FILE *f, const char *filename,
bool write_norm, bool float_color)
{
// mesh->need_faces();
// if (write_norm)
// mesh->need_normals();
char *meshname = new char[strlen(filename)+1];
strcpy(meshname, filename);
char *c = strrchr(meshname, '.');
if (c)
*c = '\0';
fprintf(f, "#include <string.h>\n");
fprintf(f, "#include \"TriMesh.h\"\n\n");
fprintf(f, "TriMesh *make_%s()\n{", meshname);
delete [] meshname;
fprintf(f, "\tstatic const double vertdata[][3] = {\n");
int nv = mesh->vertices.size(), nf = mesh->faces.size();
for (int i = 0; i < nv; i++) {
fprintf(f, "\t\t{ %.7g, %.7g, %.7g },\n",
mesh->vertices[i][0],
mesh->vertices[i][1],
mesh->vertices[i][2]);
}
fprintf(f, "\t};\n");
// if (write_norm) {
// fprintf(f, "\tstatic const double normdata[][3] = {\n");
// for (int i = 0; i < nv; i++) {
// fprintf(f, "\t\t{ %.7g, %.7g, %.7g },\n",
// mesh->normals[i][0],
// mesh->normals[i][1],
// mesh->normals[i][2]);
// }
// fprintf(f, "\t};\n");
// }
if (!mesh->colors.empty() && float_color) {
fprintf(f, "\tstatic const double colordata[][3] = {\n");
for (int i = 0; i < nv; i++) {
fprintf(f, "\t\t{ %.7g, %.7g, %.7g },\n",
mesh->colors[i][0],
mesh->colors[i][1],
mesh->colors[i][2]);
}
fprintf(f, "\t};\n");
}
if (!mesh->colors.empty() && !float_color) {
fprintf(f, "\tstatic const unsigned char colordata[][3] = {\n");
for (int i = 0; i < nv; i++) {
fprintf(f, "\t\t{ %d, %d, %d },\n",
color2uchar(mesh->colors[i][0]),
color2uchar(mesh->colors[i][1]),
color2uchar(mesh->colors[i][2]));
}
fprintf(f, "\t};\n");
}
fprintf(f, "\tstatic const int facedata[][3] = {\n");
for (int i = 0; i < nf; i++) {
fprintf(f, "\t\t{ %d, %d, %d },\n",
mesh->faces[i][0],
mesh->faces[i][1],
mesh->faces[i][2]);
}
fprintf(f, "\t};\n");
fprintf(f, "\n\tTriMesh *m = new TriMesh;\n");
fprintf(f, "\tm->vertices.resize(%d);\n", nv);
fprintf(f, "\tmemcpy(&m->vertices[0][0], vertdata, sizeof(vertdata));\n");
if (!mesh->colors.empty()) {
fprintf(f, "\tm->colors.resize(%d);\n", nv);
fprintf(f, "\tmemcpy(&m->colors[0][0], colordata, sizeof(colordata));\n");
}
// if (write_norm) {
// fprintf(f, "\tm->normals.resize(%d);\n", nv);
// fprintf(f, "\tmemcpy(&m->normals[0][0], normdata, sizeof(normdata));\n");
// }
fprintf(f, "\tm->faces.resize(%d);\n", nf);
fprintf(f, "\tmemcpy(&m->faces[0][0], facedata, sizeof(facedata));\n");
fprintf(f, "\n\treturn m;\n");
fprintf(f, "}\n");
}
// Write a bunch of vertices to an ASCII file
static void write_verts_asc(TriMesh *mesh, FILE *f,
const char *before_vert,
const char *before_norm,
const char *before_color,
bool float_color,
const char *before_conf,
const char *after_line)
{
for (int i = 0; i < mesh->vertices.size(); i++) {
fprintf(f, "%s%.7g %.7g %.7g", before_vert,
mesh->vertices[i][0],
mesh->vertices[i][1],
mesh->vertices[i][2]);
if (!mesh->normals.empty() && before_norm)
fprintf(f, "%s%.7g %.7g %.7g", before_norm,
mesh->normals[i][0],
mesh->normals[i][1],
mesh->normals[i][2]);
if (!mesh->colors.empty() && before_color && float_color)
fprintf(f, "%s%.7g %.7g %.7g", before_color,
mesh->colors[i][0],
mesh->colors[i][1],
mesh->colors[i][2]);
if (!mesh->colors.empty() && before_color && !float_color)
fprintf(f, "%s%d %d %d", before_color,
color2uchar(mesh->colors[i][0]),
color2uchar(mesh->colors[i][1]),
color2uchar(mesh->colors[i][2]));
if (!mesh->confidences.empty() && before_conf)
fprintf(f, "%s%.7g", before_conf, mesh->confidences[i]);
fprintf(f, "%s\n", after_line);
}
}
// Write a bunch of vertices to a binary file
static void write_verts_bin(TriMesh *mesh, FILE *f, bool need_swap,
bool write_norm, bool write_color,
bool float_color, bool write_conf)
{
if (need_swap) {
for (int i = 0; i < mesh->vertices.size(); i++) {
swap_double(mesh->vertices[i][0]);
swap_double(mesh->vertices[i][1]);
swap_double(mesh->vertices[i][2]);
}
if (!mesh->normals.empty()) {
for (int i = 0; i < mesh->normals.size(); i++) {
swap_double(mesh->normals[i][0]);
swap_double(mesh->normals[i][1]);
swap_double(mesh->normals[i][2]);
}
}
if (!mesh->colors.empty() && float_color) {
for (int i = 0; i < mesh->colors.size(); i++) {
swap_double(mesh->colors[i][0]);
swap_double(mesh->colors[i][1]);
swap_double(mesh->colors[i][2]);
}
}
if (!mesh->confidences.empty()) {
for (int i = 0; i < mesh->confidences.size(); i++)
swap_double(mesh->confidences[i]);
}
}
if ((mesh->normals.empty() || !write_norm) &&
(mesh->colors.empty() || !write_color) &&
(mesh->confidences.empty() || !write_conf)) {
// Optimized vertex-only code
fwrite(&(mesh->vertices[0][0]), 12*mesh->vertices.size(), 1, f);
} else {
// Generic code
for (int i = 0; i < mesh->vertices.size(); i++) {
fwrite(&(mesh->vertices[i][0]), 12, 1, f);
if (!mesh->normals.empty() && write_norm)
fwrite(&(mesh->normals[i][0]), 12, 1, f);
if (!mesh->colors.empty() && write_color && float_color)
fwrite(&(mesh->colors[i][0]), 12, 1, f);
if (!mesh->colors.empty() && write_color && !float_color) {
char c[3] = {
color2uchar(mesh->colors[i][0]),
color2uchar(mesh->colors[i][1]),
color2uchar(mesh->colors[i][2]) };
fwrite(&c, 3, 1, f);
}
if (!mesh->confidences.empty() && write_conf)
fwrite(&(mesh->confidences[i]), 4, 1, f);
}
}
if (need_swap) {
for (int i = 0; i < mesh->vertices.size(); i++) {
swap_double(mesh->vertices[i][0]);
swap_double(mesh->vertices[i][1]);
swap_double(mesh->vertices[i][2]);
}
if (!mesh->normals.empty()) {
for (int i = 0; i < mesh->normals.size(); i++) {
swap_double(mesh->normals[i][0]);
swap_double(mesh->normals[i][1]);
swap_double(mesh->normals[i][2]);
}
}
if (!mesh->colors.empty() && float_color) {
for (int i = 0; i < mesh->colors.size(); i++) {
swap_double(mesh->colors[i][0]);
swap_double(mesh->colors[i][1]);
swap_double(mesh->colors[i][2]);
}
}
if (!mesh->confidences.empty()) {
for (int i = 0; i < mesh->confidences.size(); i++)
swap_double(mesh->confidences[i]);
}
}
}
// Write a bunch of faces to an ASCII file
static void write_faces_asc(TriMesh *mesh, FILE *f,
const char *before_face, const char *after_line)
{
// mesh->need_faces();
for (int i = 0; i < mesh->faces.size(); i++) {
fprintf(f, "%s%d %d %d%s\n", before_face, mesh->faces[i][0],
mesh->faces[i][1], mesh->faces[i][2], after_line);
}
}
// Write a bunch of faces to a binary file
static void write_faces_bin(TriMesh *mesh, FILE *f, bool need_swap,
int before_face_len, const char *before_face,
int after_face_len, const char *after_face)
{
if (need_swap) {
for (int i = 0; i < mesh->faces.size(); i++) {
swap_int(mesh->faces[i][0]);
swap_int(mesh->faces[i][1]);
swap_int(mesh->faces[i][2]);
}
}
for (int i = 0; i < mesh->faces.size(); i++) {
if (before_face_len)
fwrite(before_face, before_face_len, 1, f);
fwrite(&(mesh->faces[i][0]), 12, 1, f);
if (after_face_len)
fwrite(after_face, after_face_len, 1, f);
}
if (need_swap) {
for (int i = 0; i < mesh->faces.size(); i++) {
swap_int(mesh->faces[i][0]);
swap_int(mesh->faces[i][1]);
swap_int(mesh->faces[i][2]);
}
}
}
// Write tstrips to an ASCII file
static void write_strips_asc(TriMesh *mesh, FILE *f)
{
// for (int i = 0; i < mesh->tstrips.size(); i++) {
// fprintf(f, "%d ", mesh->tstrips[i]);
// }
// fprintf(f, "\n");
}
// Write tstrips to a binary file
static void write_strips_bin(TriMesh *mesh, FILE *f, bool need_swap)
{
if (need_swap) {
for (int i = 0; i < mesh->tstrips.size(); i++)
swap_int(mesh->tstrips[i]);
}
fwrite(&(mesh->tstrips[0]), 4*mesh->tstrips.size(), 1, f);
if (need_swap) {
for (int i = 0; i < mesh->tstrips.size(); i++)
swap_int(mesh->tstrips[i]);
}
}
// Write range grid to an ASCII file
static void write_grid_asc(TriMesh *mesh, FILE *f)
{
for (int i = 0; i < mesh->grid.size(); i++) {
if (mesh->grid[i] < 0)
fprintf(f, "0\n");
else
fprintf(f, "1 %d\n", mesh->grid[i]);
}
}
// Write range grid to a binary file
static void write_grid_bin(TriMesh *mesh, FILE *f, bool need_swap)
{
unsigned char zero = 0;
unsigned char one = 1;
for (int i = 0; i < mesh->grid.size(); i++) {
if (mesh->grid[i] < 0) {
fwrite(&zero, 1, 1, f);
} else {
fwrite(&one, 1, 1, f);
int g = mesh->grid[i];
if (need_swap)
swap_int(g);
fwrite(&g, 4, 1, f);
}
}
}
// Debugging printout, controllable by a "verbose"ness parameter
int TriMesh::verbose = 1;
void TriMesh::set_verbose(int verbose_)
{
verbose = verbose_;
}
int TriMesh::dprintf(const char *format, ...)
{
if (!verbose)
return 0;
va_list ap;
va_start(ap, format);
int ret = vfprintf(stderr, format, ap);
va_end(ap);
fflush(stderr);
return ret;
}
|
the_stack
|
namespace AggMIS {
namespace Aggregation {
Types::IntVector_h* AggregateToNearest(Types::Graph_h &graph,
Types::IntVector_h &roots) {
// Allocating an array for distances:
Types::IntVector_h rootDistance(roots.size());
// Allocating return array
Types::IntVector_h *aggregation = new Types::IntVector_h(roots.size());
// A queue of unallocated nodes
std::queue<int> toAllocate;
// Assigning initial distances, numbering aggregates, and adding
// nodes to queue for allocation.
int nextAggId = 0;
for (int i = 0; i < rootDistance.size(); i++) {
// If node is root assign Id and add neighbors to queue
if (roots[i] == 1) {
rootDistance[i] = 0;
(*aggregation)[i] = nextAggId++;
// Adding neighbors to queue to handle
int start = (*(graph.indices))[i];
int end = (*(graph.indices))[i + 1];
for (int nIt = start; nIt < end; nIt++)
toAllocate.push((*(graph.adjacency))[nIt]);
}
// If node is not root mark as unassigned
else {
rootDistance[i] = -1;
(*aggregation)[i] = -1;
}
}
// Handling unallocated nodes in the queue:
while (!toAllocate.empty())
{
// Pull node off queue
int node = toAllocate.front();
toAllocate.pop();
// Check if already handled
if ((*aggregation)[node] != -1 && rootDistance[node] != -1)
continue;
// Check its neighbors to find where to allocate
int newAgg = -1;
int bestDistance = -1;
int start = (*(graph.indices))[node];
int end = (*(graph.indices))[node + 1];
for (int nIt = start; nIt < end; nIt++)
{
int neighbor = (*(graph.adjacency))[nIt];
int neighborDist = rootDistance[neighbor];
// We only care about non-negative distances
if (neighborDist >= 0)
{
int neighborAgg = (*aggregation)[neighbor];
// If this is the first real distance seen take it
if (bestDistance == -1)
{
bestDistance = neighborDist;
newAgg = (*aggregation)[neighbor];
}
// If this distance ties break tie with root id
// else if (neighborDist == bestDistance && rootPoints[neighborAgg] > rootPoints[newAgg])
// newAgg = aggregation[neighbor];
// If this distance is better take it
else if (neighborDist < bestDistance)
{
newAgg = (*aggregation)[neighbor];
bestDistance = neighborDist;
}
}
// If the neighbor is unallocated add to queue
else
toAllocate.push(neighbor);
}
// Set aggregate of current node:
(*aggregation)[node] = newAgg;
rootDistance[node] = bestDistance + 1;
}
// Clean up temp vector
rootDistance.clear();
return aggregation;
}
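// Worked example (illustrative, not from the original source): on a path graph
// 0-1-2-3-4 with roots = {1,0,0,0,1}, nodes 0 and 4 become aggregates 0 and 1
// at rootDistance 0, nodes 1 and 3 join them at distance 1, and node 2 is
// equidistant from both roots; since the root-id tie-break above is commented
// out, it keeps the aggregate of the first equally-close neighbor found in its
// adjacency list.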
bool IsValidAggregation(Types::Graph_h &graph,
Types::IntVector_h &aggregation,
bool verbose) {
int errorsFound = 0;
Types::IntVector_h* ps = GetPartSizes(aggregation);
Types::IntVector_h &partSizes = *ps;
Types::IntVector_h visitedNodes(graph.Size(), 0);
Types::IntVector_h exploredAggregates(partSizes.size(), 0);
std::set<int> problemAggregates;
std::queue<int> toExplore;
for (int i = 0; i < graph.Size(); i++)
{
int thisAggregate = aggregation[i];
if (exploredAggregates[thisAggregate] == 0)
{
// Explore the aggregate starting from this node
toExplore.push(i);
while(!toExplore.empty()) {
int node = toExplore.front();
toExplore.pop();
if (visitedNodes[node] == 0) {
visitedNodes[node] = 1;
int start = (*(graph.indices))[node];
int end = (*(graph.indices))[node + 1];
for (int nIt = start; nIt < end; nIt++) {
int neighbor = (*(graph.adjacency))[nIt];
if (aggregation[neighbor] == thisAggregate)
toExplore.push(neighbor);
}
}
}
exploredAggregates[thisAggregate] = 1;
}
else if (visitedNodes[i] == 0)
{
// This node is not connected to others in the same aggregate
if (verbose)
printf("Node %d in aggregate %d was not visited but aggregate %d was explored!\n", i, thisAggregate, thisAggregate);
problemAggregates.insert(thisAggregate);
errorsFound++;
}
}
if (errorsFound > 0)
{
printf("Found %d errors while checking aggregation!\n", errorsFound);
std::set<int>::iterator it;
for (it = problemAggregates.begin(); it != problemAggregates.end(); it++)
printf("\t%d", *it);
printf("\n");
return false;
}
return true;
}
Types::IntVector_h* GetPartSizes(Types::IntVector_h &aggregation) {
// Allocate return array
Types::IntVector_h *partSizes = new Types::IntVector_h();
// Iterate over the aggregation and count nodes
for (int i = 0; i < aggregation.size(); i++) {
int part = aggregation[i];
if (part >= partSizes->size())
partSizes->resize(part + 1, 0);
(*partSizes)[part]++;
}
return partSizes;
}
Types::IntVector_h* GetPartSizes(Types::IntVector_h &aggregation,
Types::IntVector_h &nodeWeights) {
// Allocate return array
Types::IntVector_h *partSizes = new Types::IntVector_h();
// Iterate over the aggregation and accumulate node weights
for (int i = 0; i < aggregation.size(); i++) {
int part = aggregation[i];
if (part >= partSizes->size())
partSizes->resize(part + 1, 0);
(*partSizes)[part] += nodeWeights[i];
}
return partSizes;
}
std::vector<std::vector<int> >* GetAggregateGraph(Types::Graph_h& graph,
std::vector<int> &nodeList) {
// Create the return structure.
std::vector<std::vector<int> > *aggGraph = new std::vector<std::vector<int> >(nodeList.size());
// Fill the adjacency by translating the adjacency to local indices
for (int nIt = 0; nIt < nodeList.size(); nIt++) {
for (int* n = graph.nStart(nodeList[nIt]);
n != graph.nEnd(nodeList[nIt]);
n++) {
// Trying to find the neighbor in aggregate's nodeList
int localId = Helper::BinarySearch(*n, &nodeList[0], nodeList.size());
// If found add ID to neighbors
if (localId != -1)
(*aggGraph)[nIt].push_back(localId);
}
}
return aggGraph;
}
int FindFarthestNode(std::vector<std::vector<int> > &graph,
int start) {
// Data structures for flood fill
std::vector<int> distances(graph.size(), -1);
std::queue<int> toExplore;
toExplore.push(start);
distances[start] = 0;
int farthestNode = start;
int maxDistance = 0;
while (!toExplore.empty())
{
// Getting next node off of queue
int explorer = toExplore.front();
toExplore.pop();
int distance = distances[explorer] + 1;
// Checking the neighbors to see if they need to go on the queue
for (int nIt = 0; nIt < graph[explorer].size(); nIt++)
{
int neighbor = graph[explorer][nIt];
if (distances[neighbor] == -1)
{
if (distance > maxDistance)
{
farthestNode = neighbor;
maxDistance = distance;
}
distances[neighbor] = distance;
toExplore.push(neighbor);
}
}
}
return farthestNode;
}
void MarkDistances(std::vector<std::vector<int> >& graph,
std::vector<int>& distances,
int startPoint) {
// Put single start point into vector and call vector version.
std::vector<int> startPoints;
startPoints.push_back(startPoint);
MarkDistances(graph, distances, startPoints);
}
void MarkDistances(std::vector<std::vector<int> >& graph,
std::vector<int>& distances,
std::vector<int> startPoints) {
// Initialize data structures for flood fill
distances.assign(graph.size(), -1);
std::queue<int> toExplore;
// Handle start points
for (int i = 0; i < startPoints.size(); i++) {
toExplore.push(startPoints[i]);
distances[startPoints[i]] = 0;
}
// Explore the rest of the graph
while (!toExplore.empty())
{
// Getting next node off of queue
int explorer = toExplore.front();
toExplore.pop();
// Checking the neighbors to see if they need to go on the queue
for (int nIt = 0; nIt < graph[explorer].size(); nIt++)
{
int neighbor = graph[explorer][nIt];
if (distances[neighbor] == -1) {
distances[neighbor] = distances[explorer] + 1;
toExplore.push(neighbor);
}
}
}
}
int FindMassScore(std::vector<std::vector<int> >& graph,
int startPoint) {
// Initialize data structures for flood fill
std::vector<int> distances(graph.size(), -1);
std::queue<int> toExplore;
// Put start point on queue
toExplore.push(startPoint);
distances[startPoint] = 0;
// Explore the rest of the graph
int score = 0;
while (!toExplore.empty())
{
// Getting next node off of queue
int explorer = toExplore.front();
toExplore.pop();
// Add score of current node to total
score += distances[explorer];
// Checking the neighbors to see if they need to go on the queue
for (int nIt = 0; nIt < graph[explorer].size(); nIt++)
{
int neighbor = graph[explorer][nIt];
if (distances[neighbor] == -1) {
distances[neighbor] = distances[explorer] + 1;
toExplore.push(neighbor);
}
}
}
return score;
}
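// Worked example (illustrative): on a path graph 0-1-2 the mass score is the
// sum of BFS distances to all nodes, so FindMassScore(graph,0) = 0+1+2 = 3,
// FindMassScore(graph,1) = 1+0+1 = 2, and FindMassScore(graph,2) = 3; the
// GetCentroid routine below walks downhill on this score and would settle on
// node 1.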
std::vector<int>* GetCentroid(std::vector<std::vector<int> >& graph,
int startPoint) {
std::vector<int> scores(graph.size(), -1);
int currentNode = startPoint;
// Find score for first node
int bestScore = FindMassScore(graph, currentNode);
scores[currentNode] = bestScore;
bool betterFound = true;
while(betterFound)
{
betterFound = false;
for (int i = 0; i < graph[currentNode].size() && !betterFound; i++)
{
int neighbor = graph[currentNode][i];
if (scores[neighbor] == -1)
scores[neighbor] = FindMassScore(graph, neighbor); // score the neighbor itself, not the current node
if (scores[neighbor] < bestScore) {
bestScore = scores[neighbor];
currentNode = neighbor;
betterFound = true;
}
}
}
// Find any adjacent nodes with equivalent score
std::vector<int> *result = new std::vector<int>();
result->push_back(currentNode);
for (int i = 0; i < graph[currentNode].size(); i++)
{
int neighbor = graph[currentNode][i];
if (scores[neighbor] == -1)
scores[neighbor] = FindMassScore(graph, neighbor); // score the neighbor itself, not the current node
if (scores[neighbor] == bestScore) {
result->push_back(neighbor);
}
}
return result;
}
}
}
|
the_stack
|
// "* 1" removes the warning:
// enumeral mismatch in conditional expression: ‘<anonymous enum>’ vs ‘<anonymous enum>’
__host__ __device__
inline int Dir_x( int octant ) { return octant & (1<<0) ? DIR_DN * 1: DIR_UP * 1; }
__host__ __device__
inline int Dir_y( int octant ) { return octant & (1<<1) ? DIR_DN * 1: DIR_UP * 1; }
__host__ __device__
inline int Dir_z( int octant ) { return octant & (1<<2) ? DIR_DN * 1 : DIR_UP * 1; }
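// Example (illustrative): octant 6 is binary 110, so Dir_x(6) is DIR_UP (bit 0
// clear) while Dir_y(6) and Dir_z(6) are DIR_DN (bits 1 and 2 set); the eight
// octants enumerate the sign combinations of the three sweep directions.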
int Arguments_exists( const Arguments* args, const char* arg_name )
{
int result = 0;
int i = 0;
for( i=0; i<args->argc; ++i )
{
if( args->argv_unconsumed[i] == NULL )
{
continue;
}
result = result || strcmp( args->argv_unconsumed[i], arg_name ) == 0;
}
return result;
}
/*===========================================================================*/
/* Process an argument of type int, remove from list---*/
int Arguments_consume_int_( Arguments* args,
const char* arg_name )
{
int result = 0;
int found = 0;
if( found ) {} /*---Remove unused var warning---*/
int i = 0;
for( i=0; i<args->argc; ++i )
{
if( args->argv_unconsumed[i] == NULL )
{
continue;
}
if( strcmp( args->argv_unconsumed[i], arg_name ) == 0 )
{
found = 1;
args->argv_unconsumed[i] = NULL;
++i;
assert( i<args->argc );
result = atoi( args->argv_unconsumed[i] );
args->argv_unconsumed[i] = NULL;
}
}
return result;
}
/*===========================================================================*/
/* Consume an argument of type int, if not present then set to a default---*/
int Arguments_consume_int_or_default( Arguments* args,
const char* arg_name,
int default_value )
{
assert( args != NULL );
assert( arg_name != NULL );
return Arguments_exists( args, arg_name ) ?
Arguments_consume_int_( args, arg_name ) : default_value;
}
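/* Usage sketch (illustrative; the actual option names depend on the caller):
if argv_unconsumed holds the pair "--ncell_x" "16", then
Arguments_consume_int_or_default( args, "--ncell_x", 8 ) finds the flag,
returns 16, and NULLs both entries so they cannot be consumed twice; if the
flag is absent it simply returns the default value 8. */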
/*===========================================================================*/
/* Pseudo-destructor for Arguments struct---*/
void Arguments_destroy( Arguments* args )
{
assert( args != NULL );
free( (void*) args->argv_unconsumed );
if( args->argstring )
{
free( (void*) args->argstring );
}
} /*---Arguments_destroy---*/
static inline int Quantities_scalefactor_space_( int ix_g, int iy_g, int iz_g )
{
int result = 0;
#ifndef RELAXED_TESTING
const int im = 134456;
const int ia = 8121;
const int ic = 28411;
result = ( (result+(ix_g+2))*ia + ic ) % im;
result = ( (result+(iy_g+2))*ia + ic ) % im;
result = ( (result+(iz_g+2))*ia + ic ) % im;
result = ( (result+(ix_g+3*iy_g+7*iz_g+2))*ia + ic ) % im;
result = ix_g+3*iy_g+7*iz_g+2;
result = result & ( (1<<2) - 1 );
#endif
result = 1 << result;
return result;
}
static inline int Quantities_scalefactor_energy_( int ie, Dimensions dims )
{
/*---Random power-of-two multiplier for each energy group,
to help catch errors regarding indexing of energy groups.
---*/
assert( ie >= 0 && ie < dims.ne );
const int im = 714025;
const int ia = 1366;
const int ic = 150889;
int result = ( (ie)*ia + ic ) % im;
result = result & ( (1<<2) - 1 );
result = 1 << result;
return result;
}
static inline int Quantities_scalefactor_unknown_( int iu )
{
/*---Random power-of-two multiplier for each cell unknown,
to help catch errors regarding indexing of cell unknowns.
---*/
assert( iu >= 0 && iu < NU );
const int im = 312500;
const int ia = 741;
const int ic = 66037;
int result = ( (iu)*ia + ic ) % im;
result = result & ( (1<<2) - 1 );
result = 1 << result;
return result;
}
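/* Note (illustrative): each scalefactor helper above masks an LCG value with
(1<<2)-1 to get a number in 0..3 and then returns 1 << result, so the
multiplier is always one of 1, 2, 4, or 8. Multiplying by a power of two is
exact in floating point, which lets indexing mistakes change the checked
results without introducing rounding noise. */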
void initialize_input_state( P* const __restrict__ v,
const Dimensions dims,
const int nu)
{
for( int iz=0; iz<dims.ncell_z; ++iz )
for( int iy=0; iy<dims.ncell_y; ++iy )
for( int ix=0; ix<dims.ncell_x; ++ix )
for( int ie=0; ie<dims.ne; ++ie )
for( int im=0; im<dims.nm; ++im )
for( int iu=0; iu<nu; ++iu )
{
v[im + dims.nm * (
iu + nu * (
ix + dims.ncell_x * (
iy + dims.ncell_y * (
ie + dims.ne * (
iz + dims.ncell_z * ( /*---NOTE: This axis MUST be slowest-varying---*/
0 ))))))] =
( (P) (1 + im ) )
* ( (P) Quantities_scalefactor_space_(ix, iy, iz) )
* ( (P) Quantities_scalefactor_energy_( ie, dims ) )
* ( (P) Quantities_scalefactor_unknown_( iu ) );
}
}
size_t Dimensions_size_state( const Dimensions dims, int nu )
{
return ( (size_t)dims.ncell_x )
* ( (size_t)dims.ncell_y )
* ( (size_t)dims.ncell_z )
* ( (size_t)dims.ne )
* ( (size_t)dims.nm )
* ( (size_t)nu );
}
size_t Dimensions_size_facexy( const Dimensions dims,
int nu,
int num_face_octants_allocated )
{
return ( (size_t)dims.ncell_x )
* ( (size_t)dims.ncell_y )
* ( (size_t)dims.ne )
* ( (size_t)dims.na )
* ( (size_t)nu )
* ( (size_t)num_face_octants_allocated );
}
size_t Dimensions_size_facexz( const Dimensions dims,
int nu,
int num_face_octants_allocated )
{
return ( (size_t)dims.ncell_x )
* ( (size_t)dims.ncell_z )
* ( (size_t)dims.ne )
* ( (size_t)dims.na )
* ( (size_t)nu )
* ( (size_t)num_face_octants_allocated );
}
/*---------------------------------------------------------------------------*/
size_t Dimensions_size_faceyz( const Dimensions dims,
int nu,
int num_face_octants_allocated )
{
return ( (size_t)dims.ncell_y )
* ( (size_t)dims.ncell_z )
* ( (size_t)dims.ne )
* ( (size_t)dims.na )
* ( (size_t)nu )
* ( (size_t)num_face_octants_allocated );
}
int StepScheduler_nblock( const StepScheduler* stepscheduler )
{
return stepscheduler->nblock_z_;
}
int StepScheduler_nstep( const StepScheduler* stepscheduler )
{
int result = 0; // no step on error
switch( stepscheduler->nblock_octant_ )
{
case 8:
result = 8 * StepScheduler_nblock( stepscheduler )
+ 2 * ( stepscheduler->nproc_x_ - 1 )
+ 3 * ( stepscheduler->nproc_y_ - 1 );
break;
case 4:
result = 4 * StepScheduler_nblock( stepscheduler )
+ 1 * ( stepscheduler->nproc_x_ - 1 )
+ 2 * ( stepscheduler->nproc_y_ - 1 );
break;
case 2:
result = 2 * StepScheduler_nblock( stepscheduler )
+ 1 * ( stepscheduler->nproc_x_ - 1 )
+ 1 * ( stepscheduler->nproc_y_ - 1 );
break;
case 1:
result = 1 * StepScheduler_nblock( stepscheduler )
+ 1 * ( stepscheduler->nproc_x_ - 1 )
+ 1 * ( stepscheduler->nproc_y_ - 1 );
break;
default:
printf("Error: unknown nblock octant %d. ", stepscheduler->nblock_octant_);
printf("The value of next step is 0\n");
break;
}
return result;
}
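/* Worked example (illustrative): with nblock_octant_ = 8, nblock = 4,
nproc_x_ = 2 and nproc_y_ = 3, the 8-octant case above gives
nstep = 8*4 + 2*(2-1) + 3*(3-1) = 32 + 2 + 6 = 40 sweep steps. */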
StepInfo StepScheduler_stepinfo( const StepScheduler* stepscheduler,
const int step,
const int octant_in_block,
const int proc_x,
const int proc_y )
{
assert( octant_in_block>=0 &&
octant_in_block * stepscheduler->nblock_octant_ < NOCTANT );
/*
const int nblock_octant = stepscheduler->nblock_octant_;
*/
const int nproc_x = stepscheduler->nproc_x_;
const int nproc_y = stepscheduler->nproc_y_;
const int nblock = StepScheduler_nblock( stepscheduler );
const int nstep = StepScheduler_nstep( stepscheduler );
const int noctant_per_block = stepscheduler->noctant_per_block_;
int octant_key = 0;
int wave = 0;
int step_base = 0;
int block = 0;
int octant = 0;
int dir_x = 0;
int dir_y = 0;
int dir_z = 0;
int start_x = 0;
int start_y = 0;
int start_z = 0;
int folded_octant = 0;
int folded_block = 0;
StepInfo stepinfo;
const int octant_selector[NOCTANT] = { 0, 4, 2, 6, 3, 7, 1, 5 };
const int is_folded_x = noctant_per_block >= 2;
const int is_folded_y = noctant_per_block >= 4;
const int is_folded_z = noctant_per_block >= 8;
const int folded_proc_x = ( is_folded_x && ( octant_in_block & (1<<0) ) )
? ( nproc_x - 1 - proc_x )
: proc_x;
const int folded_proc_y = ( is_folded_y && ( octant_in_block & (1<<1) ) )
? ( nproc_y - 1 - proc_y )
: proc_y;
/*===========================================================================
For a given step and octant_in_block, the following computes the
octant block (i.e., octant step), from which the octant can be
computed, and the wavefront number, starting from the relevant begin
corner of the selected octant.
For the nblock_octant==8 case, the 8 octants are processed in sequence,
in the order xyz = +++, ++-, -++, -+-, --+, ---, +-+, +--.
This order is chosen to "pack" the wavefronts to minimize
the KBA wavefront startup latency.
For nblock_octant=k for some smaller k, this sequence is divided into
subsequences of length k, and each subsequence defines the schedule
for a given octant_in_block.
The code below is essentially a search into the first subsequence
to determine where the requested step is located. Locations in
the other subsequences can be derived from this.
NOTE: the following does not address possibility that for a single
step, two or more octants could update the same block.
===========================================================================*/
wave = step - ( step_base );
octant_key = 0;
step_base += nblock;
if ( step >= ( step_base + folded_proc_x
+ folded_proc_y ) && ! is_folded_z )
{
wave = step - ( step_base );
octant_key = 1;
}
step_base += nblock;
if ( step >= ( step_base + folded_proc_x
+ folded_proc_y ) && ! is_folded_y )
{
wave = step - ( step_base + (nproc_y-1) );
octant_key = 2;
}
step_base += nblock + (nproc_y-1);
if ( step >= ( step_base + (nproc_y-1-folded_proc_y)
+ folded_proc_x ) && ! is_folded_y )
{
wave = step - ( step_base );
octant_key = 3;
}
step_base += nblock;
if ( step >= ( step_base + (nproc_y-1-folded_proc_y)
+ folded_proc_x ) && ! is_folded_x )
{
wave = step - ( step_base + (nproc_x-1) );
octant_key = 4;
}
step_base += nblock + (nproc_x-1);
if ( step >= ( step_base + (nproc_y-1-folded_proc_y)
+ (nproc_x-1-folded_proc_x) ) && ! is_folded_x )
{
wave = step - ( step_base );
octant_key = 5;
}
step_base += nblock;
if ( step >= ( step_base + (nproc_y-1-folded_proc_y)
+ (nproc_x-1-folded_proc_x) ) && ! is_folded_x )
{
wave = step - ( step_base + (nproc_y-1) );
octant_key = 6;
}
step_base += nblock + (nproc_y-1);
if ( step >= ( step_base + folded_proc_y
+ (nproc_x-1-folded_proc_x) ) && ! is_folded_x )
{
wave = step - ( step_base );
octant_key = 7;
}
folded_octant = octant_selector[ octant_key ];
octant = folded_octant + octant_in_block;
/*---Next convert the wavefront number to a block number based on
location in the domain. Use the equation that defines the plane.
---*/
dir_x = Dir_x( folded_octant );
dir_y = Dir_y( folded_octant );
dir_z = Dir_z( folded_octant );
/*---Get coordinates of the starting corner block of the wavefront---*/
start_x = dir_x==DIR_UP ? 0 : ( nproc_x - 1 );
start_y = dir_y==DIR_UP ? 0 : ( nproc_y - 1 );
start_z = dir_z==DIR_UP ? 0 : ( nblock - 1 );
/*---Get coordinate of block on this processor to be processed---*/
folded_block = ( wave - ( start_x + folded_proc_x * dir_x )
- ( start_y + folded_proc_y * dir_y )
- ( start_z ) ) / dir_z;
block = ( is_folded_z && ( octant_in_block & (1<<2) ) )
? ( nblock - 1 - folded_block )
: folded_block;
/*---Now determine whether the block calculation is active based on whether
the block in question falls within the physical domain.
---*/
stepinfo.is_active = block >= 0 && block < nblock &&
step >= 0 && step < nstep &&
proc_x >= 0 && proc_x < nproc_x &&
proc_y >= 0 && proc_y < nproc_y;
/*---Set remaining values---*/
stepinfo.block_z = stepinfo.is_active ? block : 0;
stepinfo.octant = octant;
return stepinfo;
}
double get_time()
{
struct timeval tv;
int i = gettimeofday( &tv, NULL );
double result = ( (double) tv.tv_sec +
(double) tv.tv_usec * 1.e-6 );
return result;
}
double Quantities_flops_per_solve( const Dimensions dims )
{
return 3. + 3. * NDIM;
}
/*---Size of state vector in angles space---*/
size_t Dimensions_size_state_angles( const Dimensions dims, int nu )
{
return ( (size_t)dims.ncell_x )
* ( (size_t)dims.ncell_y )
* ( (size_t)dims.ncell_z )
* ( (size_t)dims.ne )
* ( (size_t)dims.na )
* ( (size_t)nu )
* ( (size_t)NOCTANT );
}
|
the_stack
|
namespace amgx
{
/***************************************
* Source Definitions
***************************************/
template <class T_Config>
void CommsMPIDirect<T_Config>::exchange_matrix_halo(IVector_Array &row_offsets,
I64Vector_Array &col_indices,
MVector_Array &values,
I64Vector_Array &halo_row_ids,
IVector_h &neighbors_list,
int global_id)
{
if (TConfig::memSpace == AMGX_host)
{
FatalError("MPI Comms module no implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
#ifdef AMGX_WITH_MPI
int total = 0;
MPI_Comm mpi_comm = CommsMPIHostBufferStream<T_Config>::get_mpi_comm();
std::vector<MPI_Request> &requests = CommsMPIHostBufferStream<T_Config>::get_requests();
MPI_Comm_size( mpi_comm, &total );
int num_neighbors = neighbors_list.size();
IVector_Array local_row_offsets(num_neighbors);
I64Vector_Array local_col_indices(num_neighbors);
MVector_Array local_values(num_neighbors);
I64Vector_Array local_row_ids(0);
if (halo_row_ids.size() != 0)
{
local_row_ids.resize(num_neighbors);
}
// send metadata
std::vector<INDEX_TYPE> metadata(num_neighbors * 2); // num_rows+1, num_nz
for (int i = 0; i < num_neighbors; i++)
{
metadata[i * 2 + 0] = row_offsets[i].size();
metadata[i * 2 + 1] = col_indices[i].size();
MPI_Isend(&metadata[i * 2 + 0], 2, MPI_INT, neighbors_list[i], 0, mpi_comm, &requests[i]);
}
// receive metadata
std::vector<INDEX_TYPE> metadata_recv(2);
for (int i = 0; i < num_neighbors; i++)
{
MPI_Recv(&metadata_recv[0], 2, MPI_INT, neighbors_list[i], 0, mpi_comm, MPI_STATUSES_IGNORE);
local_row_offsets[i].resize(metadata_recv[0]);
local_col_indices[i].resize(metadata_recv[1]);
local_values[i].resize(metadata_recv[1]);
if (local_row_ids.size() != 0)
{
if (metadata_recv[0] - 1 > 0)
{
local_row_ids[i].resize(metadata_recv[0] - 1); // row_ids is one smaller than row_offsets
}
}
}
MPI_Waitall(num_neighbors, &requests[0], MPI_STATUSES_IGNORE); // data is already received, just closing the handles
// receive matrix data
typedef typename T_Config::MatPrec mvalue;
for (int i = 0; i < num_neighbors; i++)
{
MPI_Irecv(local_row_offsets[i].raw(), local_row_offsets[i].size(), MPI_INT, neighbors_list[i], 10 * neighbors_list[i] + 0, mpi_comm, &requests[3 * num_neighbors + i]);
MPI_Irecv(local_col_indices[i].raw(), local_col_indices[i].size()*sizeof(int64_t), MPI_BYTE, neighbors_list[i], 10 * neighbors_list[i] + 1, mpi_comm, &requests[4 * num_neighbors + i]);
MPI_Irecv(local_values[i].raw(), local_values[i].size()*sizeof(mvalue), MPI_BYTE, neighbors_list[i], 10 * neighbors_list[i] + 2, mpi_comm, &requests[5 * num_neighbors + i]);
if (halo_row_ids.size() != 0)
{
MPI_Irecv(local_row_ids[i].raw(), local_row_ids[i].size()*sizeof(int64_t), MPI_BYTE, neighbors_list[i], 10 * neighbors_list[i] + 3, mpi_comm, &requests[7 * num_neighbors + i]);
}
}
// send matrix: row offsets, col indices, values
for (int i = 0; i < num_neighbors; i++)
{
MPI_Isend(row_offsets[i].raw(), row_offsets[i].size(), MPI_INT, neighbors_list[i], 10 * global_id + 0, mpi_comm, &requests[i]);
MPI_Isend(col_indices[i].raw(), col_indices[i].size()*sizeof(int64_t), MPI_BYTE, neighbors_list[i], 10 * global_id + 1, mpi_comm, &requests[num_neighbors + i]);
MPI_Isend(values[i].raw(), values[i].size()*sizeof(mvalue), MPI_BYTE, neighbors_list[i], 10 * global_id + 2, mpi_comm, &requests[2 * num_neighbors + i]);
if (halo_row_ids.size() != 0)
{
MPI_Isend(halo_row_ids[i].raw(), halo_row_ids[i].size()*sizeof(int64_t), MPI_BYTE, neighbors_list[i], 10 * global_id + 3, mpi_comm, &requests[6 * num_neighbors + i]);
}
}
if (halo_row_ids.size() != 0)
{
MPI_Waitall(8 * num_neighbors, &requests[0], MPI_STATUSES_IGNORE); //I have to wait for my stuff to be sent too, because I deallocate those matrices upon exiting this function
}
else
{
MPI_Waitall(6 * num_neighbors, &requests[0], MPI_STATUSES_IGNORE); //I have to wait for my stuff to be sent too, because I deallocate those matrices upon exiting this function
}
row_offsets.swap(local_row_offsets);
col_indices.swap(local_col_indices);
values.swap(local_values);
halo_row_ids.swap(local_row_ids);
#else
FatalError("MPI Comms module requires compiling with MPI", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
}
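// Note on the tag scheme above (illustrative): a message carrying array k is
// sent with tag 10 * global_id + k and received with tag
// 10 * neighbors_list[i] + k, so the tags match exactly when the receiver's
// neighbors_list[i] equals the sender's global_id; e.g. rank 3 sending its
// row_offsets (k = 0) uses tag 30, which is the tag its neighbor posted in the
// corresponding MPI_Irecv.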
template <class T_Config>
void CommsMPIDirect<T_Config>::exchange_matrix_halo(Matrix_Array &halo_rows, DistributedManager_Array &halo_btl, const Matrix<TConfig> &m)
{
if (TConfig::memSpace == AMGX_host)
{
FatalError("MPI Comms module no implemented for host", AMGX_ERR_NOT_IMPLEMENTED);
}
else
{
#ifdef AMGX_WITH_MPI
int total = 0;
int neighbors = CommsMPIHostBufferStream<T_Config>::get_neighbors();
MPI_Comm mpi_comm = CommsMPIHostBufferStream<T_Config>::get_mpi_comm();
std::vector<MPI_Request> &requests = CommsMPIHostBufferStream<T_Config>::get_requests();
MPI_Comm_size( mpi_comm, &total );
int bsize = m.get_block_size();
int rings = m.manager->B2L_rings[0].size() - 1;
int diag = m.hasProps(DIAG);
std::vector<Matrix<TConfig>> local_copy(halo_rows.size());
std::vector<DistributedManager<TConfig>> local_copy_manager(halo_rows.size());
{
// there shouldn't be any outstanding requests, because we don't want to overwrite them
int completed;
MPI_Testall(requests.size(), &requests[0], &completed, MPI_STATUSES_IGNORE);
if (!completed)
{
MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
}
}
std::vector<INDEX_TYPE> metadata(neighbors * (rings + 1 + 5)); //ring offsets (rings+1), num_rows, num_nz, base_index, index_range
for (int i = 0; i < neighbors; i++)
{
for (int j = 0; j <= rings; j++) { metadata[i * (rings + 1 + 5) + j] = halo_btl[i].B2L_rings[0][j]; }
metadata[i * (rings + 1 + 5) + rings + 1] = halo_rows[i].get_num_rows();
metadata[i * (rings + 1 + 5) + rings + 2] = halo_rows[i].get_num_nz();
metadata[i * (rings + 1 + 5) + rings + 3] = halo_btl[i].base_index();
metadata[i * (rings + 1 + 5) + rings + 4] = halo_btl[i].index_range();
metadata[i * (rings + 1 + 5) + rings + 5] = halo_btl[i].L2H_maps[0].size();
MPI_Isend(&metadata[i * (rings + 1 + 5)], rings + 6, MPI_INT, m.manager->neighbors[i], 0, mpi_comm, &requests[i]);
}
std::vector<INDEX_TYPE> metadata_recv(rings + 1 + 5);
for (int i = 0; i < neighbors; i++)
{
MPI_Recv(&metadata_recv[0], rings + 6, MPI_INT, m.manager->neighbors[i], 0, mpi_comm, MPI_STATUSES_IGNORE);
local_copy[i].addProps(CSR);
if (diag) { local_copy[i].addProps(DIAG); }
local_copy[i].resize(metadata_recv[rings + 1], metadata_recv[rings + 1], metadata_recv[rings + 2], m.get_block_dimy(), m.get_block_dimx(), 1);
local_copy_manager[i].set_base_index(metadata_recv[rings + 3]);
local_copy_manager[i].set_index_range(metadata_recv[rings + 4]);
local_copy_manager[i].B2L_rings.resize(1);
local_copy_manager[i].B2L_rings[0].resize(rings + 1);
local_copy_manager[i].B2L_maps.resize(1);
local_copy_manager[i].B2L_maps[0].resize(local_copy[i].get_num_rows());
local_copy_manager[i].L2H_maps.resize(1);
local_copy_manager[i].L2H_maps[0].resize(metadata_recv[rings + 5]);
for (int j = 0; j <= rings; j++) { local_copy_manager[i].B2L_rings[0][j] = metadata_recv[j]; }
}
MPI_Waitall(neighbors, &requests[0], MPI_STATUSES_IGNORE); //I have to wait for my stuff to be sent too, because I deallocate those matrices upon exiting this function
typedef typename T_Config::MatPrec mvalue;
for (int i = 0; i < neighbors; i++)
{
MPI_Irecv(local_copy[i].row_offsets.raw(), local_copy[i].row_offsets.size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->neighbors[i] + 0, mpi_comm, &requests[5 * neighbors + 5 * i]);
MPI_Irecv(local_copy[i].col_indices.raw(), local_copy[i].col_indices.size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->neighbors[i] + 1, mpi_comm, &requests[5 * neighbors + 5 * i + 1]);
MPI_Irecv(local_copy_manager[i].B2L_maps[0].raw(), local_copy_manager[i].B2L_maps[0].size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->neighbors[i] + 2, mpi_comm, &requests[5 * neighbors + 5 * i + 2]);
MPI_Irecv(local_copy_manager[i].L2H_maps[0].raw(), local_copy_manager[i].L2H_maps[0].size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->neighbors[i] + 3, mpi_comm, &requests[5 * neighbors + 5 * i + 3]);
MPI_Irecv(local_copy[i].values.raw(), local_copy[i].values.size()*sizeof(mvalue), MPI_BYTE, m.manager->neighbors[i], 10 * m.manager->neighbors[i] + 4, mpi_comm, &requests[5 * neighbors + 5 * i + 4]);
}
for (int i = 0; i < neighbors; i++)
{
MPI_Isend(halo_rows[i].row_offsets.raw(), halo_rows[i].row_offsets.size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->global_id() + 0, mpi_comm, &requests[5 * i]);
MPI_Isend(halo_rows[i].col_indices.raw(), halo_rows[i].col_indices.size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->global_id() + 1, mpi_comm, &requests[5 * i + 1]);
MPI_Isend(halo_btl[i].B2L_maps[0].raw(), halo_btl[i].B2L_maps[0].size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->global_id() + 2, mpi_comm, &requests[5 * i + 2]);
MPI_Isend(halo_btl[i].L2H_maps[0].raw(), halo_btl[i].L2H_maps[0].size(), MPI_INT, m.manager->neighbors[i], 10 * m.manager->global_id() + 3, mpi_comm, &requests[5 * i + 3]);
MPI_Isend(halo_rows[i].values.raw(), halo_rows[i].values.size()*sizeof(mvalue), MPI_BYTE, m.manager->neighbors[i], 10 * m.manager->global_id() + 4, mpi_comm, &requests[5 * i + 4]);
}
MPI_Waitall(2 * 5 * neighbors, &requests[0], MPI_STATUSES_IGNORE); //I have to wait for my sends to complete too, because those matrices are deallocated upon exiting this function
halo_rows.swap(local_copy);
halo_btl.swap(local_copy_manager);
#else
FatalError("MPI Comms module requires compiling with MPI", AMGX_ERR_NOT_IMPLEMENTED);
#endif
}
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class CommsMPIDirect<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_MULTI_GPU_CUH_
#define INCLUDE_GGNN_CUDA_KNN_GGNN_MULTI_GPU_CUH_
#include <chrono>
#include <limits>
#include <string>
#include <thread>
#include <stdio.h>
#include <cstring>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include "cub/cub.cuh"
#include "ggnn/cuda_knn_ggnn_gpu_instance.cuh"
#include "ggnn/graph/cuda_knn_ggnn_graph_device.cuh"
#include "ggnn/graph/cuda_knn_ggnn_graph_host.cuh"
#include "ggnn/query/cuda_knn_query_layer.cuh"
#include "ggnn/query/cuda_knn_ggnn_query.cuh"
#include "ggnn/query/cuda_knn_bf_query_layer.cuh"
#include "ggnn/query/cuda_knn_stats_query_layer.cuh"
#include "ggnn/utils/cuda_knn_utils.cuh"
#include "ggnn/utils/cuda_knn_constants.cuh"
#include "ggnn/utils/cuda_knn_dataset.cuh"
#include "ggnn/utils/cuda_knn_ggnn_results.cuh"
// only needed for getTotalSystemMemory()
#include <unistd.h>
size_t getTotalSystemMemory()
{
size_t pages = sysconf(_SC_PHYS_PAGES);
// this excludes memory used for caching files...
//size_t free_pages = sysconf(_SC_AVPHYS_PAGES);
size_t page_size = sysconf(_SC_PAGE_SIZE);
return pages * page_size;
}
/**
* GGNN multi-GPU wrapper
*
* @param measure distance measure: Euclidean or Cosine
* @param KeyT datatype of dataset indices (needs to be able to represent
* N_base, signed integer required)
* @param ValueT distance value type
* @param GAddrT address type used to access neighborhood vectors (needs to be
* able to represent N_all*K)
* @param BaseT datatype of dataset vector elements
* @param BAddrT address type used to access dataset vectors (needs to be able
* to represent N_base*D)
* @param D dimension of dataset
* @param KBuild neighbors per node in the GGNN graph
* @param KF maximum number of inverse links per node in the GGNN graph
* @param KQuery number of nearest neighbors to retrieve during query
* @param S segment size
*/
template <DistanceMeasure measure,
typename KeyT, typename ValueT, typename GAddrT, typename BaseT,
typename BAddrT, int D, int KBuild, int KF, int KQuery, int S>
struct GGNNMultiGPU {
using Dataset = Dataset<KeyT, BaseT, BAddrT>;
using GGNNGPUInstance = GGNNGPUInstance<measure, KeyT, ValueT, GAddrT, BaseT, BAddrT, D, KBuild, KF, KQuery, S>;
using GGNNResults = GGNNResults<measure, KeyT, ValueT, BaseT, BAddrT, KQuery>;
Dataset dataset;
/// one instance per GPU
std::vector<GGNNGPUInstance> ggnn_gpu_instances;
int num_parts {0};
bool swap_to_disk {false};
bool swap_to_ram {false};
bool process_shards_back_to_front {false};
std::string graph_dir;
const int L;
const float tau_build;
const bool generate_gt;
GGNNMultiGPU(const std::string& basePath, const std::string& queryPath,
const std::string& gtPath, const int L, const float tau_build, const size_t N_base = std::numeric_limits<size_t>::max())
: dataset{basePath, queryPath, gtPath, N_base},
L{L},
tau_build{tau_build},
generate_gt{gtPath.empty()} {
CHECK_EQ(dataset.D, D) << "DIM needs to be the same";
}
void ggnnMain(const std::vector<int>& gpus, const std::string& mode,
const int N_shard, const std::string& graph_dir,
const int refinement_iterations,
const bool grid_search) {
const bool build = mode.find('b') != std::string::npos;
const bool store = build && mode.find('s') != std::string::npos;
const bool load = !build && mode.find('l') != std::string::npos;
const bool query = mode.find('q') != std::string::npos;
{
std::string mode("Mode: ");
if (build)
mode += "BUILD";
else if (load)
mode += "LOAD";
if (store)
mode += " AND STORE";
if (query)
mode += " AND QUERY";
VLOG(0) << mode;
}
configure(gpus, build, N_shard, graph_dir);
if (build) {
this->build(refinement_iterations);
if (store)
this->store();
}
else if (load)
this->load();
if (query) {
if (grid_search) {
for (int i=0; i<70; ++i)
this->query(i*0.01f);
for (int i=7; i<=20; ++i)
this->query(i*0.1f);
}
else {
this->query(0.3f);
this->query(0.4f);
this->query(0.5f);
this->query(0.6f);
}
}
}
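// Illustrative usage sketch (not part of the original file; the template
// arguments, file paths and parameter values below are placeholders only):
//
//   using GGNN = GGNNMultiGPU<Euclidean,            // distance measure
//                             int32_t, float,       // KeyT, ValueT
//                             uint32_t,             // GAddrT
//                             float, uint64_t,      // BaseT, BAddrT
//                             128, 24, 32, 10, 32>; // D, KBuild, KF, KQuery, S
//   GGNN ggnn("base.fvecs", "query.fvecs", "gt.ivecs",
//             /*L=*/4, /*tau_build=*/0.5f);
//   // build on GPUs 0 and 1, store the shards, then query ("bsq"):
//   ggnn.ggnnMain({0, 1}, "bsq", /*N_shard=*/-1, "/tmp/ggnn_graph/",
//                 /*refinement_iterations=*/2, /*grid_search=*/false);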
static size_t computeGraphSize(const int N_shard, const int L) {
/// theoretical growth factor (number of sub-graphs merged together per
/// layer)
const float growth = powf(N_shard / static_cast<float>(S), 1.f / (L - 1));
const int Gf = growth;
const int Gc = growth + 1;
const float S0f = N_shard / (pow(Gf, (L - 1)));
const float S0c = N_shard / (pow(Gc, (L - 1)));
const bool is_floor =
(growth > 0) && ((S0c < KBuild) || (fabs(S0f - S) < fabs(S0c - S)));
const int G = (is_floor) ? Gf : Gc;
const int S0 = (is_floor) ? S0f : S0c;
const int S0_off = N_shard - pow(G, L - 1) * S0;
int N_all = 0;
int ST_all = 0;
int N_current = N_shard;
for (int l = 0; l < L; l++) {
N_all += N_current;
if (l) {
ST_all += N_current;
N_current /= G;
}
else {
N_current = S;
for (int i=2;i<L; ++i)
N_current *= G;
}
}
// just to make sure that everything is sufficiently aligned
auto align8 = [](size_t size) -> size_t {return ((size+7)/8)*8;};
const size_t graph_size = align8(static_cast<size_t>(N_all) * KBuild * sizeof(KeyT));
const size_t selection_translation_size = align8(ST_all * sizeof(KeyT));
// const size_t nn1_dist_buffer_size = N * sizeof(ValueT);
const size_t nn1_stats_size = align8(2 * sizeof(ValueT));
const size_t total_graph_size = graph_size + 2 * selection_translation_size + nn1_stats_size;
return total_graph_size;
}
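// Worked example (illustrative numbers, not from the original file): with
// N_shard = 32768, S = 32, L = 4 and KBuild = 24, the growth factor is
// (32768 / 32)^(1/3) ~= 10.08. The floor/ceil test above picks G = 10, since
// the floor yields a top-layer segment size of ~32.8, closer to S than the
// ceil's ~24.6. The resulting per-layer point counts are 32768, 3200, 320 and
// 32, so N_all = 36320 and ST_all = 3552; the returned size is then the
// N_all * KBuild neighbor lists plus two ST_all-sized selection/translation
// arrays and the tiny nn1 statistics buffer, each padded to 8 bytes.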
void configure(const std::vector<int>& gpu_ids={0}, bool enable_construction=true,
int N_shard=-1, const std::string graph_dir="") {
ggnn_gpu_instances.clear();
CHECK(!graph_dir.empty());
if (graph_dir.back() == '/')
this->graph_dir = graph_dir;
else
this->graph_dir = graph_dir+'/';
const int num_gpus = gpu_ids.size();
// determine shard sizes and number of iterations
if (N_shard < 0)
N_shard = dataset.N_base/num_gpus;
const int num_iterations = dataset.N_base/(N_shard * num_gpus);
num_parts = num_gpus*num_iterations;
CHECK_EQ(N_shard*num_gpus*num_iterations, dataset.N_base) << "N_shard x num_gpus x num_iterations needs to be equal to N_base, for now.";
// determine number of cpu-side buffers
const size_t total_graph_size = computeGraphSize(N_shard, L);
const size_t total_memory = getTotalSystemMemory();
// guess the available memory (assume 1/8 used elsewhere, subtract dataset)
const size_t available_memory = total_memory-total_memory/8-sizeof(ValueT)*static_cast<size_t>(dataset.N_base)*D;
const int max_parts_per_gpu = available_memory/(total_graph_size*num_gpus);
LOG(INFO) << "estimated remaining host memory (" << available_memory/(1024.0f*1024.0f*1024.0f)
<< " GB) suffices for " << max_parts_per_gpu << " parts per GPU ("
<< total_graph_size/(1024.0f*1024.0f*1024.0f) << " GB each).";
CHECK_GT(max_parts_per_gpu, 0) << "use smaller shards.";
const int num_cpu_buffers_per_gpu = min(num_iterations, max_parts_per_gpu);
swap_to_disk = num_cpu_buffers_per_gpu < num_iterations;
ggnn_gpu_instances.reserve(num_gpus);
VLOG(4) << "allocating shards...";
for (int device_i=0; device_i<num_gpus; ++device_i) {
const int gpu_id = gpu_ids[device_i];
CHECK_CUDA(cudaSetDevice(gpu_id));
ggnn_gpu_instances.emplace_back(gpu_id, &dataset, N_shard, L, enable_construction, tau_build, num_iterations, num_cpu_buffers_per_gpu);
swap_to_ram |= ggnn_gpu_instances.at(device_i).ggnn_shards.size() < num_iterations;
if (!swap_to_disk) {
const size_t num_gpu_shards = ggnn_gpu_instances.at(device_i).ggnn_shards.size();
for (int i=0; i<num_gpu_shards; ++ i)
ggnn_gpu_instances.at(device_i).loadShardBaseDataAsync(device_i * num_iterations + i, i);
}
}
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
VLOG(4) << "GGNN multi-GPU setup configured.";
if (swap_to_disk)
VLOG(4) << "shards will be swapped to disk. (not all parts fit into ram simultaneously)";
if (swap_to_ram)
VLOG(4) << "shards will be swapped to ram. (not all shards fit onto the gpu simultaneously)";
}
void build(const int refinement_iterations) {
CHECK(!ggnn_gpu_instances.empty()) << "configure() the multi-GPU setup first!";
const int num_gpus = int(ggnn_gpu_instances.size());
const int N_shard = ggnn_gpu_instances[0].N_shard;
const int num_iterations = int(num_parts/ggnn_gpu_instances.size());
std::vector<int64_t> build_times(num_parts);
VLOG(0) << "GGNN::build()"
<< " | num_gpus: " << num_gpus
<< " | N_shard: " << N_shard
<< " | num_iterations: " << num_iterations;
std::vector<std::thread> threads;
threads.reserve(num_gpus);
for (int device_i = 0; device_i < num_gpus; device_i++) {
std::thread t([&, device_i]() {
auto& gpu_instance = ggnn_gpu_instances.at(device_i);
const int gpu_id = gpu_instance.gpu_id;
const int num_gpu_buffers = gpu_instance.ggnn_shards.size();
const int num_cpu_buffers = gpu_instance.ggnn_cpu_buffers.size();
CHECK_CUDA(cudaSetDevice(gpu_id));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// printf("[gpu: %d] N_shard: %d \n", gpu_id, N_shard);
VLOG(1) << "[GPU: " << gpu_id << "] N_shard: " << N_shard;
if (swap_to_disk) {
for (int i = 0; i < num_gpu_buffers; i++)
gpu_instance.loadShardBaseDataAsync(device_i * num_iterations + i, i);
}
if (swap_to_ram) {
for (int i = 0; i < num_cpu_buffers; i++)
gpu_instance.ggnn_cpu_buffers[i].current_part_id = -1;
}
for (int i = 0; i < num_iterations; i++)
{
const int part_id = device_i * num_iterations + i;
auto& shard = gpu_instance.ggnn_shards.at(i%gpu_instance.ggnn_shards.size());
cudaStreamSynchronize(shard.stream);
cudaEventRecord(start, shard.stream);
gpu_instance.build(part_id, i);
for (int refinement_step = 0; refinement_step < refinement_iterations;
++refinement_step) {
DLOG(INFO) << "Refinement step " << refinement_step;
gpu_instance.refine(i);
}
cudaEventRecord(stop, shard.stream);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
VLOG(0) << "[GPU: " << gpu_id << "] part: " << part_id << " => seconds: " << milliseconds/1000.f << " [" << N_shard << " points build -> " << milliseconds*1000.0f/N_shard << " us/point] \n";
build_times[part_id] = milliseconds;
if (swap_to_disk || swap_to_ram) {
if (swap_to_disk)
gpu_instance.storePartAsync(graph_dir, part_id, i);
else
gpu_instance.downloadPartAsync(part_id, i);
if (i+num_gpu_buffers < num_iterations)
gpu_instance.loadShardBaseDataAsync(part_id+num_gpu_buffers, i+num_gpu_buffers);
}
}
if (swap_to_disk || swap_to_ram)
{
for (int i = 0; i < num_iterations; i++)
gpu_instance.waitForDiskIO(i);
}
VLOG(0) << "[GPU: " << gpu_id << "] build() done.";
});
threads.push_back(std::move(t));
}
for (auto&& t : threads) {
t.join();
}
float build_time_ms = 0.f;
for (auto&& b : build_times)
{
build_time_ms += static_cast<float>(b);
}
VLOG(0) << "Combined build time: " << build_time_ms/1000.f << " s \n";
process_shards_back_to_front = true;
}
void store() {
CHECK(!ggnn_gpu_instances.empty()) << "configure() the multi-GPU setup first!";
if (swap_to_disk) {
VLOG(4) << "graph should already be stored on-the-fly";
return;
}
const int num_gpus = int(ggnn_gpu_instances.size());
const int num_iterations = int(num_parts/ggnn_gpu_instances.size());
std::vector<std::thread> threads;
threads.reserve(num_gpus);
for (int device_i = 0; device_i < num_gpus; device_i++) {
std::thread t([&, device_i]() {
auto& gpu_instance = ggnn_gpu_instances.at(device_i);
const int gpu_id = gpu_instance.gpu_id;
for (int i = 0; i < num_iterations; i++) {
const int part_id = device_i * num_iterations + i;
gpu_instance.storePartAsync(graph_dir, part_id, i);
}
for (int i = 0; i < num_iterations; i++) {
gpu_instance.waitForDiskIO(i);
}
VLOG(0) << "[GPU: " << gpu_id << "] store() done.";
});
threads.push_back(std::move(t));
}
for (auto&& t : threads) {
t.join();
}
}
void load() {
CHECK(!ggnn_gpu_instances.empty()) << "configure() the multi-GPU setup first!";
if (swap_to_disk) {
VLOG(4) << "graph will be loaded on-the-fly";
return;
}
const int num_gpus = int(ggnn_gpu_instances.size());
const int num_iterations = int(num_parts/ggnn_gpu_instances.size());
std::vector<std::thread> threads;
threads.reserve(num_gpus);
for (int device_i = 0; device_i < num_gpus; device_i++) {
std::thread t([&, device_i]() {
auto& gpu_instance = ggnn_gpu_instances.at(device_i);
const int gpu_id = gpu_instance.gpu_id;
for (int i = 0; i < num_iterations; i++) {
const int part_id = device_i * num_iterations + i;
gpu_instance.loadPartAsync(graph_dir, part_id, i);
}
for (int i = 0; i < num_iterations; i++)
gpu_instance.waitForDiskIO(i);
VLOG(0) << "[GPU: " << gpu_id << "] load() done.";
});
threads.push_back(std::move(t));
}
for (auto&& t : threads) {
t.join();
}
}
void query(const float tau_query) {
CHECK(!ggnn_gpu_instances.empty()) << "configure() the multi-GPU setup first!";
dataset.template checkForDuplicatesInGroundTruth<measure, ValueT>(KQuery);
const int num_gpus = int(ggnn_gpu_instances.size());
const int N_shard = ggnn_gpu_instances[0].N_shard;
const int num_iterations = int(num_parts/ggnn_gpu_instances.size());
VLOG(0) << "GGNN::query()"
<< " | tau_query: " << tau_query
<< " | num_gpus: " << num_gpus
<< " | N_shard: " << N_shard
<< " | num_iterations: " << num_iterations;
GGNNResults ggnn_results{&dataset, num_gpus, num_iterations};
std::vector<std::thread> threads;
threads.reserve(num_gpus);
for (int device_i = 0; device_i < num_gpus; device_i++) {
std::thread t([&, device_i]() {
auto& gpu_instance = ggnn_gpu_instances.at(device_i);
const int gpu_id = gpu_instance.gpu_id;
const int num_gpu_buffers = gpu_instance.ggnn_shards.size();
const int num_cpu_buffers = gpu_instance.ggnn_cpu_buffers.size();
const int prefetch_amount = min(num_cpu_buffers, num_gpu_buffers);
CHECK_CUDA(cudaSetDevice(gpu_id));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaMemcpyToSymbol(c_tau_query, &tau_query, sizeof(float));
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
if (swap_to_disk || swap_to_ram) {
// initially, prefetch for the entire gpu
for (int i = 0; i < num_gpu_buffers; i++) {
const int j = process_shards_back_to_front ? num_iterations-i-1 : i;
const int part_id = device_i * num_iterations + j;
gpu_instance.loadPartAsync(graph_dir, part_id, j);
}
}
// TODO: warmup (here or in another function?)
for (int i = 0; i < num_iterations; i++)
{
const int j = process_shards_back_to_front ? num_iterations-i-1 : i;
const int part_id = device_i * num_iterations + j;
auto& shard = gpu_instance.ggnn_shards.at(j%gpu_instance.ggnn_shards.size());
if (swap_to_disk || swap_to_ram) {
auto begin = std::chrono::high_resolution_clock::now();
gpu_instance.waitForDiskIO(j);
auto end = std::chrono::high_resolution_clock::now();
auto cpu_us = std::chrono::duration_cast<std::chrono::microseconds>(end - begin);
VLOG(0) << "[GPU: " << gpu_id << "] shard-swap delay: " << cpu_us.count()*0.001f << " ms.";
}
cudaStreamSynchronize(shard.stream);
cudaEventRecord(start, shard.stream);
gpu_instance.template queryLayer<32, 400, 448, 64>(j);
cudaEventRecord(stop, shard.stream);
if (swap_to_disk || swap_to_ram) {
// start the upload for the next shard after starting the current query
// then, it should be able to overlap
// prefetch only as much in parallel as there are cpu buffers
if (process_shards_back_to_front) {
if (j-prefetch_amount < num_iterations-num_gpu_buffers && j-prefetch_amount >= 0) {
gpu_instance.loadPartAsync(graph_dir, part_id-prefetch_amount, j-prefetch_amount);
}
}
else if (j+prefetch_amount >= num_gpu_buffers && j+prefetch_amount < num_iterations) {
gpu_instance.loadPartAsync(graph_dir, part_id+prefetch_amount, j+prefetch_amount);
}
}
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
VLOG(0) << "[GPU: " << gpu_id << "] query part: " << part_id << " => ms: " << milliseconds << " [" << dataset.N_query << " points query -> " << milliseconds*1000.0f/dataset.N_query << " us/point] \n";
}
const cudaStream_t shard0Stream = gpu_instance.ggnn_shards.at(0).stream;
cudaEventRecord(start, shard0Stream);
gpu_instance.ggnn_query.sortAsync(shard0Stream);
cudaEventRecord(stop, shard0Stream);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
if(num_iterations > 1) {
VLOG(0) << "[GPU: " << device_i << "] query sort: " << " => ms: " << milliseconds << " [" << dataset.N_query << " points query -> " << milliseconds*1000.0f/dataset.N_query << " us/point] \n";
}
ggnn_results.loadAsync(gpu_instance.ggnn_query, device_i, shard0Stream);
cudaStreamSynchronize(shard0Stream);
cudaEventDestroy(start);
cudaEventDestroy(stop);
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
VLOG(0) << "[GPU: " << gpu_id << "] query() done.";
});
threads.push_back(std::move(t));
}
for (auto&& t : threads) {
t.join();
}
// CPU Zone:
ggnn_results.merge();
ggnn_results.evaluateResults();
// process the shards in reverse order during the next query for improved cache utilization
process_shards_back_to_front = !process_shards_back_to_front;
}
}; // GGNN
#endif // INCLUDE_GGNN_CUDA_KNN_GGNN_MULTI_GPU_CUH_
using namespace FW;
//------------------------------------------------------------------------
// Global variables.
//------------------------------------------------------------------------
__constant__ int4 c_input[(sizeof(RenderInput) + sizeof(int4) - 1) / sizeof(int4)];
__constant__ int4 c_blurLUT[BLUR_LUT_SIZE];
__device__ S32 g_warpCounter;
texture<U32, 1> texIndexToPixel;
texture<U32, 1> texIndexToPixelCoarse;
texture<F32, 1> texFrameCoarseIn;
texture<uchar4, 1, cudaReadModeNormalizedFloat> texTempFrameIn;
texture<uchar4, 1, cudaReadModeNormalizedFloat> texAASamplesIn;
//------------------------------------------------------------------------
// Common helper functions.
//------------------------------------------------------------------------
__device__ inline const RenderInput& getInput(void)
{
return *(const RenderInput*)c_input;
}
__device__ inline void updateCounter(PerfCounter counter, int amount = 1)
{
#ifdef ENABLE_PERF_COUNTERS
int warpIdx = threadIdx.y + blockIdx.x * RCK_TRACE_BLOCK_HEIGHT;
volatile S64* ptr = (S64*)getInput().perfCounters + (warpIdx * PerfCounter_Max + counter) * 33;
ptr[threadIdx.x] += amount; // thread counter
ptr[32] += amount; // warp counter
#endif
}
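// Layout note (inferred from the code above, not from original documentation):
// each (warp, counter) pair owns 33 consecutive S64 slots in the perfCounters
// buffer: one accumulator per lane at ptr[threadIdx.x] (the "thread counter")
// plus a shared slot at ptr[32] (the "warp counter"), so the host can read the
// statistics at either granularity after the kernel finishes.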
#ifdef ENABLE_PERF_COUNTERS
__device__ inline bool checkTransaction(int page)
{
volatile __shared__ U32 buffer[RCK_TRACE_BLOCK_WIDTH * RCK_TRACE_BLOCK_HEIGHT];
int fullIdx = threadIdx.x + threadIdx.y * RCK_TRACE_BLOCK_WIDTH;
volatile U32* ptr = &buffer[fullIdx & -16];
int idx = fullIdx & 15;
// Clear buffer.
for (int i = 0; i < 16; i++)
ptr[i] = 0xFFFFFFFF;
// Write address.
ptr[idx] = page;
// Check for duplicates.
for (int i = 0; i < idx; i++)
if (ptr[i] == page)
return false;
return true;
}
#endif
__device__ inline void updateCountersForGlobalAccess(int sizeLog2, S32* addr)
{
#ifdef ENABLE_PERF_COUNTERS
updateCounter(PerfCounter_GlobalAccesses);
updateCounter(PerfCounter_GlobalBytes, 1 << sizeLog2);
if (checkTransaction((U32)addr >> ::min(sizeLog2 + 5, 7)))
updateCounter(PerfCounter_GlobalTransactions);
#endif
}
__device__ inline void updateCountersForLocalAccess(int sizeLog2, int id)
{
#ifdef ENABLE_PERF_COUNTERS
updateCounter(PerfCounter_LocalAccesses);
updateCounter(PerfCounter_LocalBytes, 1 << sizeLog2);
if (checkTransaction(id))
updateCounter(PerfCounter_LocalTransactions);
#endif
}
//------------------------------------------------------------------------
// Utility routines.
//------------------------------------------------------------------------
#include "Util.inl"
#include "Raycast.inl"
#include "AttribLookup.inl"
//------------------------------------------------------------------------
// Private definitions.
//------------------------------------------------------------------------
#define BLUR_FACTOR 1.0f // Controls total amount of blurring. Larger than 1.0 causes everything to blur.
struct Aux // shared memory auxiliary storage
{
U32* framePtr;
#ifdef LARGE_RECONSTRUCTION_KERNEL
U32* aaSamplePtr;
#endif
#ifdef JITTER_LOD
float vSizeMultiplier;
#endif
union
{
S32 fetchWorkTemp;
Ray ray;
struct
{
U32 color;
U32 alpha;
} aa;
};
};
__constant__ float2 c_aa4table[4] =
{
{ 0.125f, 0.375f },
{ 0.375f, 0.875f },
{ 0.875f, 0.625f },
{ 0.625f, 0.125f }
};
//------------------------------------------------------------------------
// Ray generation.
//------------------------------------------------------------------------
__device__ Ray constructPrimaryRay(int ppos, int ridx, volatile Aux& aux)
{
const RenderInput& input = getInput();
float vsize = input.maxVoxelSize;
int xsize = input.frameSize.x;
// if coarse pass, make voxel large enough so that rays cannot accidentally get past it
if (input.flags & RenderFlags_CoarsePass)
{
vsize = (float)input.coarseSize * 2.83f; // sqrt(8)
xsize = input.coarseFrameSize.x;
} else
{
#ifdef JITTER_LOD
// perturb randomly
U32 a = ppos;
U32 b = ridx;
U32 c = 0x9e3779b9u;
jenkinsMix(a, b, c);
float f = (float)c / ((float)(1u << 31) * 2.f);
f = .5f + .5f*f;
aux.vSizeMultiplier = f;
vsize *= f;
#endif
}
// find ray coordinates
int pixely = ppos / xsize;
int pixelx = ppos - (pixely * xsize);
F32 fx = pixelx;
F32 fy = pixely;
if (input.flags & RenderFlags_CoarsePass)
{
fx *= (float)input.coarseSize;
fy *= (float)input.coarseSize;
} else
{
if (input.aaRays == 1)
{
fx += .5f; // center of pixel
fy += .5f;
} else if (input.aaRays == 4)
{
int aidx = (ridx & 3);
fx += c_aa4table[aidx].x;
fy += c_aa4table[aidx].y;
}
}
F32 tmin = 0.f;
if (getInput().flags & RenderFlags_UseCoarseData)
{
// fetch tmin
int bx = pixelx / input.coarseSize;
int by = pixely / input.coarseSize;
int bidx = bx + by * input.coarseFrameSize.x;
F32 tmin0 = tex1Dfetch(texFrameCoarseIn, bidx);
F32 tmin1 = tex1Dfetch(texFrameCoarseIn, bidx+1);
F32 tmin2 = tex1Dfetch(texFrameCoarseIn, bidx+input.coarseFrameSize.x);
F32 tmin3 = tex1Dfetch(texFrameCoarseIn, bidx+input.coarseFrameSize.x+1);
tmin = fminf(fminf(tmin0, tmin1), fminf(tmin2, tmin3));
tmin = fminf(tmin, 0.9999f);
}
const Mat4f& vtc = input.octreeMatrices.viewportToCamera;
const Mat4f& cto = input.octreeMatrices.cameraToOctree;
float4 pos = make_float4(
vtc.m00 * fx + vtc.m01 * fy + vtc.m03,
vtc.m10 * fx + vtc.m11 * fy + vtc.m13,
vtc.m20 * fx + vtc.m21 * fy + vtc.m23,
vtc.m30 * fx + vtc.m31 * fy + vtc.m33);
float3 near = make_float3(
pos.x - vtc.m02,
pos.y - vtc.m12,
pos.z - vtc.m22);
float near_sz = input.octreeMatrices.pixelInOctree * vsize;
float3 diff = make_float3(
vtc.m32 * pos.x - vtc.m02 * pos.w,
vtc.m32 * pos.y - vtc.m12 * pos.w,
vtc.m32 * pos.z - vtc.m22 * pos.w);
float diff_sz = near_sz * vtc.m32;
float a = 1.0f / (pos.w - vtc.m32);
float b = 2.0f * a / fmaxf(pos.w + vtc.m32, 1.0e-8f);
float c = tmin * b;
Ray ray;
ray.orig = near * a - diff * c;
ray.dir = diff * (c - b);
ray.orig_sz = near_sz * a - diff_sz * c;
ray.dir_sz = diff_sz * (c - b);
ray.orig = cto * ray.orig;
ray.dir = make_float3(
cto.m00 * ray.dir.x + cto.m01 * ray.dir.y + cto.m02 * ray.dir.z,
cto.m10 * ray.dir.x + cto.m11 * ray.dir.y + cto.m12 * ray.dir.z,
cto.m20 * ray.dir.x + cto.m21 * ray.dir.y + cto.m22 * ray.dir.z);
return ray;
}
//------------------------------------------------------------------------
// Ray processing.
//------------------------------------------------------------------------
__device__ U32 processPrimaryRay(volatile Ray& ray, volatile F32& vSizeMultiplier)
{
// Cast primary ray.
CastResult castRes;
CastStack stack;
castRay(castRes, stack, ray);
// Handle visualizations.
if (getInput().flags & RenderFlags_VisualizeIterations)
{
F32 v = 255.0f * (F32)castRes.iter / 64.0f;
return toABGR(make_float4(v, v, v, 0.0f));
}
else if (getInput().flags & RenderFlags_VisualizeRaycastLevel)
{
F32 v = 0.0f;
if (castRes.t <= 1.0f)
v = 255.0f - ((F32)CAST_STACK_DEPTH - (F32)castRes.stackPtr) * (255.0f / 18.0f);
return toABGR(make_float4(v * 0.5f, v, v * 0.5f, 0.0f));
}
// Initialize light and incident vectors.
float3 L = make_float3(0.3643f, 0.3535f, 0.8616f);
float3 I = normalize(extractMat3f(getInput().octreeMatrices.octreeToWorld) * get(ray.dir));
// No hit => sky.
if (castRes.t > 1.0f)
{
float3 c;
if (I.y >= 0.f)
{
float3 horz = { 179.0f, 205.0f, 253.0f };
float3 zen = { 77.0f, 102.0f, 179.0f };
c = horz + (zen - horz) * I.y * I.y;
c *= 2.5f;
}
else
{
float3 horz = { 192.0f, 154.0f, 102.0f };
float3 zen = { 128.0f, 102.0f, 77.0f };
c = horz - (zen - horz) * I.y;
}
c *= fmaxf(L.y, 0.0f);
float IL = dot(I, L);
if (IL > 0.0f)
c += make_float3(255.0f, 179.0f, 102.0f) * powf(IL, 1000.0f); // sun
return toABGR(make_float4(c.x, c.y, c.z, 0.0f));
}
// Get voxel color, normal, and ambient.
float4 voxelColor;
float3 voxelNormal;
lookupVoxelColorNormal(voxelColor, voxelNormal, castRes, stack);
F32 voxelAmbient = 1.0f;
#ifdef VOXELATTRIB_AO
lookupVoxelAO(voxelAmbient, castRes, stack);
#endif
// Calculate world-space normal and reflection vectors.
float3 N = normalize(getInput().octreeMatrices.octreeToWorldN * voxelNormal);
float3 R = (I - N * (dot(N, I) * 2.0f));
F32 LN = dot(L, N);
// Cast shadow ray.
bool shadow = (LN <= 0.0f);
#ifdef ENABLE_SHADOWS
if (!shadow)
{
Ray rayShad;
rayShad.orig_sz = 0.0f;
rayShad.dir_sz = 0.0f;
rayShad.orig = castRes.pos + L * 0.0006f;
rayShad.dir = L * 3.0f;
CastResult castResShad;
CastStack stackShad;
castRay(castResShad, stackShad, rayShad);
shadow = (castResShad.t <= 1.0f);
}
#endif
// Shade.
float4 shadedColor = voxelColor * (voxelAmbient * (0.25f + LN * ((LN < 0.0f) ? 0.15f : (shadow) ? 0.25f : 1.0f)));
if (!shadow)
shadedColor += make_float4(32.f, 32.f, 32.f, 0.0f) * powf(fmaxf(dot(L, R), 0.0f), 18.0f); // specular
shadedColor *= getInput().brightness;
U32 color = toABGR(shadedColor);
// Determine post-process filter radius.
float vSize = (F32)(1 << castRes.stackPtr) / (F32)(1 << CAST_STACK_DEPTH);
float pSize = ray.orig_sz + castRes.t * ray.dir_sz;
#ifdef JITTER_LOD
vSize *= vSizeMultiplier;
#endif
float blurRadius = ::max(vSize / pSize * getInput().maxVoxelSize, 1.0f);
// Encode in the alpha channel.
shadedColor.w = log2f(blurRadius) * 32.0f + 0.5f;
return toABGR(shadedColor);
}
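// Note (derived from the code above and blurKernel() below, not from original
// comments): the post-process blur radius is packed into the 8-bit alpha
// channel as roughly round(32 * log2(blurRadius)). blurKernel() samples the
// frame through a normalized-float texture, so it sees alpha/255 and recovers
// the radius with exp2f(alpha_normalized * 255/32), covering radii from 1 up
// to about 250 pixels.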
//------------------------------------------------------------------------
// Persistent threads.
//------------------------------------------------------------------------
__device__ void fetchWorkFirst(int& warp, int& batchCounter, int* warpCounter, int batchSize, volatile S32& sharedTemp)
{
#ifdef PERSISTENT_THREADS
if (threadIdx.x == 0)
sharedTemp = atomicAdd(warpCounter, batchSize);
warp = sharedTemp;
batchCounter = batchSize;
#else
warp = threadIdx.y + blockIdx.x * RCK_TRACE_BLOCK_HEIGHT;
batchCounter = 0;
#endif
}
__device__ void fetchWorkNext(int& warp, int& batchCounter, int* warpCounter, int batchSize, volatile S32& sharedTemp)
{
#ifdef PERSISTENT_THREADS
batchCounter--;
if (batchCounter > 0)
warp++;
else
{
if (threadIdx.x == 0)
sharedTemp = atomicAdd(warpCounter, batchSize);
batchCounter = batchSize;
warp = sharedTemp;
}
#else
warp = 0x03FFFFFF;
#endif
}
//------------------------------------------------------------------------
// Rendering kernel.
//------------------------------------------------------------------------
#ifdef KERNEL_RENDER
extern "C" __global__ void kernel(void)
{
const RenderInput& input = getInput();
__shared__ Aux auxbuf[RCK_TRACE_BLOCK_WIDTH * RCK_TRACE_BLOCK_HEIGHT];
volatile Aux& aux0 = auxbuf[RCK_TRACE_BLOCK_WIDTH * threadIdx.y];
volatile Aux& aux = auxbuf[threadIdx.x + RCK_TRACE_BLOCK_WIDTH * threadIdx.y];
// fetch first warp of work
int warp, batchCounter;
fetchWorkFirst(warp, batchCounter, &g_warpCounter, input.batchSize, aux0.fetchWorkTemp);
if (warp * 32 >= input.totalWork)
return; // terminate before starting at all
#ifdef PERSISTENT_THREADS
// record that work is being done in this warp slot
((S32*)input.activeWarps)[threadIdx.y + blockIdx.x * RCK_TRACE_BLOCK_HEIGHT] = 1;
#endif
// main warp loop
for (;;)
{
// ray index
int ridx = warp * 32 + threadIdx.x;
if (ridx >= input.totalWork)
return; // terminate individual rays
// calculate pixel index, position, and frame buffer pointer
int pidx = (ridx / input.aaRays) % input.numPrimaryRays;
int ppos;
if (input.flags & RenderFlags_CoarsePass)
{
ppos = tex1Dfetch(texIndexToPixelCoarse, pidx);
aux.framePtr = (U32*)input.frameCoarse + ppos;
}
else
{
ppos = tex1Dfetch(texIndexToPixel, pidx);
aux.framePtr = (U32*)input.frame + ppos;
#ifdef LARGE_RECONSTRUCTION_KERNEL
aux.aaSamplePtr = (U32*)input.aaSampleBuffer + ppos * input.aaRays + (ridx % input.aaRays);
#endif
}
// construct ray
Ray ray = constructPrimaryRay(ppos, ridx, aux);
aux.ray.orig.x = ray.orig.x;
aux.ray.orig.y = ray.orig.y;
aux.ray.orig.z = ray.orig.z;
aux.ray.dir.x = ray.dir.x;
aux.ray.dir.y = ray.dir.y;
aux.ray.dir.z = ray.dir.z;
aux.ray.orig_sz = ray.orig_sz;
aux.ray.dir_sz = ray.dir_sz;
if (getInput().flags & RenderFlags_CoarsePass)
{
CastResult castRes;
CastStack stack;
castRay(castRes, stack, aux.ray);
if (castRes.t < 1.0f)
{
F32 size = (F32)(1 << castRes.stackPtr) / (F32)(1 << CAST_STACK_DEPTH);
castRes.t -= size / length(get(aux.ray.dir)) * 0.5f;
}
*(float*)aux.framePtr = ::max(castRes.t, 0.0f);
} else
{
#ifdef JITTER_LOD
U32 color = processPrimaryRay(aux.ray, aux.vSizeMultiplier);
#else
U32 color = processPrimaryRay(aux.ray, aux.ray.orig.x);
#endif
// write results
if (input.aaRays == 1)
*aux.framePtr = color; // no AA
else
{
#ifdef LARGE_RECONSTRUCTION_KERNEL
*aux.aaSamplePtr = color; // individual sample result
#endif
// unpack result
U32 resc = (color & 0xff) | ((color & 0xff00) << 2) | ((color & 0xff0000) << 4);
aux.aa.color = resc; // rgb with bits shifted up
aux.aa.alpha = color; // original color
// sum with one thread
if ((threadIdx.x & 3) == 0)
{
// rgb
U32 resc0 = (&aux)[0].aa.color;
U32 resc1 = (&aux)[1].aa.color;
U32 resc2 = (&aux)[2].aa.color;
U32 resc3 = (&aux)[3].aa.color;
resc = (resc0 + resc1 + resc2 + resc3);
resc = ((resc >> 2) & 0xff) | ((resc >> 4) & 0xff00) | ((resc >> 6) & 0xff0000);
// alpha
U32 resa0 = (&aux)[0].aa.alpha;
U32 resa1 = (&aux)[1].aa.alpha;
U32 resa2 = (&aux)[2].aa.alpha;
U32 resa3 = (&aux)[3].aa.alpha;
U32 resa = ::min(::min(resa0, resa1), ::min(resa2, resa3));
// combine min alpha and avg color
*aux.framePtr = (resa & 0xff000000) | resc;
}
}
}
// fetch more work
fetchWorkNext(warp, batchCounter, &g_warpCounter, input.batchSize, aux0.fetchWorkTemp);
}
}
#endif
//------------------------------------------------------------------------
// Performance measurement kernel.
//------------------------------------------------------------------------
#ifdef KERNEL_RAYCAST_PERF
extern "C" __global__ void kernel(void)
{
const RenderInput& input = getInput();
__shared__ Aux auxbuf[RCK_TRACE_BLOCK_WIDTH * RCK_TRACE_BLOCK_HEIGHT];
volatile Aux& aux0 = auxbuf[RCK_TRACE_BLOCK_WIDTH * threadIdx.y];
volatile Aux& aux = auxbuf[threadIdx.x + RCK_TRACE_BLOCK_WIDTH * threadIdx.y];
// fetch first warp of work
int warp, batchCounter;
fetchWorkFirst(warp, batchCounter, &g_warpCounter, input.batchSize, aux0.fetchWorkTemp);
if (warp * 32 >= input.totalWork)
return; // terminate before starting at all
#ifdef PERSISTENT_THREADS
// record that work is being done in this warp slot
((S32*)input.activeWarps)[threadIdx.y + blockIdx.x * RCK_TRACE_BLOCK_HEIGHT] = 1;
#endif
// main warp loop
for (;;)
{
// ray index
int ridx = warp * 32 + threadIdx.x;
if (ridx >= input.totalWork)
return; // terminate individual rays
// calculate pixel index, position, and frame buffer pointer
int pidx = ridx % input.numPrimaryRays;
int ppos;
if (input.flags & RenderFlags_CoarsePass)
{
ppos = tex1Dfetch(texIndexToPixelCoarse, pidx);
aux.framePtr = (U32*)input.frameCoarse + ppos;
}
else
{
ppos = tex1Dfetch(texIndexToPixel, pidx);
aux.framePtr = (U32*)input.frame + ppos;
}
// construct ray
Ray ray = constructPrimaryRay(ppos, pidx, aux);
aux.ray.orig.x = ray.orig.x;
aux.ray.orig.y = ray.orig.y;
aux.ray.orig.z = ray.orig.z;
aux.ray.dir.x = ray.dir.x;
aux.ray.dir.y = ray.dir.y;
aux.ray.dir.z = ray.dir.z;
aux.ray.orig_sz = ray.orig_sz;
aux.ray.dir_sz = ray.dir_sz;
CastResult castRes;
CastStack stack;
castRay(castRes, stack, aux.ray);
if (castRes.t < 1.0f)
{
F32 size = (F32)(1 << castRes.stackPtr) / (F32)(1 << CAST_STACK_DEPTH);
castRes.t -= size / length(get(aux.ray.dir)) * 0.5f;
}
*(float*)aux.framePtr = castRes.t;
// fetch more work
fetchWorkNext(warp, batchCounter, &g_warpCounter, input.batchSize, aux0.fetchWorkTemp);
}
}
#endif
//------------------------------------------------------------------------
// Post-process filter kernel.
//------------------------------------------------------------------------
extern "C" __global__ void blurKernel(void)
{
const RenderInput& input = getInput();
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int cx = input.frameSize.x;
int cy = input.frameSize.y;
if (px >= cx || py >= cy)
return;
U32* pResult = ((U32*)input.frame) + (px + cx*py);
float4 ccol = tex1Dfetch(texTempFrameIn, px + cx*py);
float rad = exp2f(ccol.w * (255.f / 32.f)) * BLUR_FACTOR;
if (rad <= 1.f)
{
// single-pixel case
#ifdef LARGE_RECONSTRUCTION_KERNEL
if (input.aaRays == 4)
{
int apos = (px + cx*py) * 4;
cx *= 4;
ccol *= 4.f;
ccol += tex1Dfetch(texAASamplesIn, apos-4 +2);
ccol += tex1Dfetch(texAASamplesIn, apos-4 +3);
ccol += tex1Dfetch(texAASamplesIn, apos+4 +0);
ccol += tex1Dfetch(texAASamplesIn, apos+4 +1);
ccol += tex1Dfetch(texAASamplesIn, apos-cx+1);
ccol += tex1Dfetch(texAASamplesIn, apos-cx+2);
ccol += tex1Dfetch(texAASamplesIn, apos+cx+0);
ccol += tex1Dfetch(texAASamplesIn, apos+cx+3);
ccol *= (1.f/12.f);
ccol.w = 1.f;
*pResult = toABGR(ccol * 255.f);
} else
#endif
{
ccol.w = 1.f;
*pResult = toABGR(ccol * 255.f);
}
return;
}
float4 accum = {0, 0, 0, 0};
for (int i=0; i < BLUR_LUT_SIZE; i++)
{
int4 b = c_blurLUT[i];
float d = __int_as_float(b.w);
if (d >= rad)
break;
int x = px + b.x;
int y = py + b.y;
float w = __int_as_float(b.z);
if (x < 0) w = 0.f;
if (y < 0) w = 0.f;
if (x >= input.frameSize.x) w = 0.f;
if (y >= input.frameSize.y) w = 0.f;
float4 c = tex1Dfetch(texTempFrameIn, x + __mul24(cx, y));
float rad2 = exp2f(c.w * (255.f / 32.f)) * BLUR_FACTOR;
if (w > 0.f)
rad = ::min(rad, rad2);
w *= fminf(fmaxf(rad - d, 0.f), 1.f);
accum.x += c.x * w;
accum.y += c.y * w;
accum.z += c.z * w;
accum.w += w;
}
float invw = 1.f / accum.w;
accum.x *= invw;
accum.y *= invw;
accum.z *= invw;
accum.w = 1.f;
*pResult = toABGR(accum * 255.f);
}
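// LUT layout note (inferred from the loop above; the table itself is filled on
// the host elsewhere): each c_blurLUT entry is an int4 holding the pixel
// offset in .x/.y, the filter weight bit-cast into .z and the offset's
// distance from the center bit-cast into .w, with entries sorted by that
// distance so the loop can break as soon as d >= rad.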
//------------------------------------------------------------------------
/**
 * \file
 * cub::DeviceHisto256 provides device-wide parallel operations for constructing 256-bin histogram(s) over data samples residing within global memory.
 */
#pragma once
#include <stdio.h>
#include <iterator>
#include "persistent_block/persistent_block_histo_256.cuh"
#include "../block/block_load.cuh"
#include "../thread/thread_reduce.cuh"
#include "../util_allocator.cuh"
#include "../grid/grid_even_share.cuh"
#include "../grid/grid_queue.cuh"
#include "../util_debug.cuh"
#include "../util_iterator.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Kernel entry points
*****************************************************************************/
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* Initialization kernel for queue descriptor preparation and for zeroing global counters
*/
template <
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename SizeT, ///< Integral type used for global array indexing
typename HistoCounter> ///< Integral type for counting sample occurrences per histogram bin
__launch_bounds__ (256, 1)
__global__ void InitHisto256Kernel(
GridQueue<SizeT> grid_queue, ///< [in] Descriptor for performing dynamic mapping of tile data to thread blocks
ArrayWrapper<HistoCounter*, ACTIVE_CHANNELS> d_out_histograms, ///< [out] Histogram counter data having logical dimensions <tt>HistoCounter[ACTIVE_CHANNELS][256]</tt>
SizeT num_samples) ///< [in] Total number of samples \p d_samples for all channels
{
d_out_histograms.array[blockIdx.x][threadIdx.x] = 0;
if (threadIdx.x == 0) grid_queue.ResetDrain(num_samples);
}
/**
* Multi-block histogram kernel entry point. Computes privatized histograms, one per thread block.
*/
template <
typename PersistentBlockHisto256Policy, ///< Tuning policy for cub::PersistentBlockHisto256 abstraction
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT> ///< Integral type used for global array indexing
__launch_bounds__ (int(PersistentBlockHisto256Policy::BLOCK_THREADS), PersistentBlockHisto256Policy::SM_OCCUPANCY)
__global__ void MultiBlockHisto256Kernel(
InputIteratorRA d_samples, ///< [in] Array of sample data. (Channels, if any, are interleaved in "AOS" format)
ArrayWrapper<HistoCounter*, ACTIVE_CHANNELS> d_out_histograms, ///< [out] Histogram counter data having logical dimensions <tt>HistoCounter[ACTIVE_CHANNELS][gridDim.x][256]</tt>
SizeT num_samples, ///< [in] Total number of samples \p d_samples for all channels
GridEvenShare<SizeT> even_share, ///< [in] Descriptor for how to map an even-share of tiles across thread blocks
GridQueue<SizeT> queue) ///< [in] Descriptor for performing dynamic mapping of tile data to thread blocks
{
// Constants
enum
{
BLOCK_THREADS = PersistentBlockHisto256Policy::BLOCK_THREADS,
ITEMS_PER_THREAD = PersistentBlockHisto256Policy::ITEMS_PER_THREAD,
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD,
};
// Thread block type for compositing input tiles
typedef PersistentBlockHisto256<PersistentBlockHisto256Policy, CHANNELS, ACTIVE_CHANNELS, InputIteratorRA, HistoCounter, SizeT> PersistentBlockHisto256T;
// Shared memory for PersistentBlockHisto256
__shared__ typename PersistentBlockHisto256T::SmemStorage smem_storage;
// Thread block instance
PersistentBlockHisto256T tiles(smem_storage, d_samples, d_out_histograms.array);
// Consume tiles using thread block instance
int dummy_result;
GridMapping<PersistentBlockHisto256Policy::GRID_MAPPING>::ConsumeTiles(
tiles, num_samples, even_share, queue, dummy_result);
}
/**
* Aggregation kernel for aggregating privatized threadblock histograms from a previous kernel invocation.
*/
template <
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename HistoCounter> ///< Integral type for counting sample occurrences per histogram bin
__launch_bounds__ (256, 1)
__global__ void AggregateHisto256Kernel(
HistoCounter* d_block_histograms_linear, ///< [in] Histogram counter data having logical dimensions <tt>HistoCounter[ACTIVE_CHANNELS][num_threadblocks][256]</tt>
ArrayWrapper<HistoCounter*, ACTIVE_CHANNELS> d_out_histograms, ///< [out] Histogram counter data having logical dimensions <tt>HistoCounter[ACTIVE_CHANNELS][256]</tt>
int num_threadblocks) ///< [in] Number of threadblock histograms per channel in \p d_block_histograms
{
// Accumulate threadblock-histograms from the channel
HistoCounter bin_aggregate = 0;
int block_offset = blockIdx.x * (num_threadblocks * 256);
int block_oob = block_offset + (num_threadblocks * 256);
#if CUB_PTX_ARCH >= 200
#pragma unroll 32
#endif
while (block_offset < block_oob)
{
bin_aggregate += d_block_histograms_linear[block_offset + threadIdx.x];
block_offset += 256;
}
// Output
d_out_histograms.array[blockIdx.x][threadIdx.x] = bin_aggregate;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* DeviceHisto256
*****************************************************************************/
/**
* \addtogroup DeviceModule
* @{
*/
/**
 * \brief DeviceHisto256 provides device-wide parallel operations for constructing 256-bin histogram(s) over data samples residing within global memory.
*/
struct DeviceHisto256
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/// Generic structure for encapsulating dispatch properties. Mirrors the constants within PersistentBlockHisto256Policy.
struct KernelDispachParams
{
// Policy fields
int block_threads;
int items_per_thread;
PersistentBlockHisto256Algorithm block_algorithm;
GridMappingStrategy grid_mapping;
int subscription_factor;
// Derived fields
int tile_size;
template <typename PersistentBlockHisto256Policy>
__host__ __device__ __forceinline__
void Init(int subscription_factor = 1)
{
block_threads = PersistentBlockHisto256Policy::BLOCK_THREADS;
items_per_thread = PersistentBlockHisto256Policy::ITEMS_PER_THREAD;
block_algorithm = PersistentBlockHisto256Policy::GRID_ALGORITHM;
grid_mapping = PersistentBlockHisto256Policy::GRID_MAPPING;
this->subscription_factor = subscription_factor;
tile_size = block_threads * items_per_thread;
}
__host__ __device__ __forceinline__
void Print()
{
printf("%d, %d, %d, %d, %d",
block_threads,
items_per_thread,
block_algorithm,
grid_mapping,
subscription_factor);
}
};
/// Specializations of tuned policy types for different PTX architectures
template <
int CHANNELS,
int ACTIVE_CHANNELS,
PersistentBlockHisto256Algorithm GRID_ALGORITHM,
int ARCH>
struct TunedPolicies;
/// SM35 tune
template <int CHANNELS, int ACTIVE_CHANNELS, PersistentBlockHisto256Algorithm GRID_ALGORITHM>
struct TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 350>
{
typedef PersistentBlockHisto256Policy<
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? 128 : 256,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? 12 : (30 / ACTIVE_CHANNELS),
GRID_ALGORITHM,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? GRID_MAPPING_DYNAMIC : GRID_MAPPING_EVEN_SHARE,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? 8 : 1> MultiBlockPolicy;
enum { SUBSCRIPTION_FACTOR = 7 };
};
/// SM30 tune
template <int CHANNELS, int ACTIVE_CHANNELS, PersistentBlockHisto256Algorithm GRID_ALGORITHM>
struct TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 300>
{
typedef PersistentBlockHisto256Policy<
128,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? 20 : (22 / ACTIVE_CHANNELS),
GRID_ALGORITHM,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? GRID_MAPPING_DYNAMIC : GRID_MAPPING_EVEN_SHARE,
1> MultiBlockPolicy;
enum { SUBSCRIPTION_FACTOR = 1 };
};
/// SM20 tune
template <int CHANNELS, int ACTIVE_CHANNELS, PersistentBlockHisto256Algorithm GRID_ALGORITHM>
struct TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 200>
{
typedef PersistentBlockHisto256Policy<
128,
(GRID_ALGORITHM == GRID_HISTO_256_SORT) ? 21 : (23 / ACTIVE_CHANNELS),
GRID_ALGORITHM,
GRID_MAPPING_DYNAMIC,
1> MultiBlockPolicy;
enum { SUBSCRIPTION_FACTOR = 1 };
};
/// SM10 tune
template <int CHANNELS, int ACTIVE_CHANNELS, PersistentBlockHisto256Algorithm GRID_ALGORITHM>
struct TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 100>
{
typedef PersistentBlockHisto256Policy<
128,
7,
GRID_HISTO_256_SORT, // (use sort regardless because atomics are perf-useless)
GRID_MAPPING_EVEN_SHARE,
1> MultiBlockPolicy;
enum { SUBSCRIPTION_FACTOR = 1 };
};
/// Tuning policy(ies) for the PTX architecture that DeviceHisto256 operations will get dispatched to
template <
int CHANNELS,
int ACTIVE_CHANNELS,
PersistentBlockHisto256Algorithm GRID_ALGORITHM>
struct PtxDefaultPolicies
{
static const int PTX_TUNE_ARCH = (CUB_PTX_ARCH >= 350) ?
350 :
(CUB_PTX_ARCH >= 300) ?
300 :
(CUB_PTX_ARCH >= 200) ?
200 :
100;
// Tuned policy set for the current PTX compiler pass
typedef TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, PTX_TUNE_ARCH> PtxPassTunedPolicies;
// Subscription factor for the current PTX compiler pass
static const int SUBSCRIPTION_FACTOR = PtxPassTunedPolicies::SUBSCRIPTION_FACTOR;
// MultiBlockPolicy that opaquely derives from the specialization corresponding to the current PTX compiler pass
struct MultiBlockPolicy : PtxPassTunedPolicies::MultiBlockPolicy {};
/**
* Initialize dispatch params with the policies corresponding to the PTX assembly we will use
*/
static void InitDispatchParams(int ptx_version, KernelDispachParams &multi_block_dispatch_params)
{
if (ptx_version >= 350)
{
typedef TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 350> TunedPolicies;
multi_block_dispatch_params.Init<typename TunedPolicies::MultiBlockPolicy>(TunedPolicies::SUBSCRIPTION_FACTOR);
}
else if (ptx_version >= 300)
{
typedef TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 300> TunedPolicies;
multi_block_dispatch_params.Init<typename TunedPolicies::MultiBlockPolicy>(TunedPolicies::SUBSCRIPTION_FACTOR);
}
else if (ptx_version >= 200)
{
typedef TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 200> TunedPolicies;
multi_block_dispatch_params.Init<typename TunedPolicies::MultiBlockPolicy>(TunedPolicies::SUBSCRIPTION_FACTOR);
}
else
{
typedef TunedPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM, 100> TunedPolicies;
multi_block_dispatch_params.Init<typename TunedPolicies::MultiBlockPolicy>(TunedPolicies::SUBSCRIPTION_FACTOR);
}
}
};
/**
* Internal dispatch routine for invoking device-wide, multi-channel, 256-bin histogram
*/
template <
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InitHisto256KernelPtr, ///< Function type of cub::InitHisto256Kernel
typename MultiBlockHisto256KernelPtr, ///< Function type of cub::MultiBlockHisto256Kernel
typename AggregateHisto256KernelPtr, ///< Function type of cub::AggregateHisto256Kernel
typename InputIteratorRA, ///< The input iterator type (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
typename HistoCounter, ///< Integral type for counting sample occurrences per histogram bin
typename SizeT> ///< Integral type used for global array indexing
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
InitHisto256KernelPtr init_kernel_ptr, ///< [in] Kernel function pointer to parameterization of cub::InitHisto256Kernel
MultiBlockHisto256KernelPtr multi_block_kernel_ptr, ///< [in] Kernel function pointer to parameterization of cub::MultiBlockHisto256Kernel
AggregateHisto256KernelPtr aggregate_kernel_ptr, ///< [in] Kernel function pointer to parameterization of cub::AggregateHisto256Kernel
KernelDispachParams &multi_block_dispatch_params, ///< [in] Dispatch parameters that match the policy that \p multi_block_kernel_ptr was compiled for
InputIteratorRA d_samples, ///< [in] Input samples to histogram
HistoCounter *(&d_histograms)[ACTIVE_CHANNELS], ///< [out] Array of channel histograms, each having 256 counters of integral type \p HistoCounter.
SizeT num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator *device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
#ifndef CUB_RUNTIME_ENABLED
// Kernel launch not supported from this device
return CubDebug(cudaErrorInvalidConfiguration);
#else
HistoCounter *d_block_histograms_linear = NULL; // Temporary storage
GridEvenShare<SizeT> even_share; // Even-share work distribution
GridQueue<SizeT> queue; // Dynamic, queue-based work distribution
cudaError error = cudaSuccess;
do
{
// Setup array wrapper for histogram channel output because we can't pass static arrays as kernel parameters
ArrayWrapper<HistoCounter*, ACTIVE_CHANNELS> d_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
d_histo_wrapper.array[CHANNEL] = d_histograms[CHANNEL];
}
// Initialize counters and queue descriptor if necessary
if ((multi_block_dispatch_params.grid_mapping == GRID_MAPPING_DYNAMIC) ||
(multi_block_dispatch_params.block_algorithm == GRID_HISTO_256_GLOBAL_ATOMIC))
{
queue.Allocate(device_allocator);
if (stream_synchronous) CubLog("Invoking init_kernel_ptr<<<%d, 256, 0, %lu>>>()\n", ACTIVE_CHANNELS, (unsigned long) stream);
init_kernel_ptr<<<ACTIVE_CHANNELS, 256, 0, stream>>>(queue, d_histo_wrapper, num_samples);
#ifndef __CUDA_ARCH__
// Sync the stream on the host
if (stream_synchronous && CubDebug(error = cudaStreamSynchronize(stream))) break;
#else
// Sync the entire device on the device (cudaStreamSynchronize doesn't exist on device)
if (stream_synchronous && CubDebug(error = cudaDeviceSynchronize())) break;
#endif
}
// Determine grid size for the multi-block kernel
int device_ordinal;
if (CubDebug(error = cudaGetDevice(&device_ordinal))) break;
int sm_count;
if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break;
// Rough estimate of SM occupancies based upon the maximum SM occupancy of the targeted PTX architecture
int multi_sm_occupancy = CUB_MIN(
ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADBLOCKS,
ArchProps<CUB_PTX_ARCH>::MAX_SM_THREADS / multi_block_dispatch_params.block_threads);
#ifndef __CUDA_ARCH__
// We're on the host, so come up with a more accurate estimate of SM occupancies from actual device properties
Device device_props;
if (CubDebug(error = device_props.Init(device_ordinal))) break;
if (CubDebug(error = device_props.MaxSmOccupancy(
multi_sm_occupancy,
multi_block_kernel_ptr,
multi_block_dispatch_params.block_threads))) break;
#endif
int multi_occupancy = multi_sm_occupancy * sm_count;
int multi_tile_size = multi_block_dispatch_params.block_threads * multi_block_dispatch_params.items_per_thread;
int multi_grid_size;
switch (multi_block_dispatch_params.grid_mapping)
{
case GRID_MAPPING_EVEN_SHARE:
// Work is distributed evenly
even_share.GridInit(
num_samples,
multi_occupancy * multi_block_dispatch_params.subscription_factor,
multi_tile_size);
// Set MultiBlock grid size
multi_grid_size = even_share.grid_size;
break;
case GRID_MAPPING_DYNAMIC:
// Prepare queue to distribute work dynamically
int num_tiles = (num_samples + multi_tile_size - 1) / multi_tile_size;
// Set MultiBlock grid size
multi_grid_size = (num_tiles < multi_occupancy) ?
num_tiles : // Not enough to fill the device with threadblocks
multi_occupancy; // Fill the device with threadblocks
break;
};
// Bind textures if the iterator supports it
#ifndef __CUDA_ARCH__
if (CubDebug(error = BindIteratorTexture(d_samples))) break;
#endif // __CUDA_ARCH__
// Invoke MultiBlockHisto256
if (stream_synchronous) CubLog("Invoking multi_block_kernel_ptr<<<%d, %d, 0, %lu>>>(), %d items per thread, %d SM occupancy\n",
multi_grid_size, multi_block_dispatch_params.block_threads, (unsigned long) stream, multi_block_dispatch_params.items_per_thread, multi_sm_occupancy);
if ((multi_grid_size == 1) || (multi_block_dispatch_params.block_algorithm == GRID_HISTO_256_GLOBAL_ATOMIC))
{
// A single pass will do
multi_block_kernel_ptr<<<multi_grid_size, multi_block_dispatch_params.block_threads, 0, stream>>>(
d_samples,
d_histo_wrapper,
num_samples,
even_share,
queue);
}
else
{
// Use two-pass approach to compute and reduce privatized block histograms
// Allocate temporary storage for privatized thread block histograms in each channel
if (CubDebug(error = DeviceAllocate(
(void**) &d_block_histograms_linear,
ACTIVE_CHANNELS * multi_grid_size * sizeof(HistoCounter) * 256,
device_allocator))) break;
// Setup array wrapper for temporary histogram channel output because we can't pass static arrays as kernel parameters
ArrayWrapper<HistoCounter*, ACTIVE_CHANNELS> d_temp_histo_wrapper;
for (int CHANNEL = 0; CHANNEL < ACTIVE_CHANNELS; ++CHANNEL)
{
d_temp_histo_wrapper.array[CHANNEL] = d_block_histograms_linear + (CHANNEL * multi_grid_size * 256);
}
multi_block_kernel_ptr<<<multi_grid_size, multi_block_dispatch_params.block_threads, 0, stream>>>(
d_samples,
d_temp_histo_wrapper,
num_samples,
even_share,
queue);
#ifndef __CUDA_ARCH__
// Sync the stream on the host
if (stream_synchronous && CubDebug(error = cudaStreamSynchronize(stream))) break;
#else
// Sync the entire device on the device (cudaStreamSynchronize doesn't exist on device)
if (stream_synchronous && CubDebug(error = cudaDeviceSynchronize())) break;
#endif
if (stream_synchronous) CubLog("Invoking aggregate_kernel_ptr<<<%d, %d, 0, %lu>>>()\n",
ACTIVE_CHANNELS, 256, (unsigned long) stream);
aggregate_kernel_ptr<<<ACTIVE_CHANNELS, 256, 0, stream>>>(
d_block_histograms_linear,
d_histo_wrapper,
multi_grid_size);
}
#ifndef __CUDA_ARCH__
// Sync the stream on the host
if (stream_synchronous && CubDebug(error = cudaStreamSynchronize(stream))) break;
#else
// Sync the entire device on the device (cudaStreamSynchronize doesn't exist on device)
if (stream_synchronous && CubDebug(error = cudaDeviceSynchronize())) break;
#endif
}
while (0);
// Free temporary storage allocation
if (d_block_histograms_linear)
error = CubDebug(DeviceFree(d_block_histograms_linear, device_allocator));
// Free queue allocation
if ((multi_block_dispatch_params.grid_mapping == GRID_MAPPING_DYNAMIC) ||
(multi_block_dispatch_params.block_algorithm == GRID_HISTO_256_GLOBAL_ATOMIC))
{
error = CubDebug(queue.Free(device_allocator));
}
// Unbind texture
#ifndef __CUDA_ARCH__
error = CubDebug(UnbindIteratorTexture(d_samples));
#endif // __CUDA_ARCH__
return error;
#endif
}
/**
* \brief Computes a 256-bin device-wide histogram
*
* \tparam GRID_ALGORITHM cub::PersistentBlockHisto256Algorithm enumerator specifying the underlying algorithm to use
* \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
PersistentBlockHisto256Algorithm GRID_ALGORITHM,
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t Dispatch(
InputIteratorRA d_samples, ///< [in] Input samples to histogram
HistoCounter *(&d_histograms)[ACTIVE_CHANNELS], ///< [out] Array of channel histograms, each having 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
// Type used for array indexing
typedef int SizeT;
// Tuning polices for the PTX architecture that will get dispatched to
typedef PtxDefaultPolicies<CHANNELS, ACTIVE_CHANNELS, GRID_ALGORITHM> PtxDefaultPolicies;
typedef typename PtxDefaultPolicies::MultiBlockPolicy MultiBlockPolicy;
cudaError error = cudaSuccess;
do
{
// Declare dispatch parameters
KernelDispachParams multi_block_dispatch_params;
#ifdef __CUDA_ARCH__
// We're on the device, so initialize the dispatch parameters with the PtxDefaultPolicies directly
multi_block_dispatch_params.Init<MultiBlockPolicy>(PtxDefaultPolicies::SUBSCRIPTION_FACTOR);
#else
// We're on the host, so lookup and initialize the dispatch parameters with the policies that match the device's PTX version
int ptx_version;
if (CubDebug(error = PtxVersion(ptx_version))) break;
PtxDefaultPolicies::InitDispatchParams(ptx_version, multi_block_dispatch_params);
#endif
Dispatch<CHANNELS, ACTIVE_CHANNELS>(
InitHisto256Kernel<ACTIVE_CHANNELS, SizeT, HistoCounter>,
MultiBlockHisto256Kernel<MultiBlockPolicy, CHANNELS, ACTIVE_CHANNELS, InputIteratorRA, HistoCounter, SizeT>,
AggregateHisto256Kernel<ACTIVE_CHANNELS, HistoCounter>,
multi_block_dispatch_params,
d_samples,
d_histograms,
num_samples,
stream,
stream_synchronous,
device_allocator);
if (CubDebug(error)) break;
}
while (0);
return error;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//---------------------------------------------------------------------
// Public interface
//---------------------------------------------------------------------
/**
* \brief Computes a 256-bin device-wide histogram. Uses fast block-sorting to compute the histogram.
*
* Delivers consistent throughput regardless of sample diversity.
*
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t SingleChannel(
InputIteratorRA d_samples, ///< [in] Input samples
HistoCounter* d_histogram, ///< [out] Array of 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_SORT, 1, 1>(
d_samples, &d_histogram, num_samples, stream, stream_synchronous, device_allocator);
}
/**
* \brief Computes a 256-bin device-wide histogram. Uses shared-memory atomic read-modify-write operations to compute the histogram.
*
* Sample input with lower diversity causes performance to degrade.
*
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t SingleChannelAtomic(
InputIteratorRA d_samples, ///< [in] Input samples
HistoCounter* d_histogram, ///< [out] Array of 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_SHARED_ATOMIC, 1, 1>(
d_samples, &d_histogram, num_samples, stream, stream_synchronous, device_allocator);
}
/**
* \brief Computes a 256-bin device-wide histogram. Uses global-memory atomic read-modify-write operations to compute the histogram.
*
* Sample input with lower diversity causes performance to degrade.
*
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t SingleChannelGlobalAtomic(
InputIteratorRA d_samples, ///< [in] Input samples
HistoCounter* d_histogram, ///< [out] Array of 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_GLOBAL_ATOMIC, 1, 1>(
d_samples, &d_histogram, num_samples, stream, stream_synchronous, device_allocator);
}
/**
* \brief Computes a 256-bin device-wide histogram from multi-channel data. Uses fast block-sorting to compute the histogram.
*
* Delivers consistent throughput regardless of sample diversity.
*
* \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
int CHANNELS,
int ACTIVE_CHANNELS,
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t MultiChannel(
InputIteratorRA d_samples, ///< [in] Input samples. (Channels, if any, are interleaved in "AOS" format)
HistoCounter *(&d_histograms)[ACTIVE_CHANNELS], ///< [out] Array of channel histograms, each having 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_SORT, CHANNELS, ACTIVE_CHANNELS>(
d_samples, d_histograms, num_samples, stream, stream_synchronous, device_allocator);
}
/**
* \brief Computes a 256-bin device-wide histogram from multi-channel data. Uses shared-memory atomic read-modify-write operations to compute the histogram.
*
* Sample input with lower diversity causes performance to degrade.
*
* \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t MultiChannelAtomic(
InputIteratorRA d_samples, ///< [in] Input samples. (Channels, if any, are interleaved in "AOS" format)
HistoCounter *(&d_histograms)[ACTIVE_CHANNELS], ///< [out] Array of channel histograms, each having 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_SHARED_ATOMIC, CHANNELS, ACTIVE_CHANNELS>(
d_samples, d_histograms, num_samples, stream, stream_synchronous, device_allocator);
}
/**
* \brief Computes a 256-bin device-wide histogram from multi-channel data. Uses global-memory atomic read-modify-write operations to compute the histogram.
*
* Sample input with lower diversity causes performance to degrade.
*
* \tparam CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
* \tparam ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed
* \tparam InputIteratorRA <b>[inferred]</b> The random-access iterator type for input (may be a simple pointer type). Must have a value type that is assignable to <tt>unsigned char</tt>
* \tparam HistoCounter <b>[inferred]</b> Integral type for counting sample occurrences per histogram bin
*/
template <
int CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed)
int ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed
typename InputIteratorRA,
typename HistoCounter>
__host__ __device__ __forceinline__
static cudaError_t MultiChannelGlobalAtomic(
InputIteratorRA d_samples, ///< [in] Input samples. (Channels, if any, are interleaved in "AOS" format)
HistoCounter *(&d_histograms)[ACTIVE_CHANNELS], ///< [out] Array of channel histograms, each having 256 counters of integral type \p HistoCounter.
int num_samples, ///< [in] Number of samples to process
cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream-0.
bool stream_synchronous = false, ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Default is \p false.
DeviceAllocator* device_allocator = DefaultDeviceAllocator()) ///< [in] <b>[optional]</b> Allocator for allocating and freeing device memory. Default is provided by DefaultDeviceAllocator.
{
return Dispatch<GRID_HISTO_256_GLOBAL_ATOMIC, CHANNELS, ACTIVE_CHANNELS>(
d_samples, d_histograms, num_samples, stream, stream_synchronous, device_allocator);
}
};
/** @} */ // DeviceModule
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
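// ---------------------------------------------------------------------
// Usage sketch (not part of the library above): how the SingleChannel
// entry point might be invoked from host code. The name of the enclosing
// dispatch class is not visible in this excerpt, so `DeviceHisto256` is
// an assumption here, and d_samples / d_histogram are hypothetical
// device buffers.
//
//   unsigned char *d_samples;     // num_samples bytes of input on the device
//   unsigned int  *d_histogram;   // 256 counters on the device
//   cudaMalloc(&d_samples,   num_samples * sizeof(unsigned char));
//   cudaMalloc(&d_histogram, 256 * sizeof(unsigned int));
//   // ... fill d_samples ...
//   cudaError_t e = DeviceHisto256::SingleChannel(
//       d_samples, d_histogram, num_samples,
//       0,      // stream
//       true);  // stream_synchronous: sync and check errors after each launch
// ---------------------------------------------------------------------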
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include "cub/device/device_radix_sort.cuh"
#include "cub/util_allocator.cuh"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sys/time.h>
#include <algorithm>
#include <vector>
#include <torch/extension.h>   // torch::Tensor is used by CUDARadixSelectTopK / rdxtopk_cuda below
// #include "printFunctions.cuh"
// #include "generateProblems.cuh"
// #include "topk.h"
using namespace std;
using namespace cub;
#define maxThreadsPerBlock 1024
/**
 * Computes the histogram over the digit values of an array of keys whose length MUST be an integer multiple of (KPT * blockDim.x).
 * The padding to the integer multiple can be done by appending 0's and subtracting the number of padded 0's from the final result's 0 bin.
 * The 2^NUM_BITS possible counts (0..2^NUM_BITS-1) will be placed in global_histo.
 * @param keys [IN] The keys for which to compute the histogram
 * @param digit [IN] Index of the radix pass (0 selects the most significant NUM_BITS bits)
 * @param global_histo [OUT] The array of element counts, MUST be 256 in size.
 * @param per_block_histo [OUT] Per-block histogram output (unused here; the corresponding store is commented out in the kernel body)
*/
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, a thread's keys are sorted in runs of the given length to improve run-length-encoded updates to shared memory.
>
__global__ void rdxsrt_histogram(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT]; // local keys in a thread
uint tloc_masked[KPT];
__shared__ uint shared_bins[0x01<<NUM_BITS]; // allocate a shared histogram in shared memory
/*** INIT SHARED HISTO ***/
if(threadIdx.x < 32){
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * blockIdx.x * KPT);
// Load keys
// KeyLoader(block_offset, threadIdx.x).template LoadStrided<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
#if true || USE_RLE_HISTO
// Mask
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); // get the bin index
}
/*** COMPUTE HISTO ***/
uint rle = 1;
#pragma unroll
for(int i=1; i<KPT; i++){
if(tloc_masked[i] == tloc_masked[i-1]) // decrease the number of atomicAdd as much as possible
rle++;
else{
atomicAdd(&shared_bins[tloc_masked[i-1]], rle);
rle=1;
}
}
atomicAdd(&shared_bins[tloc_masked[KPT-1]], rle);
#else
#pragma unroll
for(int i=0; i<KPT; i++){
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
#endif
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]); // actually bucket_idx is 0 all the time (according to the code), thus we have global_histo index equal to shared_bins index
// per_block_histo[blockIdx.x*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
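// Host-side sketch (illustrative only, not used by the kernels above): the digit
// extraction performed at `tloc_masked[i] = ...`, written out for a 32-bit key with
// NUM_BITS = 8. digit = 0 selects the most significant byte, digit = 3 the least
// significant one; keys are assumed to be already twiddled via KeyTraits::TwiddleIn.
static inline unsigned int extract_digit_host(unsigned int twiddled_key, unsigned int digit, int num_bits = 8)
{
  const int shift = (int)(sizeof(unsigned int) * 8) - num_bits * (int)(digit + 1);
  return (twiddled_key >> shift) & ((0x01u << num_bits) - 1u);
}
// e.g. extract_digit_host(0xAABBCCDDu, 0) == 0xAA and extract_digit_host(0xAABBCCDDu, 3) == 0xDD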
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, a thread's keys are sorted in runs of the given length to improve run-length-encoded updates to shared memory.
>
__global__ void rdxsrt_histogram_with_guards(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo, const IndexT total_keys, const int block_index_offset)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT];
uint tloc_masked[KPT];
__shared__ uint shared_bins[(0x01<<NUM_BITS) + 1];
/*** INIT SHARED HISTO ***/
if (threadIdx.x < 32) {
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * (block_index_offset + blockIdx.x) * KPT);
// Maximum number of keys the block may fetch
const IndexT block_max_num_keys = total_keys - block_offset;
// KeyLoader(block_offset, threadIdx.x).template LoadStridedWithGuards<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys, block_max_num_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
}
#pragma unroll
for(int i=0; i<KPT; i++){
// if(KeyLoader(block_offset, threadIdx.x).ThreadIndexInBounds(block_max_num_keys, i)){
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
}
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]);
// per_block_histo[(block_index_offset + blockIdx.x)*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
/**
 * Makes a single pass over the input array to find entries whose selected digit is equal to or greater than the given digit value.
 * Entries whose digit equals the digit value are written to keys_buffer for future processing; entries whose digit is greater
 * are written to the output array.
* @param d_keys_in [IN] The keys for which to compute the histogram
* @param d_values_in [IN] The values corresponding to the keys
* @param digit [IN] Digit index (0 => highest digit, 3 => lowest digit for 32-bit)
* @param digit_val [IN] Digit value.
* @param num_items [IN] Number of entries.
* @param d_keys_buffer [OUT] Entries with x[digit] = digit_val.
* @param d_keys_out [OUT] Entries with x[digit] > digit_val.
* @param d_values_buffer [OUT] Entry values with x[digit] = digit_val.
* @param d_values_out [OUT] Entry values with x[digit] > digit_val.
* @param d_index_buffer [OUT] Index into d_keys_buffer.
* @param d_index_out [OUT] Index into d_keys_out.
*/
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB // Number of threads per block
>
__global__ void select_kth_bucket(KeyT* d_keys_in, unsigned int* d_values_in, const uint digit, const uint digit_val, uint num_items,
KeyT* d_keys_buffer, KeyT* d_keys_out, unsigned int* d_values_buffer, unsigned int* d_values_out, uint* d_index_buffer, uint* d_index_out)
{
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
// Specialize BlockLoad for a 1D block of TPB threads owning KPT integer items each
typedef cub::BlockLoad<UnsignedBits, TPB, KPT, BLOCK_LOAD_TRANSPOSE> BlockLoadT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, TPB, BLOCK_SCAN_RAKING> BlockScanT;
// a "tile" is the chunk of items processed by one thread block: TPB * KPT items
const int tile_size = TPB * KPT;
int tile_idx = blockIdx.x; // Current tile index
int tile_offset = tile_idx * tile_size;
// Allocate shared memory for BlockLoad
__shared__ union TempStorage
{
typename BlockLoadT::TempStorage load_items;
typename BlockScanT::TempStorage scan;
int offset[1];
UnsignedBits raw_exchange[2 * TPB * KPT];
} temp_storage;
// Load a segment of consecutive items that are blocked across threads
UnsignedBits key_entries[KPT];
unsigned int value_entries[KPT];
/*float payload_entries[KPT];*/
int selection_flags[KPT];
int selection_indices[KPT];
int num_tiles = (num_items + tile_size - 1) / tile_size;
int num_tile_items = tile_size;
bool is_last_tile = false;
if (tile_idx == num_tiles - 1) {
num_tile_items = num_items - tile_offset;
is_last_tile = true;
}
// Load keys and values
if (is_last_tile) {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries, num_tile_items);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries, num_tile_items);
}
else {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries);
}
__syncthreads();
/*** Step 1: Find keys whose digit value is greater than the selected digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key > digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
int num_selected;
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_out;
if (threadIdx.x == 0) {
// Find index into keys_out array
index_out = atomicAdd(d_index_out, num_selected);
temp_storage.offset[0] = index_out;
}
__syncthreads();
index_out = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out matched entries to output array
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_out)[index_out + item] = temp_storage.raw_exchange[item];
d_values_out[index_out + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
#if 0
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
payload_out[num_selections_prefix + item] = temp_storage.raw_exchange[tile_size + item];
}
#endif
}
/*** Step 2: Find entries that have digit equal to digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key == digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_buffer;
if (threadIdx.x == 0) {
index_buffer = atomicAdd(d_index_buffer, num_selected);
temp_storage.offset[0] = index_buffer;
}
__syncthreads();
index_buffer = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out output entries
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_buffer)[index_buffer + item] = temp_storage.raw_exchange[item];
d_values_buffer[index_buffer + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
}
}
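// Sequential reference sketch (an illustration, not called by the code above):
// the partitioning that select_kth_bucket performs, collapsed into one CPU pass.
// Keys whose current digit is greater than digit_val go straight to the output;
// keys whose digit equals digit_val are kept for the next, less significant pass.
// Keys are assumed to be already twiddled to their unsigned representation.
static void select_kth_bucket_reference(const std::vector<unsigned int>& twiddled_keys,
                                        unsigned int digit, unsigned int digit_val, int num_bits,
                                        std::vector<unsigned int>& keys_out,
                                        std::vector<unsigned int>& keys_buffer)
{
  for (unsigned int key : twiddled_keys) {
    const int shift = (int)(sizeof(unsigned int) * 8) - num_bits * (int)(digit + 1);
    const unsigned int masked = (key >> shift) & ((1u << num_bits) - 1u);
    if (masked > digit_val)       keys_out.push_back(key);     // already known to be among the top elements
    else if (masked == digit_val) keys_buffer.push_back(key);  // candidate set for the next digit
  }
}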
__global__ void set_index_array(unsigned int* array, unsigned int len) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
while (i < len) {
array[i] = i;
i += gridSize;
}
}
#define KPT 16
#define TPB 384
#define DIGIT_BITS 8
cudaError_t CUDARadixSelectTopK(torch::Tensor d_keys_in,
torch::Tensor d_indices_in,
unsigned int num_items,
unsigned int k,
float *d_keys_out,
unsigned int *d_values_out) {
cudaError error = cudaSuccess;
// get helper buffers
// unsigned int *d_histogram = buf->histogram;
// unsigned int *d_index_out = buf->index_out;
// unsigned int *d_index_buffer = buf->index_buffer;
// float* keys_double_buffer[2] = {buf->keys_buffer0, buf->keys_buffer1};
// unsigned int* values_double_buffer[2] = {buf->value_buffer0, buf->value_buffer1};
unsigned char current_keys_buffer = 0;
//initialize buffer with empty tensor
//unsigned int *d_histogram = (uint*)torch::zeros(256*128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_out = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_buffer = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
unsigned int *d_histogram, *d_index_out, *d_index_buffer;
cudaMalloc(&d_histogram, 256 * 128 * sizeof(unsigned int));  // cudaMalloc takes bytes; sized to match the commented-out tensor allocation above
cudaMalloc(&d_index_out, 128 * sizeof(unsigned int));
cudaMalloc(&d_index_buffer, 128 * sizeof(unsigned int));
torch::Tensor keys_double_tensor[2] = {d_keys_in.clone(), d_keys_in.clone()};
torch::Tensor indices_double_tensor[2] = {d_indices_in.clone(), d_indices_in.clone()};
float* keys_double_buffer[2] = {(float*)keys_double_tensor[0].data_ptr(), (float*)keys_double_tensor[1].data_ptr()};
unsigned int* values_double_buffer[2] = {(unsigned int*)indices_double_tensor[0].data_ptr(), (unsigned int*)indices_double_tensor[1].data_ptr()};
//float* keys_double_buffer[2] = {(float*)d_keys_in.clone().data_ptr(),
// (float*)d_keys_in.clone().data_ptr()};
//unsigned int* values_double_buffer[2] = {(uint*)d_indices_in.clone().data_ptr(),
// (uint*)d_indices_in.clone().data_ptr()};
// Set the index into output array to 0.
cudaMemset(d_index_out, 0, 4);
unsigned int KPB = KPT * TPB;
unsigned int *h_histogram = new unsigned int[256];
// set value array (index)
// int blocksPerGrid = (int) ceil(1.0 * num_items / TPB);
// set_index_array<<<blocksPerGrid, TPB, 0>>>(values_double_buffer[current_keys_buffer], num_items);
// enumerate each digit (32-bit data (float32) processed 8 bits per pass, so 4 digits in total)
for (unsigned int digit = 0; digit < 4; digit++) {
unsigned int num_blocks = num_items / KPB;// Pass-0 rough processing blocks (floor on purpose)
unsigned int processed_elements = num_blocks * KPB;// Pass-0 number of rough processed elements
unsigned int remaining_elements = num_items - processed_elements;// Do the remaining elements with a check in the inner loop
unsigned int remainder_blocks = (KPB - 1 + remaining_elements) / KPB;// Number of blocks required for remaining elements (typically 0 or 1)
/******************************************************************************************/
/* Calculate Histogram */
/******************************************************************************************/
// Zero out the histogram
cudaMemset(d_histogram, 0, 256 * sizeof(int));
float* d_current_keys_in = keys_double_buffer[current_keys_buffer];
unsigned int* d_current_value_in = values_double_buffer[current_keys_buffer];
if (num_blocks > 0)
rdxsrt_histogram<float, uint, DIGIT_BITS, KPT, TPB, 9><<<num_blocks, TPB, 0>>>(d_current_keys_in, digit, d_histogram);
if (remaining_elements > 0)
rdxsrt_histogram_with_guards<float, uint, DIGIT_BITS, KPT, TPB, 9><<<remainder_blocks, TPB, 0>>>(d_current_keys_in, digit, d_histogram, num_items, num_blocks);
/******************************************************************************************/
/* Find the bin which contains the Kth largest element */
/******************************************************************************************/
cudaMemcpy(h_histogram, d_histogram, 256 * sizeof(uint), cudaMemcpyDeviceToHost);
// currently we find the bin on host, hence we need to synchronize the stream
// cudaStreamSynchronize(stream);
unsigned int rolling_sum = 0;
unsigned int digit_val = 0;  // bin index containing the k-th largest element
for (int i = 255; i >= 0; i--) {
if ((rolling_sum + h_histogram[i]) > k) {
digit_val = i;
k -= rolling_sum;
break;
}
rolling_sum += h_histogram[i];
}
cudaMemset(d_index_buffer, 0, 4);
select_kth_bucket<float, unsigned int, DIGIT_BITS, KPT, TPB><<<num_blocks + remainder_blocks, TPB, 0>>>(d_current_keys_in,
d_current_value_in,
digit,
digit_val,
num_items,
keys_double_buffer[1-current_keys_buffer],
d_keys_out,
values_double_buffer[1-current_keys_buffer],
d_values_out,
d_index_buffer,
d_index_out);
uint h_index_out;
uint h_index_buffer;
cudaMemcpy(&h_index_out, d_index_out, sizeof(uint), cudaMemcpyDeviceToHost);
cudaMemcpy(&h_index_buffer, d_index_buffer, sizeof(uint), cudaMemcpyDeviceToHost);
// cudaStreamSynchronize(stream);
// Update number of items to reflect reduced number of elements.
num_items = h_index_buffer;
if (k == 0) break;
else if (k != 0 && digit == 3) {
// We are at the last digit and k != 0 implies that the k-th value has repetitions.
// Copy any of the repeated keys (and their values) to the output arrays to complete them.
cudaMemcpy(d_keys_out + h_index_out, keys_double_buffer[1-current_keys_buffer], k * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_values_out + h_index_out, values_double_buffer[1-current_keys_buffer], k * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
k = 0;
}
current_keys_buffer = 1 - current_keys_buffer;
}
delete[] h_histogram;
cudaFree(d_histogram);
cudaFree(d_index_out);
cudaFree(d_index_buffer);
return error;
}
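// Standalone sketch of the host-side bin search used in the loop above (an
// illustrative helper, not called anywhere): scan bins from the largest digit
// value downward until the running count would exceed k. The k-th largest
// element must fall in that bin, and k is reduced by the number of elements
// already known to be larger.
static unsigned int find_kth_bin(const unsigned int histogram[256], unsigned int &k)
{
  unsigned int rolling_sum = 0;
  for (int i = 255; i >= 0; i--) {
    if (rolling_sum + histogram[i] > k) {
      k -= rolling_sum;
      return (unsigned int)i;
    }
    rolling_sum += histogram[i];
  }
  return 0; // only reached if k is not smaller than the total count
}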
// __global__ void _Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int n)
// {
// // set thread ID
// unsigned int tid = threadIdx.x;
// unsigned int gridSize = blockDim.x * gridDim.x;
// unsigned int i = blockIdx.x * blockDim.x + tid;
// unsigned int blockSize = blockDim.x;
// while (i < n) {
// dst_data[i] = (int)src_data[i];
// i += gridSize;
// }
// }
// void Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int num_elements)
// {
// int blocksPerGrid = (int) ceil(1.0 * num_elements / maxThreadsPerBlock);
// _Uint32ToInt32<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst_data, src_data, num_elements);
// }
std::vector<torch::Tensor> rdxtopk_cuda(
torch::Tensor input,torch::Tensor indices, unsigned int k) {
unsigned int num_items = input.numel();
auto d_keys_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kFloat32).device(input.device()));
auto d_values_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kInt).device(input.device()));
CUDARadixSelectTopK(input,indices,
num_items,
k,
(float*)d_keys_out.data_ptr(),
(uint*)d_values_out.data_ptr());
// Uint32ToInt32((int*)d_values_out.data_ptr(), (uint*)d_values_out.data_ptr(), k);
return {d_keys_out, d_values_out};
}
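// Usage sketch (an assumption, shown for illustration): calling rdxtopk_cuda from
// host code built as a PyTorch CUDA extension. `input` is a 1-D float32 CUDA tensor;
// `indices` enumerates its positions so selected values can be traced back.
//
//   torch::Tensor input   = torch::rand({1 << 20},
//       torch::TensorOptions().dtype(torch::kFloat32).device(torch::kCUDA));
//   torch::Tensor indices = torch::arange(input.numel(),
//       torch::TensorOptions().dtype(torch::kInt).device(torch::kCUDA));
//   auto out = rdxtopk_cuda(input, indices, /*k=*/256);
//   torch::Tensor topk_values  = out[0];  // the k largest values (unordered)
//   torch::Tensor topk_indices = out[1];  // their positions in `input`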
/**
* \test Testing the BLAS level 3 routines in the ViennaCL BLAS-like shared library
**/
// include necessary system headers
#include <iostream>
#include <vector>
// Some helper functions for this tutorial:
#include "viennacl.hpp"
#include "viennacl/tools/random.hpp"
#include "viennacl/vector.hpp"
template<typename ScalarType>
ScalarType diff(ScalarType const & s1, ScalarType const & s2)
{
if (s1 > s2 || s1 < s2)
return (s1 - s2) / std::max(std::fabs(s1), std::fabs(s2));
return ScalarType(0);
}
template<typename ScalarType, typename ViennaCLVectorType>
ScalarType diff(std::vector<ScalarType> const & v1, ViennaCLVectorType const & vcl_vec)
{
std::vector<ScalarType> v2_cpu(vcl_vec.size());
viennacl::backend::finish();
viennacl::copy(vcl_vec, v2_cpu);
ScalarType inf_norm = 0;
for (unsigned int i=0;i<v1.size(); ++i)
{
if ( std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) ) > 0 )
v2_cpu[i] = std::fabs(v2_cpu[i] - v1[i]) / std::max( std::fabs(v2_cpu[i]), std::fabs(v1[i]) );
else
v2_cpu[i] = 0.0;
if (v2_cpu[i] > inf_norm)
inf_norm = v2_cpu[i];
}
return inf_norm;
}
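// Example (illustrative): for v1 = {1.0, 2.0} and device values {1.0, 2.2}, the
// element-wise relative errors are {0, |2.2 - 2.0| / 2.2 ~ 0.0909}, so diff()
// returns ~0.0909, the infinity norm of the relative error vector.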
template<typename T, typename U, typename EpsilonT>
void check(T const & t, U const & u, EpsilonT eps)
{
EpsilonT rel_error = std::fabs(static_cast<EpsilonT>(diff(t,u)));
if (rel_error > eps)
{
std::cerr << "Relative error: " << rel_error << std::endl;
std::cerr << "Aborting!" << std::endl;
exit(EXIT_FAILURE);
}
std::cout << "SUCCESS ";
}
template<typename T>
T get_value(std::vector<T> & array, ViennaCLInt i, ViennaCLInt j,
ViennaCLInt start1, ViennaCLInt start2,
ViennaCLInt stride1, ViennaCLInt stride2,
ViennaCLInt rows, ViennaCLInt cols,
ViennaCLOrder order, ViennaCLTranspose trans)
{
// row-major
if (order == ViennaCLRowMajor && trans == ViennaCLTrans)
return array[static_cast<std::size_t>((j*stride1 + start1) * cols + (i*stride2 + start2))];
else if (order == ViennaCLRowMajor && trans != ViennaCLTrans)
return array[static_cast<std::size_t>((i*stride1 + start1) * cols + (j*stride2 + start2))];
// column-major
else if (order != ViennaCLRowMajor && trans == ViennaCLTrans)
return array[static_cast<std::size_t>((j*stride1 + start1) + (i*stride2 + start2) * rows)];
return array[static_cast<std::size_t>((i*stride1 + start1) + (j*stride2 + start2) * rows)];
}
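// Worked example (illustrative): for a row-major, non-transposed matrix with
// start1 = 2, start2 = 3, stride1 = 2, stride2 = 3 and cols = 100, the logical
// element (i, j) = (1, 1) maps to array[(1*2 + 2) * 100 + (1*3 + 3)] = array[406].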
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
ViennaCLOrder order_C, ViennaCLOrder order_A, ViennaCLOrder order_B,
ViennaCLTranspose trans_A, ViennaCLTranspose trans_B,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
);
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
ViennaCLOrder order_C, ViennaCLOrder order_A, ViennaCLOrder order_B,
ViennaCLTranspose trans_A, ViennaCLTranspose trans_B,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
)
{
ViennaCLInt C_size1 = 42;
ViennaCLInt C_size2 = 43;
ViennaCLInt C_start1 = 10;
ViennaCLInt C_start2 = 11;
ViennaCLInt C_stride1 = 2;
ViennaCLInt C_stride2 = 3;
ViennaCLInt C_rows = C_size1 * C_stride1 + C_start1 + 5;
ViennaCLInt C_columns = C_size2 * C_stride2 + C_start2 + 5;
ViennaCLInt A_size1 = trans_A ? 44 : 42;
ViennaCLInt A_size2 = trans_A ? 42 : 44;
ViennaCLInt A_start1 = 12;
ViennaCLInt A_start2 = 13;
ViennaCLInt A_stride1 = 4;
ViennaCLInt A_stride2 = 5;
ViennaCLInt A_rows = A_size1 * A_stride1 + A_start1 + 5;
ViennaCLInt A_columns = A_size2 * A_stride2 + A_start2 + 5;
ViennaCLInt B_size1 = trans_B ? 43 : 44;
ViennaCLInt B_size2 = trans_B ? 44 : 43;
ViennaCLInt B_start1 = 14;
ViennaCLInt B_start2 = 15;
ViennaCLInt B_stride1 = 6;
ViennaCLInt B_stride2 = 7;
ViennaCLInt B_rows = B_size1 * B_stride1 + B_start1 + 5;
ViennaCLInt B_columns = B_size2 * B_stride2 + B_start2 + 5;
// Compute reference:
ViennaCLInt size_k = trans_A ? A_size1 : A_size2;
for (ViennaCLInt i=0; i<C_size1; ++i)
for (ViennaCLInt j=0; j<C_size2; ++j)
{
float val_float = 0;
double val_double = 0;
for (ViennaCLInt k=0; k<size_k; ++k)
{
float val_A_float = get_value(A_float, i, k, A_start1, A_start2, A_stride1, A_stride2, A_rows, A_columns, order_A, trans_A);
double val_A_double = get_value(A_double, i, k, A_start1, A_start2, A_stride1, A_stride2, A_rows, A_columns, order_A, trans_A);
float val_B_float = get_value(B_float, k, j, B_start1, B_start2, B_stride1, B_stride2, B_rows, B_columns, order_B, trans_B);
double val_B_double = get_value(B_double, k, j, B_start1, B_start2, B_stride1, B_stride2, B_rows, B_columns, order_B, trans_B);
val_float += val_A_float * val_B_float;
val_double += val_A_double * val_B_double;
}
// write result
if (order_C == ViennaCLRowMajor)
{
C_float [static_cast<std::size_t>((i*C_stride1 + C_start1) * C_columns + (j*C_stride2 + C_start2))] = val_float;
C_double[static_cast<std::size_t>((i*C_stride1 + C_start1) * C_columns + (j*C_stride2 + C_start2))] = val_double;
}
else
{
C_float [static_cast<std::size_t>((i*C_stride1 + C_start1) + (j*C_stride2 + C_start2) * C_rows)] = val_float;
C_double[static_cast<std::size_t>((i*C_stride1 + C_start1) + (j*C_stride2 + C_start2) * C_rows)] = val_double;
}
}
// Run GEMM and compare results:
ViennaCLHostSgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0f,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_A_float), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_B_float), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0f,
viennacl::linalg::host_based::detail::extract_raw_pointer<float>(host_C_float), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_float, host_C_float, eps_float);
ViennaCLHostDgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_A_double), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_B_double), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0,
viennacl::linalg::host_based::detail::extract_raw_pointer<double>(host_C_double), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_double, host_C_double, eps_double);
#ifdef VIENNACL_WITH_CUDA
ViennaCLCUDASgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0f,
viennacl::cuda_arg(cuda_A_float), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::cuda_arg(cuda_B_float), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0f,
viennacl::cuda_arg(cuda_C_float), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_float, cuda_C_float, eps_float);
ViennaCLCUDADgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0,
viennacl::cuda_arg(cuda_A_double), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::cuda_arg(cuda_B_double), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0,
viennacl::cuda_arg(cuda_C_double), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_double, cuda_C_double, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
ViennaCLOpenCLSgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0f,
viennacl::traits::opencl_handle(opencl_A_float), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::traits::opencl_handle(opencl_B_float), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0f,
viennacl::traits::opencl_handle(opencl_C_float), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_float, opencl_C_float, eps_float);
if (opencl_A_double != NULL && opencl_B_double != NULL && opencl_C_double != NULL)
{
ViennaCLOpenCLDgemm(my_backend,
order_A, trans_A, order_B, trans_B, order_C,
C_size1, C_size2, size_k,
1.0,
viennacl::traits::opencl_handle(*opencl_A_double), A_start1, A_start2, A_stride1, A_stride2, (order_A == ViennaCLRowMajor) ? A_columns : A_rows,
viennacl::traits::opencl_handle(*opencl_B_double), B_start1, B_start2, B_stride1, B_stride2, (order_B == ViennaCLRowMajor) ? B_columns : B_rows,
0.0,
viennacl::traits::opencl_handle(*opencl_C_double), C_start1, C_start2, C_stride1, C_stride2, (order_C == ViennaCLRowMajor) ? C_columns : C_rows);
check(C_double, *opencl_C_double, eps_double);
}
#endif
std::cout << std::endl;
}
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
ViennaCLOrder order_C, ViennaCLOrder order_A, ViennaCLOrder order_B,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
);
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
ViennaCLOrder order_C, ViennaCLOrder order_A, ViennaCLOrder order_B,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
)
{
std::cout << " -> trans-trans: ";
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
order_C, order_A, order_B,
ViennaCLTrans, ViennaCLTrans,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> trans-no: ";
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
order_C, order_A, order_B,
ViennaCLTrans, ViennaCLNoTrans,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> no-trans: ";
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
order_C, order_A, order_B,
ViennaCLNoTrans, ViennaCLTrans,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> no-no: ";
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
order_C, order_A, order_B,
ViennaCLNoTrans, ViennaCLNoTrans,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
}
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
);
void test_blas(ViennaCLBackend my_backend,
float eps_float, double eps_double,
std::vector<float> & C_float, std::vector<double> & C_double,
std::vector<float> & A_float, std::vector<double> & A_double,
std::vector<float> & B_float, std::vector<double> & B_double,
viennacl::vector<float> & host_C_float, viennacl::vector<double> & host_C_double,
viennacl::vector<float> & host_A_float, viennacl::vector<double> & host_A_double,
viennacl::vector<float> & host_B_float, viennacl::vector<double> & host_B_double
#ifdef VIENNACL_WITH_CUDA
, viennacl::vector<float> & cuda_C_float, viennacl::vector<double> & cuda_C_double
, viennacl::vector<float> & cuda_A_float, viennacl::vector<double> & cuda_A_double
, viennacl::vector<float> & cuda_B_float, viennacl::vector<double> & cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, viennacl::vector<float> & opencl_C_float, viennacl::vector<double> * opencl_C_double
, viennacl::vector<float> & opencl_A_float, viennacl::vector<double> * opencl_A_double
, viennacl::vector<float> & opencl_B_float, viennacl::vector<double> * opencl_B_double
#endif
)
{
std::cout << " -> C: row, A: row, B: row" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLRowMajor, ViennaCLRowMajor, ViennaCLRowMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: row, A: row, B: col" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLRowMajor, ViennaCLRowMajor, ViennaCLColumnMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: row, A: col, B: row" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLRowMajor, ViennaCLColumnMajor, ViennaCLRowMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: row, A: col, B: col" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLRowMajor, ViennaCLColumnMajor, ViennaCLColumnMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: col, A: row, B: row" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLColumnMajor, ViennaCLRowMajor, ViennaCLRowMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: col, A: row, B: col" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLColumnMajor, ViennaCLRowMajor, ViennaCLColumnMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: col, A: col, B: row" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLColumnMajor, ViennaCLColumnMajor, ViennaCLRowMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
std::cout << " -> C: col, A: col, B: col" << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double, A_float, A_double, B_float, B_double,
ViennaCLColumnMajor, ViennaCLColumnMajor, ViennaCLColumnMajor,
host_C_float, host_C_double, host_A_float, host_A_double, host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double, cuda_A_float, cuda_A_double, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double, opencl_A_float, opencl_A_double, opencl_B_float, opencl_B_double
#endif
);
}
int main()
{
viennacl::tools::uniform_random_numbers<float> randomFloat;
viennacl::tools::uniform_random_numbers<double> randomDouble;
std::size_t size = 500*500;
float eps_float = 1e-5f;
double eps_double = 1e-12;
std::vector<float> C_float(size);
std::vector<float> A_float(size);
std::vector<float> B_float(size);
std::vector<double> C_double(size);
std::vector<double> A_double(size);
std::vector<double> B_double(size);
// fill with random data:
for (std::size_t i = 0; i < size; ++i)
{
C_float[i] = 0.5f + 0.1f * randomFloat();
A_float[i] = 0.5f + 0.1f * randomFloat();
B_float[i] = 0.5f + 0.1f * randomFloat();
C_double[i] = 0.5 + 0.2 * randomDouble();
A_double[i] = 0.5 + 0.2 * randomDouble();
B_double[i] = 0.5 + 0.2 * randomDouble();
}
// Host setup
ViennaCLBackend my_backend;
ViennaCLBackendCreate(&my_backend);
viennacl::vector<float> host_C_float(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(C_float, host_C_float);
viennacl::vector<float> host_A_float(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(A_float, host_A_float);
viennacl::vector<float> host_B_float(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(B_float, host_B_float);
viennacl::vector<double> host_C_double(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(C_double, host_C_double);
viennacl::vector<double> host_A_double(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(A_double, host_A_double);
viennacl::vector<double> host_B_double(size, viennacl::context(viennacl::MAIN_MEMORY)); viennacl::copy(B_double, host_B_double);
// CUDA setup
#ifdef VIENNACL_WITH_CUDA
viennacl::vector<float> cuda_C_float(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(C_float, cuda_C_float);
viennacl::vector<float> cuda_A_float(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(A_float, cuda_A_float);
viennacl::vector<float> cuda_B_float(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(B_float, cuda_B_float);
viennacl::vector<double> cuda_C_double(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(C_double, cuda_C_double);
viennacl::vector<double> cuda_A_double(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(A_double, cuda_A_double);
viennacl::vector<double> cuda_B_double(size, viennacl::context(viennacl::CUDA_MEMORY)); viennacl::copy(B_double, cuda_B_double);
#endif
// OpenCL setup
#ifdef VIENNACL_WITH_OPENCL
ViennaCLInt context_id = 0;
viennacl::vector<float> opencl_C_float(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(C_float, opencl_C_float);
viennacl::vector<float> opencl_A_float(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(A_float, opencl_A_float);
viennacl::vector<float> opencl_B_float(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(B_float, opencl_B_float);
viennacl::vector<double> *opencl_C_double = NULL;
viennacl::vector<double> *opencl_A_double = NULL;
viennacl::vector<double> *opencl_B_double = NULL;
if ( viennacl::ocl::current_device().double_support() )
{
opencl_C_double = new viennacl::vector<double>(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(C_double, *opencl_C_double);
opencl_A_double = new viennacl::vector<double>(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(A_double, *opencl_A_double);
opencl_B_double = new viennacl::vector<double>(size, viennacl::context(viennacl::ocl::get_context(context_id))); viennacl::copy(B_double, *opencl_B_double);
}
ViennaCLBackendSetOpenCLContextID(my_backend, context_id);
#endif
// consistency checks:
check(C_float, host_C_float, eps_float);
check(A_float, host_A_float, eps_float);
check(B_float, host_B_float, eps_float);
check(C_double, host_C_double, eps_double);
check(A_double, host_A_double, eps_double);
check(B_double, host_B_double, eps_double);
#ifdef VIENNACL_WITH_CUDA
check(C_float, cuda_C_float, eps_float);
check(A_float, cuda_A_float, eps_float);
check(B_float, cuda_B_float, eps_float);
check(C_double, cuda_C_double, eps_double);
check(A_double, cuda_A_double, eps_double);
check(B_double, cuda_B_double, eps_double);
#endif
#ifdef VIENNACL_WITH_OPENCL
check(C_float, opencl_C_float, eps_float);
check(A_float, opencl_A_float, eps_float);
check(B_float, opencl_B_float, eps_float);
if ( viennacl::ocl::current_device().double_support() )
{
check(C_double, *opencl_C_double, eps_double);
check(A_double, *opencl_A_double, eps_double);
check(B_double, *opencl_B_double, eps_double);
}
#endif
std::cout << std::endl;
test_blas(my_backend,
eps_float, eps_double,
C_float, C_double,
A_float, A_double,
B_float, B_double,
host_C_float, host_C_double,
host_A_float, host_A_double,
host_B_float, host_B_double
#ifdef VIENNACL_WITH_CUDA
, cuda_C_float, cuda_C_double
, cuda_A_float, cuda_A_double
, cuda_B_float, cuda_B_double
#endif
#ifdef VIENNACL_WITH_OPENCL
, opencl_C_float, opencl_C_double
, opencl_A_float, opencl_A_double
, opencl_B_float, opencl_B_double
#endif
);
#ifdef VIENNACL_WITH_OPENCL
//cleanup
if ( viennacl::ocl::current_device().double_support() )
{
delete opencl_C_double;
delete opencl_A_double;
delete opencl_B_double;
}
#endif
ViennaCLBackendDestroy(&my_backend);
//
// That's it.
//
std::cout << std::endl << "!!!! TEST COMPLETED SUCCESSFULLY !!!!" << std::endl;
return EXIT_SUCCESS;
}
|
the_stack
|
// Project
#include "Geometry.h"
#include "PerfTimer.h"
extern "C" {
void exactinit();
RealType orient3d( RealType *pa, RealType *pb, RealType *pc, RealType *pd );
RealType insphere( RealType *pa, RealType *pb, RealType *pc, RealType *pd, RealType *pe );
}
/////////////////////////////////////////////////////////////////////// Point //
bool Point3::lessThan( const Point3& pt ) const
{
if ( _p[0] < pt._p[0] )
return true;
if ( _p[0] > pt._p[0] )
return false;
if ( _p[1] < pt._p[1] )
return true;
if ( _p[1] > pt._p[1] )
return false;
if ( _p[2] < pt._p[2] )
return true;
return false;
}
bool Point3::operator < ( const Point3& pt ) const
{
return lessThan( pt );
}
///////////////////////////////////////////////////////////////////// Segment //
__host__ __device__ bool Segment::equal( const Segment& seg ) const
{
return ( ( _v[0] == seg._v[0] ) && ( _v[1] == seg._v[1] ) );
}
__host__ __device__ bool Segment::lessThan( const Segment& seg ) const
{
if ( _v[0] < seg._v[0] )
return true;
if ( _v[0] > seg._v[0] )
return false;
if ( _v[1] < seg._v[1] )
return true;
return false;
}
__host__ __device__ bool Segment::operator == ( const Segment& seg ) const
{
return equal( seg );
}
__host__ __device__ bool Segment::operator < ( const Segment& seg ) const
{
return lessThan( seg );
}
//////////////////////////////////////////////////////////////////// Triangle //
__host__ __device__ bool Triangle::equal( const Triangle& tri ) const
{
return ( ( _v[0] == tri._v[0] ) && ( _v[1] == tri._v[1] ) && ( _v[2] == tri._v[2] ) );
}
__host__ __device__ bool Triangle::lessThan( const Triangle& tri ) const
{
if ( _v[0] < tri._v[0] )
return true;
if ( _v[0] > tri._v[0] )
return false;
if ( _v[1] < tri._v[1] )
return true;
if ( _v[1] > tri._v[1] )
return false;
if ( _v[2] < tri._v[2] )
return true;
return false;
}
__host__ __device__ bool Triangle::operator == ( const Triangle& tri ) const
{
return equal( tri );
}
__host__ __device__ bool Triangle::operator < ( const Triangle& tri ) const
{
return lessThan( tri );
}
///////////////////////////////////////////////////////////////// Tetrahedron //
const int TetSegNum = 6;
const int TetSeg[ TetSegNum ][2] = {
{ 0, 1 },
{ 0, 2 },
{ 0, 3 },
{ 1, 2 },
{ 1, 3 },
{ 2, 3 },
};
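// Within each segment the two vertex indices are stored in ascending order, so
// segments (and, below, triangles) shared by neighbouring tetrahedra compare
// equal and can be counted once via sort + unique in _getSegmentCount /
// _getTriangleCount.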
void Tetrahedron::getSegments( Segment* segArr ) const
{
for ( int i = 0; i < TetSegNum; ++i )
{
int vert[2] = { _v[ TetSeg[i][0] ], _v[ TetSeg[i][1] ] };
if ( vert[0] > vert[1] ) std::swap( vert[0], vert[1] );
const Segment seg = { vert[0], vert[1] };
segArr[i] = seg;
}
return;
}
// Vertices of 4 triangles of tetra
const int TetTriNum = 4;
const int TetTri[ TetTriNum ][3] = {
{ 0, 1, 2 },
{ 0, 1, 3 },
{ 0, 2, 3 },
{ 1, 2, 3 },
};
void Tetrahedron::getTriangles( Triangle* triArr ) const
{
for ( int i = 0; i < TetTriNum; ++i )
{
// Triangle vertices
int vert[3] = { _v[ TetTri[i][0] ], _v[ TetTri[i][1] ], _v[ TetTri[i][2] ] };
// Sort
if ( vert[0] > vert[1] ) std::swap( vert[0], vert[1] );
if ( vert[1] > vert[2] ) std::swap( vert[1], vert[2] );
if ( vert[0] > vert[1] ) std::swap( vert[0], vert[1] );
// Add triangle
const Triangle tri = { vert[0], vert[1], vert[2] };
triArr[ i ] = tri;
}
return;
}
void TetraMesh::setPoints( const Point3HVec& pointVec )
{
_pointVec = pointVec;
return;
}
void TetraMesh::setTetra( const TetraHVec& tetraVec )
{
_tetraVec = tetraVec;
return;
}
void TetraMesh::check()
{
_checkEuler();
exactinit();
_checkOrientation();
_checkInSphere();
return;
}
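// For a tetrahedralization of a topological ball the Euler characteristic
// V - E + F - T must equal 1 (e.g. a single tetrahedron: 4 - 6 + 4 - 1 = 1),
// which is the invariant verified below.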
void TetraMesh::_checkEuler() const
{
const int v = _getVertexCount();
const int e = _getSegmentCount();
const int f = _getTriangleCount();
const int t = ( int ) _tetraVec.size();
const int euler = v - e + f - t;
cout << "Euler Characteristic:" << endl;
cout << "V: " << v << " E: " << e << " F: " << f << " T: " << t << " Euler: " << euler << endl;
if ( 1 != euler )
{
cout << "Euler check failed!" << endl;
}
return;
}
int TetraMesh::_getVertexCount() const
{
// Estimate space
const int tetNum = ( int ) _tetraVec.size();
const int estPointNum = tetNum * 4;
// Reserve space
IntVec vertVec;
vertVec.reserve( estPointNum );
// Add vertices
for ( int ti = 0; ti < tetNum; ++ti )
{
const Tetrahedron& tet = _tetraVec[ ti ];
for ( int vi = 0; vi < 4; ++vi )
vertVec.push_back( tet._v[ vi ] );
}
// Sort and remove dups
std::sort( vertVec.begin(), vertVec.end() );
vertVec.erase( std::unique( vertVec.begin(), vertVec.end() ), vertVec.end() );
const int vertNum = ( int ) vertVec.size();
return vertNum;
}
int TetraMesh::_getSegmentCount() const
{
// Estimate size
const int tetNum = ( int ) _tetraVec.size();
const int estSegNum = ( int ) ( tetNum * TetSegNum );
// Reserve space
SegmentHVec segVec;
segVec.reserve( estSegNum );
// Read segments
Segment segArr[ TetSegNum ];
for ( int ti = 0; ti < tetNum; ++ti )
{
const Tetrahedron& tet = _tetraVec[ ti ];
tet.getSegments( segArr );
std::copy( segArr, segArr + TetSegNum, std::back_inserter( segVec ) );
}
// Sort and remove dups
std::sort( segVec.begin(), segVec.end() );
segVec.erase( std::unique( segVec.begin(), segVec.end() ), segVec.end() );
const int segNum = ( int ) segVec.size();
return segNum;
}
int TetraMesh::_getTriangleCount() const
{
// Estimate size
const int tetNum = ( int ) _tetraVec.size();
const int estTriNum = ( int ) ( tetNum * TetTriNum );
// Reserve space
TriangleHVec triVec;
triVec.reserve( estTriNum );
// Read triangles
Triangle triArr[ TetTriNum ];
for ( int ti = 0; ti < tetNum; ++ti )
{
const Tetrahedron& tet = _tetraVec[ ti ];
tet.getTriangles( triArr );
std::copy( triArr, triArr + TetTriNum, std::back_inserter( triVec ) );
}
// Sort and remove dups
std::sort( triVec.begin(), triVec.end() );
triVec.erase( std::unique( triVec.begin(), triVec.end() ), triVec.end() );
const int triNum = ( int ) triVec.size();
return triNum;
}
void TetraMesh::_checkOrientation()
{
const int tetNum = ( int ) _tetraVec.size();
int failCount = 0;
for ( int ti = 0; ti < tetNum; ++ti )
{
const Tetrahedron& tet = _tetraVec[ ti ];
Point3* tp[4] = { &_pointVec[ tet._v[0] ], &_pointVec[ tet._v[1] ], &_pointVec[ tet._v[2] ], &_pointVec[ tet._v[3] ] };
const RealType ord = orient3d( tp[0]->_p, tp[1]->_p, tp[2]->_p, tp[3]->_p );
if ( ord > 0 )
++failCount;
}
if ( failCount > 0 )
{
cout << "Orientation check failed!!!" << endl;
cout << "Tetra failures: " << failCount << endl;
}
else
{
cout << "Tetra orientation is correct!" << endl;
}
return;
}
void TetraMesh::_checkInSphere()
{
const int tetNum = ( int ) _tetraVec.size();
int failCount = 0;
for ( int tetIdx = 0; tetIdx < tetNum; ++tetIdx )
{
const Tetrahedron& tet = _tetraVec[ tetIdx ];
// Iterate 4 faces of tetra
for ( int vi = 0; vi < 4; ++vi )
{
const int oppTetIdx = tet._opp[ vi ];
if ( -1 == oppTetIdx )
continue;
assert( ( oppTetIdx >= 0 ) && ( oppTetIdx < tetNum ) && "Invalid opposite tetra index!" );
// Check each pair only once
if ( tetIdx > oppTetIdx )
continue;
// Check in-sphere
const Tetrahedron& oppTet = _tetraVec[ oppTetIdx ];
const int oppVi = oppTet.indexOfOpp( tetIdx );
const int oppVert = oppTet._v[ oppVi ];
Point3* tp[5] = { &_pointVec[ tet._v[0] ], &_pointVec[ tet._v[1] ], &_pointVec[ tet._v[2] ], &_pointVec[ tet._v[3] ], &_pointVec[ oppVert ] };
const RealType side = insphere( tp[0]->_p, tp[1]->_p, tp[2]->_p, tp[3]->_p, tp[4]->_p );
if ( side < 0 )
++failCount;
}
}
if ( failCount > 0 )
{
cout << "In-sphere check failed!!!" << endl;
cout << "Tetra failures: " << failCount << endl;
}
else
{
cout << "Tetra in-sphere is correct!" << endl;
}
return;
}
// Write out mesh as PLY file
void TetraMesh::writeToFile( const string& outFilename )
{
ofstream outFile( outFilename.c_str() );
if ( !outFile )
{
cerr << "Error opening output file: " << outFilename << "!" << endl;
exit( 1 );
}
////
// Header
////
const int pointNum = _pointVec.size();
const int tetraNum = _tetraVec.size();
outFile << "ply" << endl;
outFile << "format ascii 1.0" << endl;
outFile << "element vertex " << pointNum << endl;
outFile << "property float x" << endl;
outFile << "property float y" << endl;
outFile << "property float z" << endl;
outFile << "element face " << tetraNum * 3 << endl;
outFile << "property list uchar int vertex_index" << endl;
outFile << "end_header" << endl;
////
// Points
////
for ( int pi = 0; pi < pointNum; ++pi )
{
const Point3& pt = _pointVec[ pi ];
for ( int vi = 0; vi < 3; ++vi )
{
outFile << pt._p[ vi ] << " ";
}
outFile << endl;
}
////
// Tetrahedron faces
////
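// Note: only 3 of the 4 faces of each tetrahedron are emitted (face {1,2,3} is
// omitted), matching the "element face" count of tetraNum * 3 declared in the
// header above.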
const int Faces[3][3] = {
{ 0, 1, 2 },
{ 0, 1, 3 },
{ 0, 2, 3 } };
for ( int ti = 0; ti < tetraNum; ++ti )
{
const Tetrahedron& tet = _tetraVec[ ti ];
for ( int fi = 0; fi < 3; ++fi )
{
outFile << "3 ";
for ( int vi = 0; vi < 3; ++vi )
{
outFile << tet._v[ Faces[ fi ][ vi ] ] << " ";
}
outFile << endl;
}
}
return;
}
////////////////////////////////////////////////////////////////////////////////
|
the_stack
|
extern "C" {
#include <stdint.h>
#include <memory.h>
}
#include "cuda_helper.h"
#define TPB52 1024
#define TPB50 384
#define NPT 2
#define NBN 2
uint32_t *d_nounce[MAX_GPUS];
uint32_t *h_nounce[MAX_GPUS];
__constant__ uint2 c_PaddedMessage80[ 6]; // six rearranged 64-bit words of the padded 80-byte block header (see keccak256_setBlock_80)
__constant__ uint2 c_mid[17];
__constant__ uint2 keccak_round_constants[24] = {
{ 0x00000001, 0x00000000 }, { 0x00008082, 0x00000000 }, { 0x0000808a, 0x80000000 }, { 0x80008000, 0x80000000 },
{ 0x0000808b, 0x00000000 }, { 0x80000001, 0x00000000 }, { 0x80008081, 0x80000000 }, { 0x00008009, 0x80000000 },
{ 0x0000008a, 0x00000000 }, { 0x00000088, 0x00000000 }, { 0x80008009, 0x00000000 }, { 0x8000000a, 0x00000000 },
{ 0x8000808b, 0x00000000 }, { 0x0000008b, 0x80000000 }, { 0x00008089, 0x80000000 }, { 0x00008003, 0x80000000 },
{ 0x00008002, 0x80000000 }, { 0x00000080, 0x80000000 }, { 0x0000800a, 0x00000000 }, { 0x8000000a, 0x80000000 },
{ 0x80008081, 0x80000000 }, { 0x00008080, 0x80000000 }, { 0x80000001, 0x00000000 }, { 0x80008008, 0x80000000 }
};
#if __CUDA_ARCH__ <= 500
__global__ __launch_bounds__(TPB50, 2)
#else
__global__ __launch_bounds__(TPB52, 1)
#endif
void keccak256_gpu_hash_80(uint32_t threads, uint32_t startNounce,uint32_t *resNounce,const uint2 highTarget){
uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint2 s[25],t[5], v, w, u[ 5];
#if __CUDA_ARCH__ > 500
uint64_t step = gridDim.x * blockDim.x;
uint64_t maxNonce = startNounce + threads;
for(uint64_t nounce = startNounce + thread; nounce<maxNonce;nounce+=step){
#else
uint32_t nounce = startNounce+thread;
if(thread<threads){
#endif
s[ 9] = make_uint2(c_PaddedMessage80[0].x,cuda_swab32(nounce));
s[10] = keccak_round_constants[0];
t[ 4] = c_PaddedMessage80[ 1]^s[ 9];
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
u[ 0]=t[ 4]^c_mid[ 0];
u[ 1]=c_mid[ 1]^ROL2(t[ 4],1);
u[ 2]=c_mid[ 2];
/* theta + rho + pi: b[..] = rotl(a[..] ^ d[..], ..) */
s[ 7] = ROL2(s[10]^u[ 0], 3);
s[10] = c_mid[ 3];
w = c_mid[ 4];
s[20] = c_mid[ 5];
s[ 6] = ROL2(s[ 9]^u[ 2],20);
s[ 9] = c_mid[ 6];
s[22] = c_mid[ 7];
s[14] = ROL2(u[ 0],18);
s[ 2] = c_mid[ 8];
s[12] = ROL2(u[ 1],25);
s[13] = c_mid[ 9];
s[19] = ROR8(u[ 1]);
s[23] = ROR2(u[ 0],23);
s[15] = c_mid[10];
s[ 4] = c_mid[11];
s[24] = c_mid[12];
s[21] = ROR2(c_PaddedMessage80[ 2]^u[ 1], 9);
s[ 8] = c_mid[13];
s[16] = ROR2(c_PaddedMessage80[ 3]^u[ 0],28);
s[ 5] = ROL2(c_PaddedMessage80[ 4]^u[ 1],28);
s[ 3] = ROL2(u[ 1],21);
s[18] = c_mid[14];
s[17] = c_mid[15];
s[11] = c_mid[16];
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
v = c_PaddedMessage80[ 5]^u[ 0];
s[ 0] = chi(v,w,s[ 2]);
s[ 1] = chi(w,s[ 2],s[ 3]);
s[ 2] = chi(s[ 2],s[ 3],s[ 4]);
s[ 3] = chi(s[ 3],s[ 4],v);
s[ 4] = chi(s[ 4],v,w);
v = s[ 5];w = s[ 6];s[ 5] = chi(v,w,s[ 7]);s[ 6] = chi(w,s[ 7],s[ 8]);s[ 7] = chi(s[ 7],s[ 8],s[ 9]);s[ 8] = chi(s[ 8],s[ 9],v);s[ 9] = chi(s[ 9],v,w);
v = s[10];w = s[11];s[10] = chi(v,w,s[12]);s[11] = chi(w,s[12],s[13]);s[12] = chi(s[12],s[13],s[14]);s[13] = chi(s[13],s[14],v);s[14] = chi(s[14],v,w);
v = s[15];w = s[16];s[15] = chi(v,w,s[17]);s[16] = chi(w,s[17],s[18]);s[17] = chi(s[17],s[18],s[19]);s[18] = chi(s[18],s[19],v);s[19] = chi(s[19],v,w);
v = s[20];w = s[21];s[20] = chi(v,w,s[22]);s[21] = chi(w,s[22],s[23]);s[22] = chi(s[22],s[23],s[24]);s[23] = chi(s[23],s[24],v);s[24] = chi(s[24],v,w);
/* iota: a[0,0] ^= round constant */
s[ 0] ^=keccak_round_constants[ 0];
#if __CUDA_ARCH__ > 500
#pragma unroll 22
#else
#pragma unroll 4
#endif
for (int i = 1; i < 23; i++) {
#pragma unroll
for(int j=0;j<5;j++){
t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20])));
}
/*theta*/
#pragma unroll
for(int j=0;j<5;j++){
u[ j] = ROL2(t[ j], 1);
}
s[ 4] = xor3x(s[ 4], t[3], u[ 0]);s[ 9] = xor3x(s[ 9], t[3], u[ 0]);s[14] = xor3x(s[14], t[3], u[ 0]);s[19] = xor3x(s[19], t[3], u[ 0]);s[24] = xor3x(s[24], t[3], u[ 0]);
s[ 0] = xor3x(s[ 0], t[4], u[ 1]);s[ 5] = xor3x(s[ 5], t[4], u[ 1]);s[10] = xor3x(s[10], t[4], u[ 1]);s[15] = xor3x(s[15], t[4], u[ 1]);s[20] = xor3x(s[20], t[4], u[ 1]);
s[ 1] = xor3x(s[ 1], t[0], u[ 2]);s[ 6] = xor3x(s[ 6], t[0], u[ 2]);s[11] = xor3x(s[11], t[0], u[ 2]);s[16] = xor3x(s[16], t[0], u[ 2]);s[21] = xor3x(s[21], t[0], u[ 2]);
s[ 2] = xor3x(s[ 2], t[1], u[ 3]);s[ 7] = xor3x(s[ 7], t[1], u[ 3]);s[12] = xor3x(s[12], t[1], u[ 3]);s[17] = xor3x(s[17], t[1], u[ 3]);s[22] = xor3x(s[22], t[1], u[ 3]);
s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]);s[13] = xor3x(s[13], t[2], u[ 4]);s[18] = xor3x(s[18], t[2], u[ 4]);s[23] = xor3x(s[23], t[2], u[ 4]);
/*rho pi: b[..] = rotl(a[..] ^ d[...], ..)*/
v = s[ 1];
s[ 1] = ROL2(s[ 6],44); s[ 6] = ROL2(s[ 9],20); s[ 9] = ROL2(s[22],61); s[22] = ROL2(s[14],39);
s[14] = ROL2(s[20],18); s[20] = ROL2(s[ 2],62); s[ 2] = ROL2(s[12],43); s[12] = ROL2(s[13],25);
s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15],41); s[15] = ROL2(s[ 4],27);
s[ 4] = ROL2(s[24],14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[ 8],55); s[ 8] = ROL2(s[16],45);
s[16] = ROL2(s[ 5],36); s[ 5] = ROL2(s[ 3],28); s[ 3] = ROL2(s[18],21); s[18] = ROL2(s[17],15);
s[17] = ROL2(s[11],10); s[11] = ROL2(s[ 7], 6); s[ 7] = ROL2(s[10], 3); s[10] = ROL2(v, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
#pragma unroll
for(int j=0;j<25;j+=5){
v=s[j];w=s[j + 1];s[j] = chi(s[j],s[j+1],s[j+2]);s[j+1] = chi(s[j+1],s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w);
}
/* iota: a[0,0] ^= round constant */
s[ 0] ^=keccak_round_constants[ i];
}
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
#pragma unroll 5
for(int j=0;j<5;j++){
t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]),s[j+15],s[j+20]);
}
s[24] = xor3x(s[24],t[3],ROL2(t[0],1));
s[18] = xor3x(s[18],t[2],ROL2(t[4],1));
s[ 0] = xor3x(s[ 0],t[4],ROL2(t[1],1));
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
s[24] = ROL2(s[24],14);
s[18] = ROL2(s[18],21);
if (devectorize(chi(s[18],s[24],s[ 0])) <= devectorize(highTarget)){
// if(chi(s[18].x,s[24].x,s[0].x)<=highTarget.x){
// if(chi(s[18].y,s[24].y,s[0].y)<=highTarget.y){
const uint32_t tmp = atomicExch(&resNounce[0], nounce);
if (tmp != UINT32_MAX)
resNounce[1] = tmp;
// return;
// }
}
}
}
__host__
void keccak256_cpu_hash_80(int thr_id, uint32_t threads, uint32_t startNounce,const uint2 highTarget){
uint32_t tpb;
dim3 grid;
if (device_sm[device_map[thr_id]] <= 500){
tpb = TPB50;
grid.x = (threads + tpb-1)/tpb;
}else{
tpb = TPB52;
grid.x = (threads + (NPT*tpb)-1)/(NPT*tpb);
}
const dim3 block(tpb);
keccak256_gpu_hash_80<<<grid, block>>>(threads, startNounce, d_nounce[thr_id],highTarget);
// cudaThreadSynchronize();
cudaMemcpy(h_nounce[thr_id], d_nounce[thr_id], NBN*sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
#if __CUDA_ARCH__ <= 500
__global__ __launch_bounds__(TPB50, 2)
#else
__global__ __launch_bounds__(TPB52, 1)
#endif
void keccak256_gpu_hash_32(uint32_t threads, uint2* outputHash){
uint32_t thread = blockDim.x * blockIdx.x + threadIdx.x;
uint2 s[25],t[5], v, w, u[ 5];
if(thread<threads){
#pragma unroll 25
for (int i = 0; i<25; i++) {
if (i<4) s[i] = __ldg(&outputHash[i*threads+thread]);
else s[i] = make_uint2(0, 0);
}
s[4] = keccak_round_constants[ 0];
s[16] = make_uint2(0, 0x80000000);
#if __CUDA_ARCH__ > 500
#pragma unroll
#else
#pragma unroll 4
#endif
for (uint32_t i = 0; i < 23; i++) {
/*theta*/
#pragma unroll 5
for(int j=0;j<5;j++){
t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20])));
}
/*theta*/
#pragma unroll 5
for(int j=0;j<5;j++){
u[ j] = ROL2(t[ j], 1);
}
s[ 4] = xor3x(s[ 4], t[3], u[ 0]);s[ 9] = xor3x(s[ 9], t[3], u[ 0]);s[14] = xor3x(s[14], t[3], u[ 0]);s[19] = xor3x(s[19], t[3], u[ 0]);s[24] = xor3x(s[24], t[3], u[ 0]);
s[ 0] = xor3x(s[ 0], t[4], u[ 1]);s[ 5] = xor3x(s[ 5], t[4], u[ 1]);s[10] = xor3x(s[10], t[4], u[ 1]);s[15] = xor3x(s[15], t[4], u[ 1]);s[20] = xor3x(s[20], t[4], u[ 1]);
s[ 1] = xor3x(s[ 1], t[0], u[ 2]);s[ 6] = xor3x(s[ 6], t[0], u[ 2]);s[11] = xor3x(s[11], t[0], u[ 2]);s[16] = xor3x(s[16], t[0], u[ 2]);s[21] = xor3x(s[21], t[0], u[ 2]);
s[ 2] = xor3x(s[ 2], t[1], u[ 3]);s[ 7] = xor3x(s[ 7], t[1], u[ 3]);s[12] = xor3x(s[12], t[1], u[ 3]);s[17] = xor3x(s[17], t[1], u[ 3]);s[22] = xor3x(s[22], t[1], u[ 3]);
s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]);s[13] = xor3x(s[13], t[2], u[ 4]);s[18] = xor3x(s[18], t[2], u[ 4]);s[23] = xor3x(s[23], t[2], u[ 4]);
/*rho pi: b[..] = rotl(a[..] ^ d[...], ..)*/
v = s[ 1];
s[ 1] = ROL2(s[ 6],44); s[ 6] = ROL2(s[ 9],20); s[ 9] = ROL2(s[22],61); s[22] = ROL2(s[14],39);
s[14] = ROL2(s[20],18); s[20] = ROL2(s[ 2],62); s[ 2] = ROL2(s[12],43); s[12] = ROL2(s[13],25);
s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15],41); s[15] = ROL2(s[ 4],27);
s[ 4] = ROL2(s[24],14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[ 8],55); s[ 8] = ROL2(s[16],45);
s[16] = ROL2(s[ 5],36); s[ 5] = ROL2(s[ 3],28); s[ 3] = ROL2(s[18],21); s[18] = ROL2(s[17],15);
s[17] = ROL2(s[11],10); s[11] = ROL2(s[ 7], 6); s[ 7] = ROL2(s[10], 3); s[10] = ROL2(v, 1);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
#pragma unroll 5
for(int j=0;j<25;j+=5){
v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w);
}
/* iota: a[0,0] ^= round constant */
s[ 0] ^=keccak_round_constants[ i];
}
/* theta: c = a[0,i] ^ a[1,i] ^ .. a[4,i] */
#pragma unroll 5
for(int j=0;j<5;j++){
t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]),s[j+15],s[j+20]);
}
/* theta: d[i] = c[i+4] ^ rotl(c[i+1],1) */
#pragma unroll 5
for(int j=0;j<5;j++){
u[ j] = ROL2(t[ j],1);
}
/* theta + rho + pi: b[..] = rotl(a[..] ^ d[..], ..); no need to store the theta result separately since it is unique to each a[..] */
s[ 4] = xor3x(s[24],t[ 3],u[ 0]);
s[ 0] = xor3x(s[ 0],t[ 4],u[ 1]);
s[ 1] = xor3x(s[ 6],t[ 0],u[ 2]);
s[ 2] = xor3x(s[12],t[ 1],u[ 3]);
s[ 3] = xor3x(s[18],t[ 2],u[ 4]);
s[ 1] = ROR2(s[ 1],20);
s[ 2] = ROR2(s[ 2],21);
s[ 3] = ROL2(s[ 3],21);
s[ 4] = ROL2(s[ 4],14);
/* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */
outputHash[0*threads+thread] = chi(s[ 0],s[ 1],s[ 2]) ^ keccak_round_constants[23];
outputHash[1*threads+thread] = chi(s[ 1],s[ 2],s[ 3]);
outputHash[2*threads+thread] = chi(s[ 2],s[ 3],s[ 4]);
outputHash[3*threads+thread] = chi(s[ 3],s[ 4],s[ 0]);
}
}
__host__
void keccak256_cpu_hash_32(const int thr_id,const uint32_t threads, uint2* d_hash){
uint32_t tpb = TPB52;
if (device_sm[device_map[thr_id]] == 500) tpb = TPB50;
const dim3 grid((threads + tpb-1)/tpb);
const dim3 block(tpb);
keccak256_gpu_hash_32 <<<grid, block>>> (threads, d_hash);
}
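// Host-side precomputation for keccak256_gpu_hash_80: everything in the first
// Keccak-f round that does not depend on the per-thread nonce is evaluated once
// here (c_mid), and only the six message words still needed on the device are
// uploaded as c_PaddedMessage80.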
__host__
void keccak256_setBlock_80(uint64_t *PaddedMessage80){
uint64_t s[25],t[5],u[5],midstate[17];
s[10] = 1;//(uint64_t)make_uint2(1, 0);
s[16] = (uint64_t)1<<63;//(uint64_t)make_uint2(0, 0x80000000);
t[ 0] = PaddedMessage80[ 0]^PaddedMessage80[ 5]^s[10];
t[ 1] = PaddedMessage80[ 1]^PaddedMessage80[ 6]^s[16];
t[ 2] = PaddedMessage80[ 2]^PaddedMessage80[ 7];
t[ 3] = PaddedMessage80[ 3]^PaddedMessage80[ 8];
midstate[ 0] = ROTL64(t[ 1],1); //u[0] -partial
u[ 1] = t[ 0]^ROTL64(t[ 2],1); //u[1]
u[ 2] = t[ 1]^ROTL64(t[ 3],1); //u[2]
midstate[ 1] = t[ 2]; //u[3]; -partial
midstate[ 2] = t[ 3]^ROTL64(t[ 0],1); //u[4];
midstate[ 3] = ROTL64(PaddedMessage80[ 1]^u[ 1],1); //v
midstate[ 4] = ROTL64(PaddedMessage80[ 6]^u[ 1],44);
midstate[ 5] = ROTL64(PaddedMessage80[ 2]^u[ 2],62);
midstate[ 6] = ROTL64(u[ 2],61);
midstate[ 7] = ROTL64(midstate[ 2],39);
midstate[ 8] = ROTL64(u[ 2],43);
midstate[ 9] = ROTL64(midstate[ 2], 8);
midstate[10] = ROTL64(PaddedMessage80[ 4]^midstate[ 2],27);
midstate[11] = ROTL64(midstate[ 2],14);
midstate[12] = ROTL64(u[ 1], 2);
midstate[13] = ROTL64(s[16]^u[ 1],45);
midstate[14] = ROTL64(u[ 2],15);
midstate[15] = ROTL64(u[ 1],10);
midstate[16] = ROTL64(PaddedMessage80[ 7]^u[ 2], 6);
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_mid, midstate,17*sizeof(uint64_t), 0, cudaMemcpyHostToDevice));
//rearrange PaddedMessage80, pass only what's needed
uint64_t PaddedMessage[ 6];
PaddedMessage[ 0] = PaddedMessage80[ 9];
PaddedMessage[ 1] = PaddedMessage80[ 4];
PaddedMessage[ 2] = PaddedMessage80[ 8];
PaddedMessage[ 3] = PaddedMessage80[ 5];
PaddedMessage[ 4] = PaddedMessage80[ 3];
PaddedMessage[ 5] = PaddedMessage80[ 0];
CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_PaddedMessage80, PaddedMessage, 6*sizeof(uint64_t), 0, cudaMemcpyHostToDevice));
}
__host__
void keccak256_cpu_init(int thr_id)
{
CUDA_SAFE_CALL(cudaMalloc(&d_nounce[thr_id], NBN*sizeof(uint32_t)));
// CUDA_SAFE_CALL(cudaMallocHost(&h_nounce[thr_id], NBN*sizeof(uint32_t)));
h_nounce[thr_id] = (uint32_t*) malloc(NBN * sizeof(uint32_t));
if(h_nounce[thr_id] == NULL){
gpulog(LOG_ERR,thr_id,"Host memory allocation failed");
exit(EXIT_FAILURE);
}
}
__host__
void keccak256_setOutput(int thr_id)
{
CUDA_SAFE_CALL(cudaMemset(d_nounce[thr_id], 0xff, NBN*sizeof(uint32_t)));
}
__host__
void keccak256_cpu_free(int thr_id)
{
cudaFree(d_nounce[thr_id]);
free(h_nounce[thr_id]); // allocated with malloc() in keccak256_cpu_init
}
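// Hedged host-side call sequence (illustrative only, not part of the original file):
//   keccak256_cpu_init(thr_id);                                      // allocate d_nounce / h_nounce
//   keccak256_setBlock_80(PaddedMessage80);                          // upload midstate + message words
//   keccak256_setOutput(thr_id);                                     // reset result slots to UINT32_MAX
//   keccak256_cpu_hash_80(thr_id, threads, startNounce, highTarget);
//   // h_nounce[thr_id][0..NBN-1] now holds candidate nonces (UINT32_MAX = empty slot)
//   keccak256_cpu_free(thr_id);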
|
the_stack
|
#ifndef INCLUDE_GGNN_CUDA_KNN_GGNN_CUH_
#define INCLUDE_GGNN_CUDA_KNN_GGNN_CUH_
#include <limits>
#include <string>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include "cub/cub.cuh"
#include "ggnn/cuda_knn_ggnn_gpu_instance.cuh"
#include "ggnn/query/cuda_knn_query_layer.cuh"
#include "ggnn/query/cuda_knn_bf_query_layer.cuh"
#include "ggnn/query/cuda_knn_stats_query_layer.cuh"
#include "ggnn/query/cuda_knn_no_slack_query_layer.cuh"
#include "ggnn/utils/cuda_knn_utils.cuh"
#include "ggnn/utils/cuda_knn_constants.cuh"
#include "ggnn/utils/cuda_knn_dataset.cuh"
#include "ggnn/utils/cuda_knn_ggnn_results.cuh"
// for storing generated ground truth data
#include "io/storer_ann.hpp"
// only needed for file_exists check
#include <sys/stat.h>
inline bool file_exists(const std::string& name) {
struct stat buffer;
return (stat(name.c_str(), &buffer) == 0);
}
/**
* GGNN single-GPU wrapper
*
* @param measure distance measure: Euclidean or Cosine
* @param KeyT datatype of dataset indices (needs to be able to represent
* N_base, signed integer required)
* @param ValueT distance value type
* @param GAddrT address type used to access neighborhood vectors (needs to be
* able to represent N_all*K)
* @param BaseT datatype of dataset vector elements
* @param BAddrT address type used to access dataset vectors (needs to be able
* to represent N_base*D)
* @param D dimension of dataset
* @param KBuild neighbors per node in the GGNN graph
* @param KF maximum number of inverse links per node in the GGNN graph
* @param KQuery number of nearest neighbors to retrieve during query
* @param S segment size
*/
template <DistanceMeasure measure,
typename KeyT, typename ValueT, typename GAddrT, typename BaseT,
typename BAddrT, int D, int KBuild, int KF, int KQuery, int S>
struct GGNN {
using Dataset = Dataset<KeyT, BaseT, BAddrT>;
using GGNNGPUInstance = GGNNGPUInstance<measure, KeyT, ValueT, GAddrT, BaseT, BAddrT, D, KBuild, KF, KQuery, S>;
using GGNNResults = GGNNResults<measure, KeyT, ValueT, BaseT, BAddrT, KQuery>;
Dataset dataset;
GGNNGPUInstance ggnn_gpu_instance;
GGNNResults ggnn_results {&dataset};
GGNN(const std::string& basePath, const std::string& queryPath,
const std::string& gtPath, const int L, const float tau_build,
const size_t N_base = std::numeric_limits<size_t>::max())
: dataset{basePath, queryPath, file_exists(gtPath) ? gtPath : "", N_base},
ggnn_gpu_instance{[](){int device; cudaGetDevice(&device); return device;}(), &dataset, dataset.N_base, L, true, tau_build} {
CHECK_EQ(dataset.D, D) << "DIM needs to be the same";
const auto& shard = ggnn_gpu_instance.ggnn_shards.at(0);
ggnn_gpu_instance.loadShardBaseDataAsync(0, 0);
cudaStreamSynchronize(shard.stream);
if (gtPath.empty() || !file_exists(gtPath)) {
generateGTUsingBF();
if (!gtPath.empty()) {
LOG(INFO) << "exporting brute-forced ground truth data.";
IVecsStorer gt_storer(gtPath, dataset.K_gt,
dataset.N_query);
gt_storer.store(dataset.gt, dataset.N_query);
}
}
}
void ggnnMain(const std::string& graph_filename, const int refinement_iterations) {
const bool export_graph =
!graph_filename.empty() && !file_exists(graph_filename);
const bool import_graph =
!graph_filename.empty() && file_exists(graph_filename);
const bool perform_build = export_graph || !import_graph;
if (perform_build) {
std::vector<float> construction_times;
construction_times.reserve(refinement_iterations+1);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
LOG(INFO) << "Starting Graph construction... (tau=" << ggnn_gpu_instance.tau_build << ")";
cudaEventRecord(start);
build();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
construction_times.push_back(milliseconds);
for (int refinement_step = 0; refinement_step < refinement_iterations;
++refinement_step) {
DLOG(INFO) << "Refinement step " << refinement_step;
refine();
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float elapsed_milliseconds = 0;
cudaEventElapsedTime(&elapsed_milliseconds, start, stop);
construction_times.push_back(elapsed_milliseconds);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
for (int refinement_step = 0;
refinement_step < construction_times.size(); refinement_step++) {
const float elapsed_milliseconds = construction_times[refinement_step];
const float elapsed_seconds = elapsed_milliseconds / 1000.0f;
const int number_of_points = ggnn_gpu_instance.N_shard;
LOG(INFO) << "Graph construction + " << refinement_step << " refinement step(s)";
LOG(INFO) << " -- secs: " << elapsed_seconds;
LOG(INFO) << " -- points: " << number_of_points;
LOG(INFO) << " -- ms/point: "
<< elapsed_milliseconds / number_of_points;
}
if (export_graph) {
write(graph_filename);
}
}
if (import_graph) {
read(graph_filename);
}
}
/**
* reset the graph and prepare for a subset of size N
*/
void reinit_graph_for_subset(KeyT N) {
CHECK_LE(N, dataset.N_base);
ggnn_gpu_instance.N_shard = N;
ggnn_gpu_instance.computeGraphParameters();
ggnn_gpu_instance.copyConstantsToGPU();
dataset.top1DuplicateEnd.clear();
dataset.topKDuplicateEnd.clear();
}
void read(const std::string& filename) {
auto& ggnn_host = ggnn_gpu_instance.ggnn_cpu_buffers.at(0);
auto& ggnn_device = ggnn_gpu_instance.ggnn_shards.at(0);
ggnn_host.load(filename);
ggnn_host.uploadAsync(ggnn_device);
cudaStreamSynchronize(ggnn_device.stream);
}
void write(const std::string& filename) {
auto& ggnn_host = ggnn_gpu_instance.ggnn_cpu_buffers.at(0);
auto& ggnn_device = ggnn_gpu_instance.ggnn_shards.at(0);
ggnn_host.downloadAsync(ggnn_device);
cudaStreamSynchronize(ggnn_device.stream);
ggnn_host.store(filename);
}
void evaluateKNNGraph() {
CHECK_EQ(dataset.N_base, dataset.N_query) << "the base needs to be loaded as the query set.";
CHECK_GE(KBuild/2, KQuery) << "there aren't as many nearest neighbors in the graph as queried for.";
CHECK_GE(dataset.K_gt, KQuery+1) << "need one additional ground truth entry to exclude the point itself.";
KeyT* const original_gt = dataset.gt;
dataset.top1DuplicateEnd.clear();
dataset.topKDuplicateEnd.clear();
dataset.gt = new KeyT[static_cast<size_t>(dataset.N_query)*dataset.K_gt];
// shift ground truth left by one to exclude the point itself
std::copy_n(original_gt+1, static_cast<size_t>(dataset.N_query)*dataset.K_gt-1, dataset.gt);
dataset.template checkForDuplicatesInGroundTruth<measure, ValueT>(KQuery);
auto& ggnn_host = ggnn_gpu_instance.ggnn_cpu_buffers.at(0);
auto& ggnn_device = ggnn_gpu_instance.ggnn_shards.at(0);
ggnn_host.downloadAsync(ggnn_device);
cudaStreamSynchronize(ggnn_device.stream);
// simply copy the neighbors from the graph into the results
for (size_t n=0; n<dataset.N_query; ++n) {
std::copy_n(ggnn_host.h_graph+n*KBuild, KQuery, ggnn_results.h_sorted_ids+n*KQuery);
}
ggnn_results.evaluateResults();
delete[] dataset.gt;
dataset.gt = original_gt;
}
template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256, bool DIST_STATS = false>
void queryLayer() {
dataset.template checkForDuplicatesInGroundTruth<measure, ValueT>(KQuery);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
const auto& shard = ggnn_gpu_instance.ggnn_shards.at(0);
cudaEventRecord(start, shard.stream);
ggnn_gpu_instance.template queryLayer<BLOCK_DIM_X, MAX_ITERATIONS, CACHE_SIZE, SORTED_SIZE, DIST_STATS>();
cudaEventRecord(stop, shard.stream);
ggnn_gpu_instance.ggnn_query.sortAsync(shard.stream);
ggnn_results.loadAsync(ggnn_gpu_instance.ggnn_query, 0, shard.stream);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
VLOG(0) << "[GPU: " << ggnn_gpu_instance.gpu_id << "] query part: " << 0 << " => ms: " << milliseconds << " [" << dataset.N_query << " points query -> " << milliseconds*1000.0f/dataset.N_query << " us/point] \n";
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamSynchronize(shard.stream);
ggnn_results.merge();
ggnn_results.evaluateResults();
}
template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256, int BEST_SIZE = 128, bool DIST_STATS = false>
void noSlackQueryLayer() {
dataset.template checkForDuplicatesInGroundTruth<measure, ValueT>(KQuery);
auto& shard = ggnn_gpu_instance.ggnn_shards.at(0);
typedef NoSlackQueryKernel<measure, ValueT, KeyT, D, KBuild, KF, KQuery, S, BLOCK_DIM_X, BaseT,
BAddrT, GAddrT, DIST_STATS, false, MAX_ITERATIONS, CACHE_SIZE, SORTED_SIZE, BEST_SIZE>
QueryKernel;
KeyT* m_query_results;
cudaMallocManaged(&m_query_results,
dataset.N_query * KQuery * sizeof(KeyT));
int* m_dist_statistics = nullptr;
if (DIST_STATS)
cudaMallocManaged(&m_dist_statistics, dataset.N_query * sizeof(int));
QueryKernel query_kernel;
query_kernel.d_base = shard.d_base;
query_kernel.d_query = ggnn_gpu_instance.ggnn_query.d_query;
query_kernel.d_graph = shard.d_graph;
query_kernel.d_query_results = ggnn_gpu_instance.ggnn_query.d_query_result_ids;
query_kernel.d_translation = shard.d_translation;
query_kernel.d_nn1_stats = shard.d_nn1_stats;
query_kernel.N = dataset.N_query;
query_kernel.N_offset = 0;
query_kernel.d_dist_stats = m_dist_statistics;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start, shard.stream);
query_kernel.launch(shard.stream);
cudaEventRecord(stop, shard.stream);
ggnn_gpu_instance.ggnn_query.sortAsync(shard.stream);
ggnn_results.loadAsync(ggnn_gpu_instance.ggnn_query, 0, shard.stream);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
VLOG(0) << "[GPU: " << ggnn_gpu_instance.gpu_id << "] query part: " << 0 << " => ms: " << milliseconds << " [" << dataset.N_query << " points query -> " << milliseconds*1000.0f/dataset.N_query << " us/point] \n";
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamSynchronize(shard.stream);
ggnn_results.merge();
ggnn_results.evaluateResults();
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
/// verbose query with additional logging
/// templated mainly to avoid compilation when not used
template <int BLOCK_DIM_X = 32, int MAX_ITERATIONS = 400, int CACHE_SIZE = 512, int SORTED_SIZE = 256>
void queryLayerDebug() {
dataset.template checkForDuplicatesInGroundTruth<measure, ValueT>(KQuery);
auto& shard = ggnn_gpu_instance.ggnn_shards.at(0);
/*
typedef QueryKernel<ValueT, KeyT, D, KBuild, KF, KQuery, S, BLOCK_DIM_X,
BaseT, BAddrT, GAddrT, true, false, MAX_ITERATIONS, CACHE_SIZE, SORTED_SIZE, true>
*/
typedef StatsQueryKernel<measure, ValueT, KeyT, D, KBuild, KF, KQuery, S, BLOCK_DIM_X, BaseT,
BAddrT, GAddrT, true, false, MAX_ITERATIONS, CACHE_SIZE, SORTED_SIZE>
QueryKernel;
KeyT* m_query_results;
cudaMallocManaged(&m_query_results,
dataset.N_query * KQuery * sizeof(KeyT));
ValueT* m_query_results_dists;
cudaMallocManaged(&m_query_results_dists,
dataset.N_query * KQuery * sizeof(ValueT));
int* m_dist_statistics;
cudaMallocManaged(&m_dist_statistics, dataset.N_query * sizeof(int));
ValueT* m_dist_1_best_stats;
ValueT* m_dist_k_best_stats;
cudaMallocManaged(&m_dist_1_best_stats,
dataset.N_query * (MAX_ITERATIONS+1) * sizeof(ValueT));
cudaMallocManaged(&m_dist_k_best_stats,
dataset.N_query * (MAX_ITERATIONS+1) * sizeof(ValueT));
cudaMemset(m_dist_1_best_stats, -1, dataset.N_query * (MAX_ITERATIONS+1) * sizeof(ValueT));
cudaMemset(m_dist_k_best_stats, -1, dataset.N_query * (MAX_ITERATIONS+1) * sizeof(ValueT));
const KeyT debug_query_id = -1;
KeyT* m_debug_query_visited_ids;
if (debug_query_id > 0) {
cudaMallocManaged(&m_debug_query_visited_ids, MAX_ITERATIONS * sizeof(KeyT));
cudaMemset(m_debug_query_visited_ids, -1, MAX_ITERATIONS * sizeof(KeyT));
}
QueryKernel query_kernel;
query_kernel.d_base = shard.d_base;
query_kernel.d_query = ggnn_gpu_instance.ggnn_query.d_query;
query_kernel.d_graph = shard.d_graph;
query_kernel.d_query_results = m_query_results;
query_kernel.d_query_results_dists = m_query_results_dists;
query_kernel.d_dist_1_best_stats = m_dist_1_best_stats;
query_kernel.d_dist_k_best_stats = m_dist_k_best_stats;
query_kernel.d_debug_query_visited_ids = m_debug_query_visited_ids;
query_kernel.debug_query_id = debug_query_id;
query_kernel.d_translation = shard.d_translation;
query_kernel.d_nn1_stats = shard.d_nn1_stats;
//query_kernel.N_base = dataset.N_base;
query_kernel.N = dataset.N_query;
query_kernel.N_offset = 0;
query_kernel.d_dist_stats = m_dist_statistics;
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
time_launcher(0, &query_kernel, query_kernel.N);
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
std::ofstream distance_stats_file("distances_k_best.csv", std::ofstream::out);
distance_stats_file << "top-layer;";
for (int j=0; j<MAX_ITERATIONS; ++j)
distance_stats_file << "iteration " << j << ";";
distance_stats_file << "last improvement;last distance" << std::endl;
for (int i=0; i<dataset.N_query; ++i) {
ValueT last_dist = std::numeric_limits<ValueT>::infinity();
int last_improvement = 0;
for (int j=0; j<MAX_ITERATIONS+1; ++j) {
const ValueT dist = m_dist_k_best_stats[i*(MAX_ITERATIONS+1)+j];
distance_stats_file << dist << ";";
if (dist < last_dist) {
last_dist = dist;
last_improvement = j;
}
}
distance_stats_file << last_improvement << ";" << last_dist << std::endl;
}
distance_stats_file.close();
if (debug_query_id > 0) {
// compute distance matrix for multi dimensional scaling
std::vector<ValueT> distance_matrix;
// wasteful, but easier than indexing a triangle matrix
distance_matrix.resize(MAX_ITERATIONS*MAX_ITERATIONS, std::numeric_limits<ValueT>::infinity());
for (int i=0; i<MAX_ITERATIONS; ++i) {
for (int j=i+1; j<MAX_ITERATIONS; ++j) { // this will take some time
distance_matrix[i*MAX_ITERATIONS+j] = dataset.template compute_distance_base_to_base<measure, ValueT>(m_debug_query_visited_ids[i], m_debug_query_visited_ids[j]);
}
}
std::vector<ValueT> distances_to_query;
distances_to_query.resize(MAX_ITERATIONS);
std::ofstream visited_distance_matrix_file("visited_distance_matrix.csv", std::ofstream::out);
visited_distance_matrix_file << ValueT(0);
for (int i=0; i<MAX_ITERATIONS; ++i) {
distances_to_query[i] = dataset.template compute_distance_query<measure, ValueT>(m_debug_query_visited_ids[i], query_kernel.debug_query_id);
visited_distance_matrix_file << ';' << distances_to_query[i];
}
visited_distance_matrix_file << std::endl;
for (int i=0; i<MAX_ITERATIONS; ++i) {
// insert query point as first point
visited_distance_matrix_file << distances_to_query[i];
for (int j=0; j<MAX_ITERATIONS; ++j) {
visited_distance_matrix_file << ';';
if (j<i)
visited_distance_matrix_file << distance_matrix[j*MAX_ITERATIONS+i];
else if (i < j)
visited_distance_matrix_file << distance_matrix[i*MAX_ITERATIONS+j];
else // if (i == j)
visited_distance_matrix_file << 0;
}
visited_distance_matrix_file << std::endl;
}
visited_distance_matrix_file.close();
}
printf("query results:\n");
for (int i=0; i<min(100, dataset.N_query); ++i) {
KeyT gt_index = dataset.gt[i*dataset.K_gt];
printf("query %i:", i);
for (int j=0; j<KQuery; ++j) {
ValueT result_distance = measure == Euclidean ? sqrtf(m_query_results_dists[i*KQuery+j]) : m_query_results_dists[i*KQuery+j];
printf("\t%i (%f)", m_query_results[i*KQuery+j], result_distance);
}
printf("\tgt: %i (%f)\n", gt_index, dataset.template compute_distance_query<measure, ValueT>(gt_index, i));
}
std::copy_n(m_query_results, static_cast<size_t>(dataset.N_query)*KQuery, ggnn_results.h_sorted_ids);
ggnn_results.evaluateResults();
cudaFree(m_query_results);
cudaFree(m_query_results_dists);
cudaFree(m_dist_statistics);
cudaFree(m_dist_1_best_stats);
cudaFree(m_dist_k_best_stats);
if (debug_query_id > 0)
cudaFree(m_debug_query_visited_ids);
CHECK_CUDA(cudaPeekAtLastError());
CHECK_CUDA(cudaDeviceSynchronize());
CHECK_CUDA(cudaPeekAtLastError());
}
void generateGTUsingBF() {
ggnn_gpu_instance.generateGTUsingBF(0);
}
void build() {
ggnn_gpu_instance.build(0);
}
void refine() {
ggnn_gpu_instance.refine();
}
};
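// Hedged usage sketch (not part of the original header): shows how this wrapper
// might be instantiated and driven end-to-end. The distance measure, the template
// parameters (D=128, KBuild=24, KF=8, KQuery=10, S=32), L, tau_build and the file
// names are illustrative assumptions only. It is kept as a function template so
// that nothing is instantiated unless the sketch is actually called.
template <int D_EXAMPLE = 128>
void ggnn_usage_sketch(const std::string& basePath, const std::string& queryPath,
                       const std::string& gtPath, const std::string& graphPath) {
  using GGNN_t = GGNN<Euclidean,
                      /*KeyT=*/int, /*ValueT=*/float,
                      /*GAddrT=*/unsigned long long, /*BaseT=*/float,
                      /*BAddrT=*/unsigned long long,
                      D_EXAMPLE, /*KBuild=*/24, /*KF=*/8, /*KQuery=*/10, /*S=*/32>;
  GGNN_t ggnn(basePath, queryPath, gtPath, /*L=*/4, /*tau_build=*/0.5f);
  ggnn.ggnnMain(graphPath, /*refinement_iterations=*/2);  // build or import, refine, optionally export
  ggnn.queryLayer();                                      // query with the default traversal parameters
}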
#endif // INCLUDE_GGNN_CUDA_KNN_GGNN_CUH_
|
the_stack
|
namespace amgx
{
// -----------
// Kernels
// -----------
/*************************************************************************
* "random" hash function for both device and host
************************************************************************/
__host__ __device__ static int ourHash(const int i, const int max)
{
unsigned int a = i;
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) + (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a ^ 0xd3a2646c) + (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) + (a >> 16);
return int(((a ^ 0x4a51e590) / (float)UINT_MAX) * max);
}
struct prg
{
float a, b;
int max_int;
__host__ __device__
prg(int _max_int, float _a = 0.f, float _b = 1.f) : a(_a), b(_b), max_int(_max_int) {};
__host__ __device__
int operator()(const unsigned int n) const
{
int ru = ourHash(n, max_int);
return (ru);
}
};
template <class Vector>
void initRandom(Vector &vec, int size, int max_int)
{
vec.resize(size);
thrust::counting_iterator<unsigned int> index_sequence_begin(0);
thrust::transform(index_sequence_begin,
index_sequence_begin + size,
vec.begin(),
prg(max_int));
}
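// Hedged usage note (illustrative only): initRandom can be used to draw the
// "random" indices consumed by randomized_kaczmarz_smooth_kernel_warp_atomics,
// e.g. initRandom(rnd_rows, num_rows, c_inv_sz) fills rnd_rows with hash-based
// pseudo-random indices into the c_inv table.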
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_anorm_kernel(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
ValueTypeA *d)
{
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
{
ValueTypeB d_ = 0;
IndexType row_start = Ap[ridx];
IndexType row_end = Ap[ridx + 1];
for (int j = row_start; j < row_end; j++)
{
ValueTypeB Aij = Ax[j];
d_ += Aij * Aij;
}
// Store L2-norm
d[ridx] = d_;
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_multicolor_anorm_kernel(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
ValueTypeA *d,
const int *sorted_rows_by_color,
const int num_rows_per_color)
{
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = tidx; ridx < num_rows_per_color; ridx += blockDim.x * gridDim.x)
{
int i = sorted_rows_by_color[ridx];
ValueTypeB d_ = 0;
IndexType row_start = Ap[i];
IndexType row_end = Ap[i + 1];
for (int j = row_start; j < row_end; j++)
{
ValueTypeB Aij = Ax[j];
d_ += Aij * Aij;
}
// Store L2-norm
d[ridx] = d_;
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_cumul_inv_kernel(const IndexType a_cum_num_rows,
ValueTypeA *a_cum,
ValueTypeB d_inv,
int c_inv_sz,
IndexType *c_inv)
{
const int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int idx, idx1, idx2;
for (int ridx = tidx; ridx < a_cum_num_rows; ridx += blockDim.x * gridDim.x)
{
// printf("%d %f %f\n", ridx, d_inv, a_cum[ridx]);
//printf("%f\n", a_cum[ridx]);
double a = a_cum[ridx];
// if (ridx < 0 || ridx >= a_cum_num_rows)
// printf("!! %d %d\n", ridx, idx);
idx1 = int(a / d_inv) - 1; // get index in inverse table (floor - 1)
if (ridx < a_cum_num_rows - 1)
{
idx2 = a_cum[ridx + 1] / d_inv - 1; // get index in inverse table (floor - 1)
}
else
{
idx2 = c_inv_sz;
}
// printf("%d %d\n", idx1, idx2);
for ( idx = idx1; idx < idx2; idx++)
{
if (idx >= c_inv_sz || idx < 0)
{
printf("Ai! %d %d\n", idx, ridx);
}
c_inv[idx] = ridx;
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void compute_amax_kernel(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
IndexType *amax_idx)
{
ValueTypeA maxVal(0), avalue;
IndexType jmax;
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
{
IndexType row_start = Ap[ridx];
IndexType row_end = Ap[ridx + 1];
jmax = row_start;
for (int j = row_start; j < row_end; j++)
{
avalue = Ax[j];
if (avalue > maxVal)
{
maxVal = avalue;
jmax = j;
}
}
// Store position of maxvalue
amax_idx[ridx] = jmax;
}
}
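// Kaczmarz sweep: each row i projects the iterate onto its hyperplane
// a_i . x = b_i, i.e. x <- x + (b_i - a_i . x) / ||a_i||^2 * a_i,
// where ||a_i||^2 has been precomputed into d by compute_anorm_kernel.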
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void kaczmarz_smooth_kernel_naive_atomics(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const ValueTypeA *d,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB *xout,
const IndexType row_offset)
{
// Naive implementation, needs x copy in xout at the very beginning
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = row_offset + tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
{
IndexType row_start = Ap[ridx];
IndexType row_end = Ap[ridx + 1];
ValueTypeB Axi = 0.0;
ValueTypeB r;
for (int j = row_start; j < row_end; j++)
{
Axi += Ax[j] * xout[Aj[j]];
}
r = (b[ridx] - Axi) / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
for (int j = row_start; j < row_end; j++)
{
//xout[Aj[j]] += r*Ax[j];
utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const ValueTypeA *d,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB *xout,
const IndexType row_offset)
{
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
ridx += num_rows_per_iter )
{
IndexType row_start = Ap[ridx];
IndexType row_end = Ap[ridx + 1];
ValueTypeB Axi = 0.0;
ValueTypeB r;
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
Axi += r;
}
r = (b[ridx] - Axi) / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
//ValueTypeB dx = j < row_end ? r*Ax[j] : ValueTypeB(0);
//int aj = j < row_end ? r*Ax[j] : ValueTypeB(0);
if (j < row_end)
{
utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
}
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void randomized_kaczmarz_smooth_kernel_warp_atomics(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const IndexType *c_inv,
const IndexType *rnd_rows,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB *xout,
const IndexType row_offset)
{
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
ridx += num_rows_per_iter )
{
int irow = c_inv[rnd_rows[ridx]];
IndexType row_start = Ap[irow];
IndexType row_end = Ap[irow + 1];
ValueTypeB Axi = 0.0;
ValueTypeB r;
ValueTypeA aa;
ValueTypeA AA = 0.0;
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
aa = utils::warp_reduce<1, utils::Add>(aValue * aValue);
Axi += r;
AA += aa;
}
r = (b[ridx] - Axi) / ( isNotCloseToZero( AA) ? AA : epsilon(AA) );
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
//ValueTypeB dx = j < row_end ? r*Ax[j] : ValueTypeB(0);
//int aj = j < row_end ? r*Ax[j] : ValueTypeB(0);
if (j < row_end)
{
utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
}
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void kaczmarz_smooth_kernel(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const IndexType *amax,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB *xout,
const IndexType row_offset)
{
// Naive implementation, needs x copy in xout at the very beginning
//IndexType tidx = blockDim.x*blockIdx.x + threadIdx.x;
IndexType i, t;
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows ;
ridx += num_rows_per_iter )
{
ValueTypeB Axi = 0.0;
ValueTypeB r;
i = ourHash(ridx, num_rows);
IndexType row_start = Ap[i];
IndexType row_end = Ap[i + 1];
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
Axi += r;
//Axi += utils::Warp_reduce_linear<1,32>::execute<utils::Add,ValueTypeB>(aValue * xValue);
//Axi += Ax[j] * xout[Aj[j]];
printf("j = %d, r = %f\n", j, r);
}
if (laneId == 0)
{
r = (b[i] - Axi);// / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
t = row_start + ourHash(ridx, row_end - row_start);
printf("ridx=%d, i=%d, t=%d, Aj[t]=%d, r=%f\n", ridx, i, t, Aj[t], r);
xout[Aj[t]] += r * ((row_end - row_start) * Ax[t]) * 0.5;
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void multicolor_kaczmarz_smooth_kernel_naive(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const ValueTypeA *d,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB weight,
const int *sorted_rows_by_color,
const int num_rows_per_color,
ValueTypeB *xout)
{
int i;
// Naive implementation, needs x copy in xout at the very beginning
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = tidx; ridx < num_rows_per_color; ridx += blockDim.x * gridDim.x)
{
i = sorted_rows_by_color[ridx];
IndexType row_start = Ap[i];
IndexType row_end = Ap[i + 1];
ValueTypeB Axi = 0.0;
ValueTypeB r;
for (int j = row_start; j < row_end; j++)
{
Axi += Ax[j] * xout[Aj[j]];
}
r = (b[i] - Axi) / ( isNotCloseToZero( d[i]) ? d[i] : epsilon(d[i]) );
for (int j = row_start; j < row_end; j++)
{
utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB, int kCtaSize>
__global__ void multicolor_kaczmarz_smooth_kernel(const IndexType num_rows,
const IndexType *Ap,
const IndexType *Aj,
const ValueTypeA *Ax,
const ValueTypeA *d,
const ValueTypeB *b,
const ValueTypeB *x,
ValueTypeB weight,
const int *sorted_rows_by_color,
const int num_rows_per_color,
ValueTypeB *xout)
{
const int num_warps = kCtaSize / 32;
const int num_rows_per_iter = num_warps * gridDim.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
int i;
for ( int ridx = blockIdx.x * num_warps + warpId ; ridx < num_rows_per_color ;
ridx += num_rows_per_iter )
{
i = sorted_rows_by_color[ridx];
IndexType row_start = Ap[i];
IndexType row_end = Ap[i + 1];
ValueTypeB Axi = 0.0;
ValueTypeB r;
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
ValueTypeB aValue = j < row_end ? Ax[j] : ValueTypeB(0);
ValueTypeB xValue = j < row_end ? xout[Aj[j]] : ValueTypeB(0);
r = utils::warp_reduce<1, utils::Add>(aValue * xValue);
Axi += r;
}
r = (b[i] - Axi) / ( isNotCloseToZero( d[i]) ? d[i] : epsilon(d[i]) );
for (int j = row_start + laneId; utils::any( j < row_end) ; j += 32)
{
if (j < row_end)
{
utils::atomic_add(&xout[Aj[j]], r * Ax[j]);
}
}
}
}
template<typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__ void jacobi_smooth_with_0_initial_guess_kernel(const IndexType num_rows,
const ValueTypeA *d,
const ValueTypeB *b,
ValueTypeB *x,
ValueTypeB weight,
const IndexType row_offset)
{
IndexType tidx = blockDim.x * blockIdx.x + threadIdx.x;
for (int ridx = row_offset + tidx; ridx < num_rows; ridx += blockDim.x * gridDim.x)
{
x[ridx] = weight * b[ridx] / ( isNotCloseToZero( d[ridx]) ? d[ridx] : epsilon(d[ridx]) );
}
}
// -----------------
// Methods
// -----------------
// Constructor
template<class T_Config>
KaczmarzSolver_Base<T_Config>::KaczmarzSolver_Base( AMG_Config &cfg, const std::string &cfg_scope) : Solver<T_Config>( cfg, cfg_scope), m_an(0), m_amax(0), m_c_inv(0)
{
weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope);
this->m_coloring_needed = (cfg.AMG_Config::getParameter<int>("kaczmarz_coloring_needed", cfg_scope) != 0);
this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::getParameter<int>("reorder_cols_by_color", cfg_scope) != 0);
this->m_randomized = true;
if (weight == 0)
{
weight = 1.;
amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Block Jacobi smoother\n");;
}
}
// Destructor
template<class T_Config>
KaczmarzSolver_Base<T_Config>::~KaczmarzSolver_Base()
{
}
// Solver setup
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
m_explicit_A = dynamic_cast<Matrix<T_Config>*>(Base::m_A);
if (!m_explicit_A)
{
FatalError("Kaczmarz solver only works with explicit matrices", AMGX_ERR_INTERNAL);
}
compute_anorm( *this->m_explicit_A );
if (m_randomized) // MC RK is not supported here
{
if (m_coloring_needed)
{
//FatalError("Randomized Kaczmarz solver does not support coloring", AMGX_ERR_INTERNAL);
m_coloring_needed = false;
}
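// Randomized Kaczmarz setup: m_an initially holds the squared row norms
// ||a_i||^2; after the inclusive scan below it holds their prefix sums.
// c_inv is an inverse lookup table over bins of width d_inv (the smallest
// squared row norm) mapping a bin index back to a row, so picking a random
// bin selects row i with probability roughly proportional to ||a_i||^2.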
double d_inv = this->m_an[0];
int c_sz = this->m_an.size();
d_inv = thrust::reduce(this->m_an.begin(), this->m_an.end(), d_inv, thrust::minimum<ValueTypeA>());
thrust::inclusive_scan(this->m_an.begin(), this->m_an.end(), this->m_an.begin()); // in-place scan
int c_inv_sz = (this->m_an[c_sz - 1] + d_inv - 1 ) / d_inv;
this->m_c_inv.resize(c_inv_sz, -1);
const size_t THREADS_PER_BLOCK = 128;
const size_t NUM_BLOCKS = min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)c_sz / (ValueTypeB)THREADS_PER_BLOCK));
if (c_sz > 0)
{
device_vector_alloc<ValueTypeA> aa(c_sz, 1);
compute_cumul_inv_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
(c_sz,
this->m_an.raw(),
d_inv,
c_inv_sz,
this->m_c_inv.raw());
}
cudaDeviceSynchronize();
cudaCheckError();
}
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_anorm( Matrix<T_Config> &A)
{
this->m_an.resize(A.get_num_rows()*A.get_block_dimx());
ViewType oldView = A.currentView();
A.setView(this->m_explicit_A->getViewExterior());
if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
{
compute_anorm_1x1(A);
}
else
{
FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
A.setView(oldView);
}
template<class T_Config>
void KaczmarzSolver_Base<T_Config>::compute_amax( Matrix<T_Config> &A)
{
this->m_amax.resize(A.get_num_rows()*A.get_block_dimx());
ViewType oldView = A.currentView();
A.setView(this->m_explicit_A->getViewExterior());
if (A.get_block_dimx() == 1 && A.get_block_dimy() == 1)
{
compute_amax_1x1(A);
}
else
{
FatalError("Unsupported block size for KaczmarzSolver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
A.setView(oldView);
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_anorm_1x1(const Matrix_d &A)
{
//DIAG: strange issues when trying to add DIAG property handling,
// so only the !DIAG case is supported for now
if (A.hasProps(DIAG))
{
FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
}
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
const size_t THREADS_PER_BLOCK = 128;
const size_t NUM_BLOCKS = min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
if (A.get_num_rows() > 0)
{
compute_anorm_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
((int)A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_an.raw());
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::compute_amax_1x1(const Matrix_d &A)
{
//DIAG: strange issues when trying to add DIAG property handling,
// so only the !DIAG case is supported for now
if (A.hasProps(DIAG))
{
FatalError("Unsupported separate diag", AMGX_ERR_NOT_IMPLEMENTED);
}
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
const size_t THREADS_PER_BLOCK = 128;
const size_t NUM_BLOCKS = min(AMGX_GRID_MAX_SIZE, (int)ceil((ValueTypeB)A.get_num_rows() / (ValueTypeB)THREADS_PER_BLOCK));
if (A.get_num_rows() > 0)
{
compute_amax_kernel<IndexType, ValueTypeA, ValueTypeB> <<< (unsigned int)NUM_BLOCKS, (unsigned int)THREADS_PER_BLOCK>>>
((int)A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_amax.raw());
}
cudaCheckError();
}
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}
// Solve one iteration
template<class T_Config>
bool
KaczmarzSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
if (xIsZero) { x.dirtybit = 0; }
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_async(x, x.tag);
if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior())
{
this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
}
}
ViewType oldView = this->m_explicit_A->currentView();
ViewType flags;
bool latencyHiding = true;
if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0))
{
latencyHiding = false;
this->m_explicit_A->setViewExterior();
flags = this->m_explicit_A->getViewExterior();
}
else
{
flags = this->m_explicit_A->getViewInterior();
this->m_explicit_A->setViewInterior();
}
if (this->m_explicit_A->get_block_dimx() == 1 && this->m_explicit_A->get_block_dimy() == 1)
{
if (xIsZero)
{
//smooth_with_0_initial_guess_1x1(*this->m_explicit_A, b, x, flags);
smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
}
else
{
smooth_1x1(*this->m_explicit_A, b, x, flags, latencyHiding);
}
}
else
{
FatalError("Unsupported block size for Kaczmarz_Solver", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
x.dirtybit = 1;
this->m_explicit_A->setView(oldView);
return this->converged( b, x );
}
template<class T_Config>
void
KaczmarzSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{
}
// Multicolor version
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_MC(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
ValueTypeB *x_ptr = x.raw();
IndexType num_rows = A.get_num_rows();
const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors();
const IndexType *A_sorted_rows_by_color_ptr = A.getMatrixColoring().getSortedRowsByColor().raw();
for (int i = 0; i < num_colors; i++)
{
const IndexType color_offset = ((separation_flags & INTERIOR) == 0) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const IndexType num_rows_per_color = ((separation_flags == this->m_explicit_A->getViewInterior()) ? A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i] : A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]) - color_offset;
if (num_rows_per_color == 0) { continue; }
const int threads_per_block = 128;
const int blockrows_per_warp = 1;
const int blockrows_per_cta = (threads_per_block / 32) * blockrows_per_warp;
const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) (num_rows_per_color / blockrows_per_cta + 1));
multicolor_kaczmarz_smooth_kernel<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_an.raw(),
b.raw(),
x_ptr,
this->weight,
A_sorted_rows_by_color_ptr + color_offset, num_rows_per_color,
x_ptr);
cudaCheckError();
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1_naive(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
typedef typename Matrix_d::index_type IndexType;
typedef typename Matrix_d::value_type ValueTypeA;
ValueTypeB *x_ptr = x.raw();
IndexType num_rows = A.get_num_rows();
IndexType offset = 0;
// Skipping Multi-GPU logic for now
    // The current implementation is exact only when launched with a single warp per grid
const int threads_per_block = 32;
const int num_blocks = 1;
if (this->m_randomized)
{
IVector rnd_rows;
int c_inv_sz = this->m_c_inv.size();
initRandom(rnd_rows, A.get_num_rows(), c_inv_sz);
randomized_kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_c_inv.raw(),
rnd_rows.raw(),
b.raw(),
x_ptr,
x_ptr,
offset);
}
else
{
kaczmarz_smooth_kernel_warp_atomics<IndexType, ValueTypeA, ValueTypeB, threads_per_block> <<< num_blocks, threads_per_block >>>
(A.get_num_rows(),
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
this->m_an.raw(),
b.raw(),
x_ptr,
x_ptr,
offset);
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags, bool latency_hiding)
{
if (this->m_coloring_needed)
{
smooth_1x1_MC(A, b, x, separation_flags, latency_hiding);
}
else
{
smooth_1x1_naive(A, b, x, separation_flags, latency_hiding);
}
}
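/* Illustrative sketch (not part of AMGX): the row update that the smoothing kernels above
 * parallelize is the classical Kaczmarz projection. For row i with squared norm an_i = ||a_i||^2
 * and relaxation weight w, one serial sweep over a dense n x n system looks like:
 *
 *   for (int i = 0; i < n; ++i) {
 *       double r = b[i];                                                  // row residual
 *       for (int j = 0; j < n; ++j) r -= A[i * n + j] * x[j];
 *       double an = 0.0;                                                  // ||a_i||^2
 *       for (int j = 0; j < n; ++j) an += A[i * n + j] * A[i * n + j];
 *       for (int j = 0; j < n; ++j) x[j] += w * (r / an) * A[i * n + j];  // project onto row i
 *   }
 *
 * (A, b, x, n, w are hypothetical names.) The kernels above use the precomputed row norms (m_an)
 * in place of `an`; the randomized variant instead draws rows through the m_c_inv lookup table,
 * which appears to implement inverse-CDF sampling proportional to the row norms.
 */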
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void KaczmarzSolver<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::smooth_with_0_initial_guess_1x1(Matrix_d &A, VVector &b, VVector &x, ViewType separation_flags)
{
ViewType oldView = A.currentView();
// Process all rows
A.setViewExterior();
ViewType flags = A.getViewExterior();
int offset, num_rows;
A.getOffsetAndSizeForView(flags, &offset, &num_rows);
A.setView(oldView);
cudaCheckError();
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class KaczmarzSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
the_stack
|
#include <cugraph/algorithms.hpp>
#include <cugraph/partition_manager.hpp>
#include <cuco/detail/hash_functions.cuh>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/reduce_v.cuh>
#include <thrust/reduce.h>
#include <raft/comms/comms.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <raft/handle.hpp>
#include <rmm/device_scalar.hpp>
#include <rmm/device_uvector.hpp>
#include <gtest/gtest.h>
#include <random>
template <typename vertex_t, typename... Args>
struct property_transform : public thrust::unary_function<vertex_t, thrust::tuple<Args...>> {
int mod{};
property_transform(int mod_count) : mod(mod_count) {}
constexpr __device__ auto operator()(const vertex_t& val)
{
cuco::detail::MurmurHash3_32<vertex_t> hash_func{};
auto value = hash_func(val) % mod;
return thrust::make_tuple(static_cast<Args>(value)...);
}
};
template <typename vertex_t, template <typename...> typename Tuple, typename... Args>
struct property_transform<vertex_t, Tuple<Args...>> : public property_transform<vertex_t, Args...> {
};
template <typename Tuple, std::size_t... I>
auto make_iterator_tuple(Tuple& data, std::index_sequence<I...>)
{
return thrust::make_tuple((std::get<I>(data).begin())...);
}
template <typename... Args>
auto get_zip_iterator(std::tuple<Args...>& data)
{
return thrust::make_zip_iterator(make_iterator_tuple(
data, std::make_index_sequence<std::tuple_size<std::tuple<Args...>>::value>()));
}
template <typename T>
auto get_property_iterator(std::tuple<T>& data)
{
return (std::get<0>(data)).begin();
}
template <typename T0, typename... Args>
auto get_property_iterator(std::tuple<T0, Args...>& data)
{
return get_zip_iterator(data);
}
template <typename... Args>
struct generate_impl {
static thrust::tuple<Args...> initial_value(int init)
{
return thrust::make_tuple(static_cast<Args>(init)...);
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(rmm::device_uvector<label_t>& labels,
int hash_bin_count,
raft::handle_t const& handle)
{
auto data = std::make_tuple(rmm::device_uvector<Args>(labels.size(), handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
labels.begin(),
labels.end(),
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
template <typename label_t>
static std::tuple<rmm::device_uvector<Args>...> property(thrust::counting_iterator<label_t> begin,
thrust::counting_iterator<label_t> end,
int hash_bin_count,
raft::handle_t const& handle)
{
auto length = thrust::distance(begin, end);
auto data = std::make_tuple(rmm::device_uvector<Args>(length, handle.get_stream())...);
auto zip = get_zip_iterator(data);
thrust::transform(handle.get_thrust_policy(),
begin,
end,
zip,
property_transform<label_t, Args...>(hash_bin_count));
return data;
}
};
template <typename T>
struct result_compare {
static constexpr double threshold_ratio{1e-3};
constexpr auto operator()(const T& t1, const T& t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (std::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
};
template <typename... Args>
struct result_compare<thrust::tuple<Args...>> {
static constexpr double threshold_ratio{1e-3};
using Type = thrust::tuple<Args...>;
constexpr auto operator()(const Type& t1, const Type& t2)
{
return equality_impl(t1, t2, std::make_index_sequence<thrust::tuple_size<Type>::value>());
}
private:
template <typename T>
constexpr bool equal(T t1, T t2)
{
if constexpr (std::is_floating_point_v<T>) {
return std::abs(t1 - t2) < (std::max(t1, t2) * threshold_ratio);
}
return t1 == t2;
}
template <typename T, std::size_t... I>
constexpr auto equality_impl(T& t1, T& t2, std::index_sequence<I...>)
{
return (... && (equal(thrust::get<I>(t1), thrust::get<I>(t2))));
}
};
template <typename T>
struct generate : public generate_impl<T> {
static T initial_value(int init) { return static_cast<T>(init); }
};
template <typename... Args>
struct generate<std::tuple<Args...>> : public generate_impl<Args...> {
};
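// Usage note (illustrative, not part of the test): for result_t = std::tuple<int, float>,
// generate<std::tuple<int, float>>::initial_value(10) yields thrust::make_tuple(10, 10.0f),
// and generate<...>::property(labels, hash_bin_count, handle) fills one rmm::device_uvector per
// tuple element with MurmurHash3_32(label) % hash_bin_count cast to that element type.
// get_property_iterator() then returns either the single vector's begin() or a zip iterator over
// all of them, which is what reduce_v and the thrust::reduce reference consume below.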
struct Prims_Usecase {
bool check_correctness{true};
};
template <typename input_usecase_t>
class Tests_MG_ReduceV
: public ::testing::TestWithParam<std::tuple<Prims_Usecase, input_usecase_t>> {
public:
Tests_MG_ReduceV() {}
static void SetupTestCase() {}
static void TearDownTestCase() {}
virtual void SetUp() {}
virtual void TearDown() {}
  // Compare the results of the MG reduce_v primitive and thrust::reduce on a single GPU
template <typename vertex_t,
typename edge_t,
typename weight_t,
typename result_t,
bool store_transposed>
void run_current_test(Prims_Usecase const& prims_usecase, input_usecase_t const& input_usecase)
{
// 1. initialize handle
raft::handle_t handle{};
HighResClock hr_clock{};
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto const comm_rank = comm.get_rank();
auto row_comm_size = static_cast<int>(sqrt(static_cast<double>(comm_size)));
while (comm_size % row_comm_size != 0) {
--row_comm_size;
}
cugraph::partition_2d::subcomm_factory_t<cugraph::partition_2d::key_naming_t, vertex_t>
subcomm_factory(handle, row_comm_size);
// 2. create MG graph
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto [mg_graph, d_mg_renumber_map_labels] =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, true>(
handle, input_usecase, true, true);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG construct_graph took " << elapsed_time * 1e-6 << " s.\n";
}
auto mg_graph_view = mg_graph.view();
// 3. run MG reduce_v
const int hash_bin_count = 5;
const int initial_value = 10;
auto property_initial_value = generate<result_t>::initial_value(initial_value);
auto property_data =
generate<result_t>::property((*d_mg_renumber_map_labels), hash_bin_count, handle);
auto property_iter = get_property_iterator(property_data);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
hr_clock.start();
}
auto result = reduce_v(handle,
mg_graph_view,
property_iter,
property_iter + (*d_mg_renumber_map_labels).size(),
property_initial_value);
if (cugraph::test::g_perf) {
CUDA_TRY(cudaDeviceSynchronize()); // for consistent performance measurement
handle.get_comms().barrier();
double elapsed_time{0.0};
hr_clock.stop(&elapsed_time);
std::cout << "MG reduce_v took " << elapsed_time * 1e-6 << " s.\n";
}
//// 4. compare SG & MG results
if (prims_usecase.check_correctness) {
cugraph::graph_t<vertex_t, edge_t, weight_t, store_transposed, false> sg_graph(handle);
std::tie(sg_graph, std::ignore) =
cugraph::test::construct_graph<vertex_t, edge_t, weight_t, store_transposed, false>(
handle, input_usecase, true, false);
auto sg_graph_view = sg_graph.view();
auto sg_property_data = generate<result_t>::property(
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(sg_graph_view.get_local_vertex_last()),
hash_bin_count,
handle);
auto sg_property_iter = get_property_iterator(sg_property_data);
using property_t = decltype(property_initial_value);
auto expected_result =
thrust::reduce(handle.get_thrust_policy(),
sg_property_iter,
sg_property_iter + sg_graph_view.get_number_of_local_vertices(),
property_initial_value,
cugraph::property_add<property_t>());
result_compare<property_t> compare{};
ASSERT_TRUE(compare(expected_result, result));
}
}
};
using Tests_MG_ReduceV_File = Tests_MG_ReduceV<cugraph::test::File_Usecase>;
using Tests_MG_ReduceV_Rmat = Tests_MG_ReduceV<cugraph::test::Rmat_Usecase>;
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, false>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(std::get<0>(param),
std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTupleIntFloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, std::tuple<int, float>, true>(
std::get<0>(param),
cugraph::test::override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeFalse)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, false>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_File, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(std::get<0>(param), std::get<1>(param));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int32FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int32_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt32Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int32_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
TEST_P(Tests_MG_ReduceV_Rmat, CheckInt64Int64FloatTransposeTrue)
{
auto param = GetParam();
run_current_test<int64_t, int64_t, float, int, true>(
std::get<0>(param), override_Rmat_Usecase_with_cmd_line_arguments(std::get<1>(param)));
}
INSTANTIATE_TEST_SUITE_P(
file_test,
Tests_MG_ReduceV_File,
::testing::Combine(
::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::File_Usecase("test/datasets/karate.mtx"),
cugraph::test::File_Usecase("test/datasets/web-Google.mtx"),
cugraph::test::File_Usecase("test/datasets/ljournal-2008.mtx"),
cugraph::test::File_Usecase("test/datasets/webbase-1M.mtx"))));
INSTANTIATE_TEST_SUITE_P(
rmat_small_test,
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{true}),
::testing::Values(cugraph::test::Rmat_Usecase(
10, 16, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
INSTANTIATE_TEST_SUITE_P(
rmat_benchmark_test, /* note that scale & edge factor can be overridden in benchmarking (with
--gtest_filter to select only the rmat_benchmark_test with a specific
vertex & edge type combination) by command line arguments and do not
include more than one Rmat_Usecase that differ only in scale or edge
factor (to avoid running same benchmarks more than once) */
Tests_MG_ReduceV_Rmat,
::testing::Combine(::testing::Values(Prims_Usecase{false}),
::testing::Values(cugraph::test::Rmat_Usecase(
20, 32, 0.57, 0.19, 0.19, 0, false, false, 0, true))));
CUGRAPH_MG_TEST_PROGRAM_MAIN()
|
the_stack
|
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "math.hpp"
#include "types.hpp"
#include "grid_stride_range.hpp"
#include "execution.hpp"
#include "../cuda4dnn/csl/stream.hpp"
#include "../cuda4dnn/csl/tensor.hpp"
#include "../cuda4dnn/csl/span.hpp"
#include <cuda_runtime.h>
using namespace cv::dnn::cuda4dnn::csl;
using namespace cv::dnn::cuda4dnn::csl::device;
namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
namespace raw {
template <class T, std::size_t CHANNELS_PER_ITER>
__global__ void resize_nn(
Span<T> output, size_type out_height, size_type out_width,
View<T> input, size_type in_height, size_type in_width)
{
auto in_image_size = in_height * in_width;
auto out_image_size = out_height * out_width;
/* think of the output and input as a collection of 2d images with the last axis
* representing the width and the last but one axis representing the height
*
         * the remaining axes together form a collection of these images/channels
*/
auto num_effective_channels = output.size() / out_image_size;
/* we process multiple channels every iteration to reuse the identical computation
* involved with the spatial dimensions
*
* if we are processing `CHANNELS_PER_ITER` channels per iteration, we will need
* (num_effective_channels / CHANNELS_PER_ITER) iterations per (x, y) location
*/
auto num_channel_iters_per_xy = (num_effective_channels / CHANNELS_PER_ITER);
/* we need `num_channel_iters_per_xy` iterations per (x, y) and there are `out_image_size`
* combinations of (x, y); hence, we'll need `num_channel_iters_per_xy * out_image_size`
* iterations in total to finish the resize operation
*/
auto iters_required = num_channel_iters_per_xy * out_image_size;
for (auto iter : grid_stride_range(iters_required)) {
const index_type c_start = (iter / out_image_size) * CHANNELS_PER_ITER;
/* note here that consecutive `iter` values will often have consecutive `x` values
* => stores into output will be coalesced across threads
*/
const index_type y = (iter % out_image_size) / out_width;
const index_type x = iter % out_width;
/* o2i = output to input */
auto o2i_fy = static_cast<float>(in_height) / out_height;
auto o2i_fx = static_cast<float>(in_width) / out_width;
auto in_y = static_cast<index_type>(y * o2i_fy);
auto in_x = static_cast<index_type>(x * o2i_fx);
index_type in_idx = c_start * in_image_size + in_y * in_width + in_x;
index_type out_idx = c_start * out_image_size + y * out_width + x;
for (int i = 0; i < CHANNELS_PER_ITER; i++) {
output[out_idx] = input[in_idx];
in_idx += in_image_size;
out_idx += out_image_size;
}
}
}
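        /* Worked example (illustrative): resizing a 4x4 input to an 8x8 output gives
         * o2i_fy = o2i_fx = 4.0f / 8 = 0.5. Output pixel (y = 5, x = 3) therefore reads input
         * pixel (in_y, in_x) = (index_type(5 * 0.5), index_type(3 * 0.5)) = (2, 1), and the same
         * (in_y, in_x) is reused for every one of the CHANNELS_PER_ITER channels handled in this
         * iteration, which is exactly the computation the channel batching amortizes.
         */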
template <class T, std::size_t CHANNELS_PER_ITER>
__global__ void resize_bilinear(
Span<T> output, size_type out_height, size_type out_width,
View<T> input, size_type in_height, size_type in_width,
float o2i_fy, float o2i_fx)
{
auto in_image_size = in_height * in_width;
auto out_image_size = out_height * out_width;
/* think of the output and input as a collection of 2d images with the last axis
* representing the width and the last but one axis representing the height
*
         * the remaining axes together form a collection of these images/channels
*/
auto num_effective_channels = output.size() / out_image_size;
/* we process multiple channels every iteration to reuse the identical computation
* involved with the spatial dimensions
*
* if we are processing `CHANNELS_PER_ITER` channels per iteration, we will need
* (num_effective_channels / CHANNELS_PER_ITER) iterations per (x, y) location
*/
auto num_channel_iters_per_xy = (num_effective_channels / CHANNELS_PER_ITER);
/* we need `num_channel_iters_per_xy` iterations per (x, y) and there are `out_image_size`
* combinations of (x, y); hence, we'll need `num_channel_iters_per_xy * out_image_size`
* iterations in total to finish the resize operation
*/
auto iters_required = num_channel_iters_per_xy * out_image_size;
for (auto iter : grid_stride_range(iters_required)) {
const index_type c_start = (iter / out_image_size) * CHANNELS_PER_ITER;
const index_type c_end = c_start + CHANNELS_PER_ITER;
/* note here that consecutive `iter` values will often have consecutive `x` values
* => stores into output will be coalesced across threads
*/
const index_type y = (iter % out_image_size) / out_width;
const index_type x = iter % out_width;
auto in_x = x * o2i_fx;
auto in_y = y * o2i_fy;
auto in_x0 = static_cast<index_type>(in_x);
auto in_y0 = static_cast<index_type>(in_y);
using device::min;
auto in_x1 = min<index_type>(in_x0 + 1, in_width - 1);
auto in_y1 = min<index_type>(in_y0 + 1, in_height - 1);
index_type in_offset_r0 = c_start * in_image_size + in_y0 * in_width;
index_type in_offset_r1 = c_start * in_image_size + in_y1 * in_width;
index_type out_idx = c_start * out_image_size + y * out_width + x;
        #pragma unroll 1 /* disable unrolling to reduce register pressure; not entirely clear why, but it helps */
for (auto c = c_start; c < c_end; c++) {
auto v_00 = input[in_offset_r0 + in_x0],
v_01 = input[in_offset_r0 + in_x1],
v_10 = input[in_offset_r1 + in_x0],
v_11 = input[in_offset_r1 + in_x1];
output[out_idx] =
v_00 +
T(in_y - in_y0) * T(v_10 - v_00) +
T(in_x - in_x0) * T(v_01 - v_00) +
T(in_y - in_y0) * T(in_x - in_x0) * T(v_11 - v_01 - v_10 + v_00);
in_offset_r0 += in_image_size;
in_offset_r1 += in_image_size;
out_idx += out_image_size;
}
}
}
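        /* Note (illustrative): with dy = in_y - in_y0 and dx = in_x - in_x0, the expression above
         * is the incremental form of standard bilinear interpolation:
         *   out = v_00 * (1 - dy) * (1 - dx) + v_01 * (1 - dy) * dx
         *       + v_10 * dy * (1 - dx)       + v_11 * dy * dx
         * Expanding and regrouping gives
         *   out = v_00 + dy * (v_10 - v_00) + dx * (v_01 - v_00)
         *             + dy * dx * (v_11 - v_01 - v_10 + v_00),
         * which matches the code and needs fewer multiplications per channel.
         */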
}
template <class T, std::size_t CHANNELS_PER_ITER> static
void launch_multichannel_resize_nn(const Stream& stream,
Span<T> output, size_type out_height, size_type out_width,
View<T> input, size_type in_height, size_type in_width)
{
auto kernel = raw::resize_nn<T, CHANNELS_PER_ITER>;
auto policy = make_policy(kernel, output.size() / CHANNELS_PER_ITER, 0, stream);
launch_kernel(kernel, policy, output, out_height, out_width, input, in_height, in_width);
}
template <class T>
void resize_nn(const Stream& stream, TensorSpan<T> output, TensorView<T> input) {
auto out_height = output.get_axis_size(-2);
auto out_width = output.get_axis_size(-1);
auto in_height = input.get_axis_size(-2);
auto in_width = input.get_axis_size(-1);
auto num_effective_channels = input.size_range(0, 2);
auto num_iters = num_effective_channels * out_height * out_width;
if (num_effective_channels % 32 == 0 && num_iters > 655360) {
launch_multichannel_resize_nn<T, 32>(stream, output, out_height, out_width, input, in_height, in_width);
} else if (num_effective_channels % 16 == 0 && num_iters > 327680) {
launch_multichannel_resize_nn<T, 16>(stream, output, out_height, out_width, input, in_height, in_width);
} else if (num_effective_channels % 8 == 0 && num_iters > 163840) {
launch_multichannel_resize_nn<T, 8>(stream, output, out_height, out_width, input, in_height, in_width);
} else if (num_effective_channels % 4 == 0 && num_iters > 81920) {
launch_multichannel_resize_nn<T, 4>(stream, output, out_height, out_width, input, in_height, in_width);
} else if (num_effective_channels % 2 == 0) {
launch_multichannel_resize_nn<T, 2>(stream, output, out_height, out_width, input, in_height, in_width);
} else {
launch_multichannel_resize_nn<T, 1>(stream, output, out_height, out_width, input, in_height, in_width);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void resize_nn<__half>(const Stream&, TensorSpan<__half>, TensorView<__half>);
#endif
template void resize_nn<float>(const Stream&, TensorSpan<float>, TensorView<float>);
template <class T, std::size_t CHANNELS_PER_ITER> static
void launch_multichannel_resize_bilinear(const Stream& stream,
Span<T> output, size_type out_height, size_type out_width,
View<T> input, size_type in_height, size_type in_width,
float scale_y, float scale_x)
{
auto kernel = raw::resize_bilinear<T, CHANNELS_PER_ITER>;
auto policy = make_policy(kernel, output.size() / CHANNELS_PER_ITER, 0, stream);
launch_kernel(kernel, policy, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
}
template <class T>
void resize_bilinear(const Stream& stream, TensorSpan<T> output, TensorView<T> input, float scale_y, float scale_x) {
auto out_height = output.get_axis_size(-2);
auto out_width = output.get_axis_size(-1);
auto in_height = input.get_axis_size(-2);
auto in_width = input.get_axis_size(-1);
auto num_effective_channels = input.size_range(0, 2);
auto num_iters = num_effective_channels * out_height * out_width;
if (num_effective_channels % 16 == 0 && num_iters > 163840) {
launch_multichannel_resize_bilinear<T, 16>(stream, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
} else if (num_effective_channels % 8 == 0 && num_iters > 81920) {
launch_multichannel_resize_bilinear<T, 8>(stream, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
} else if (num_effective_channels % 4 == 0 && num_iters > 40960) {
launch_multichannel_resize_bilinear<T, 4>(stream, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
} else if (num_effective_channels % 2 == 0) {
launch_multichannel_resize_bilinear<T, 2>(stream, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
} else {
launch_multichannel_resize_bilinear<T, 1>(stream, output, out_height, out_width, input, in_height, in_width, scale_y, scale_x);
}
}
#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530)
template void resize_bilinear<__half>(const Stream&, TensorSpan<__half>, TensorView<__half>, float, float);
#endif
template void resize_bilinear<float>(const Stream&, TensorSpan<float>, TensorView<float>, float, float);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
|
the_stack
|
#include <array/NDArray.h>
#include <execution/Threads.h>
#include <helpers/ConstantTadHelper.h>
#include <system/op_boilerplate.h>
#include "../triangular_solve.h"
namespace sd {
namespace ops {
namespace helpers {
/*
 * lower triangular process (forward substitution) for a system of linear equations
 * x_1 = b_1 / a_1,1
 * x_2 = (b_2 - a_2,1 * x_1) / a_2,2
 * x_3 = (b_3 - a_3,1 * x_1 - a_3,2 * x_2) / a_3,3
 * ...
 * x_M = (b_M - a_M,1 * x_1 - ... - a_M,M-1 * x_M-1) / a_M,M
 *
 * output == x
 * a == leftInput
 * b == rightInput
 *
 * */
template <typename T>
static SD_HOST_DEVICE void lowerTriangularSolve(T const* leftInput, sd::LongType const* leftInputShape,
T const* rightInput, sd::LongType const* rightInputShape,
bool const unitOnDiag, T* output, const sd::LongType* outputShape,
sd::LongType rows, sd::LongType cols) {
for (auto r = 0; r < rows; r++) {
for (auto j = 0; j < cols; j++) {
sd::LongType posY[] = {r, j};
sd::LongType posX[] = {r, r};
auto xIndex = shape::getOffset(leftInputShape, posX, 0);
auto yIndex = shape::getOffset(rightInputShape, posY, 0);
auto zIndex = shape::getOffset(outputShape, posY, 0);
auto sum = rightInput[yIndex];
for (auto c = 0; c < r; c++) {
sd::LongType posZ[] = {c, j};
sd::LongType pos[] = {r, c};
auto xcIndex = shape::getOffset(leftInputShape, pos, 0);
auto zcIndex = shape::getOffset(outputShape, posZ, 0);
sum -= leftInput[xcIndex] * output[zcIndex];
}
output[zIndex] = unitOnDiag ? sum : sum / leftInput[xIndex];
}
}
}
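/* Illustrative sketch (assumed dense row-major buffers, not part of the library): the same
 * forward substitution without the shape/offset machinery, one right-hand-side column at a time:
 *
 *   for (int r = 0; r < rows; ++r)
 *       for (int j = 0; j < cols; ++j) {
 *           double sum = B[r * cols + j];
 *           for (int c = 0; c < r; ++c) sum -= L[r * rows + c] * X[c * cols + j];
 *           X[r * cols + j] = unitOnDiag ? sum : sum / L[r * rows + r];
 *       }
 *
 * (L is rows x rows lower triangular, B and X are rows x cols; all names are hypothetical.)
 * upperTriangularSolve below is the mirror image (back substitution): it walks the rows from the
 * bottom up and accumulates over the columns to the right of the diagonal instead of the left.
 */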
/*
 * upper triangular process (back substitution) for a system of linear equations
 * x_M = b_M / a_M,M
 * x_M-1 = (b_M-1 - a_M-1,M * x_M) / a_M-1,M-1
 * x_M-2 = (b_M-2 - a_M-2,M-1 * x_M-1 - a_M-2,M * x_M) / a_M-2,M-2
 * ...
 * x_1 = (b_1 - a_1,2 * x_2 - ... - a_1,M * x_M) / a_1,1
 *
 * output == x
 * a == leftInput
 * b == rightInput
 *
 * */
template <typename T>
static SD_HOST_DEVICE void upperTriangularSolve(T const* leftInput, sd::LongType const* leftInputShape,
T const* rightInput, sd::LongType const* rightInputShape,
bool const unitOnDiag, T* output, const sd::LongType* outputShape,
sd::LongType rows, sd::LongType cols) {
for (auto r = rows; r > 0; r--) {
for (auto j = 0; j < cols; j++) {
sd::LongType posY[] = {r - 1, j};
sd::LongType posX[] = {r - 1, r - 1};
auto xIndex = shape::getOffset(leftInputShape, posX, 0);
auto yIndex = shape::getOffset(rightInputShape, posY, 0);
auto zIndex = shape::getOffset(outputShape, posY, 0);
auto sum = rightInput[yIndex];
for (auto c = r; c < rows; c++) {
sd::LongType posZ[] = {c, j};
sd::LongType pos[] = {r - 1, c};
auto zcIndex = shape::getOffset(outputShape, posZ, 0);
auto xcIndex = shape::getOffset(leftInputShape, pos, 0);
sum -= leftInput[xcIndex] * output[zcIndex];
}
output[zIndex] = unitOnDiag ? sum : sum / leftInput[xIndex];
}
}
}
template <typename T>
static SD_KERNEL void triangularSolveKernel(T const* leftInput, sd::LongType const* leftPartShape, T const* rightInput,
sd::LongType const* rightPartShape, bool const lower,
bool const unitsOnDiag, T* output, const sd::LongType* outputShape,
const sd::LongType* tadLeftShape, const sd::LongType* tadLeftOffset,
const sd::LongType* tadRightShape, const sd::LongType* tadRightOffset,
const sd::LongType* tadOutputShape, const sd::LongType* tadOutputOffset,
sd::LongType batchNum) {
__shared__ sd::LongType rows;
__shared__ sd::LongType cols;
if (threadIdx.x == 0) {
rows = shape::sizeAt(leftPartShape, -2);
cols = shape::sizeAt(rightPartShape, -1);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto stop = batchNum;
auto increment = blockDim.x * gridDim.x;
for (auto i = start; i < stop; i += increment) {
auto pLeftPart = leftInput + tadLeftOffset[i];
auto pRightPart = rightInput + tadRightOffset[i];
auto pOutputPart = output + tadOutputOffset[i];
if (lower) {
lowerTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart,
tadOutputShape, rows, cols);
} else {
upperTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart,
tadOutputShape, rows, cols);
}
}
}
template <typename T>
static sd::Status triangularSolveFunctor_(sd::LaunchContext* context, NDArray* leftInput, NDArray* rightInput,
bool lower, bool unitsOnDiag, NDArray* output) {
NDArray::prepareSpecialUse({output}, {leftInput, rightInput});
auto leftTads = ConstantTadHelper::getInstance().tadForDimensions(leftInput->shapeInfo(), {-2, -1});
auto rightTads = ConstantTadHelper::getInstance().tadForDimensions(rightInput->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
T const* leftBuf = reinterpret_cast<T const*>(leftInput->specialBuffer());
T const* rightBuf = reinterpret_cast<T const*>(rightInput->specialBuffer());
T* outputBuf = reinterpret_cast<T*>(output->specialBuffer());
triangularSolveKernel<T><<<128, 128, 256, *stream>>>(
leftBuf, leftInput->specialShapeInfo(), rightBuf, rightInput->specialShapeInfo(), lower, unitsOnDiag, outputBuf,
output->specialShapeInfo(), leftTads.specialShapeInfo(), leftTads.specialOffsets(), rightTads.specialShapeInfo(),
rightTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets(), leftTads.numberOfTads());
NDArray::registerSpecialUse({output}, {leftInput, rightInput});
return sd::Status::OK;
}
/// triangularSolve2D - 2D implementation of triangularSolveFunctor
/// \tparam T - type of NDArray output
/// \param context - launch context pointer
/// \param leftInput - T matrix of the equation Tx = b
/// \param rightInput - b vector of the equation Tx = b
/// \param lower - true for a lower triangular matrix, false for upper triangular
/// \param unitsOnDiag - solve for the case where the diagonal is assumed to be all ones (1.0)
/// \param output - output vector (x in the equation Tx = b)
///
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, const NDArray& leftInput, const NDArray& rightInput,
bool const lower, bool const unitsOnDiag, NDArray& output) {
triangularSolveFunctor_<T>(context, const_cast<NDArray*>(&leftInput), const_cast<NDArray*>(&rightInput), lower,
unitsOnDiag, &output);
// leftInput.syncToHost(); rightInput.syncToHost(); output.syncToHost();
// T const* pLeftPart = (T const*)leftInput.getBuffer();
// T const* pRightPart = (T const*)rightInput.getBuffer();
// T* pOutputPart = (T*)output.buffer();
// auto rows = leftInput.rows();
// auto cols = leftInput.columns();
// if (lower) {
// lowerTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag,
// pOutputPart, output.shapeInfo(), rows, cols);
// } else {
// upperTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag,
// pOutputPart, output.shapeInfo(), rows, cols);
// }
// output.syncToDevice();
}
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D,
(sd::LaunchContext * context, NDArray const& leftInput, NDArray const& rightInput,
bool const lower, bool const unitsOnDiag, NDArray& output),
SD_FLOAT_TYPES);
// template void triangularSolve2D<float>(sd::LaunchContext* context, NDArray const& leftInput, NDArray
// const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); template void
// triangularSolve2D<bfloat16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const&
// rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); template void
// triangularSolve2D<float16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const&
// rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); template void
// triangularSolve2D<double>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput,
// bool const lower, bool const unitsOnDiag, NDArray& output);
sd::Status triangularSolveFunctor(sd::LaunchContext* context, NDArray* leftInput, NDArray* rightInput, bool lower,
bool unitsOnDiag, NDArray* output) {
BUILD_SINGLE_SELECTOR(leftInput->dataType(), return triangularSolveFunctor_,
(context, leftInput, rightInput, lower, unitsOnDiag, output), SD_FLOAT_NATIVE);
}
template <typename T>
static SD_KERNEL void upperAdjointKernel(T const* input, T* output, sd::LongType batchSize, sd::LongType rows,
sd::LongType columns, sd::LongType const* inputTads,
sd::LongType const* inputOffsets, sd::LongType const* outputTads,
sd::LongType const* outputOffsets) {
for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) {
auto inputPart = input + inputOffsets[b];
auto outputPart = output + outputOffsets[b];
for (auto r = threadIdx.x; r < rows; r += blockDim.x) {
for (auto c = threadIdx.y; c <= r; c += blockDim.y) {
sd::LongType zPos[] = {r, c};
sd::LongType xPos[] = {c, r};
auto zIndex = shape::getOffset(outputTads, zPos);
auto xIndex = shape::getOffset(inputTads, xPos);
outputPart[zIndex] = inputPart[xIndex];
}
}
}
}
template <typename T>
static SD_KERNEL void lowerAdjointKernel(T const* input, T* output, sd::LongType batchSize, sd::LongType rows,
sd::LongType columns, sd::LongType const* inputTads,
sd::LongType const* inputOffsets, sd::LongType const* outputTads,
sd::LongType const* outputOffsets) {
for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) {
auto inputPart = input + inputOffsets[b];
auto outputPart = output + outputOffsets[b];
for (auto r = threadIdx.x; r < rows; r += blockDim.x) {
for (auto c = r + threadIdx.y; c < columns; c += blockDim.y) {
sd::LongType zPos[] = {r, c};
sd::LongType xPos[] = {c, r};
auto zIndex = shape::getOffset(outputTads, zPos);
auto xIndex = shape::getOffset(inputTads, xPos);
outputPart[zIndex] = inputPart[xIndex];
}
}
}
}
template <typename T>
static void adjointTriangularMatrix_(sd::LaunchContext* context, NDArray const* input, bool const lower,
NDArray* output) {
auto inputTads = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
auto inputBuf = reinterpret_cast<T const*>(input->specialBuffer());
auto outputBuf = reinterpret_cast<T*>(output->specialBuffer());
auto rows = input->sizeAt(-2);
auto columns = input->sizeAt(-1);
if (lower) {
lowerAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns,
inputTads.specialShapeInfo(), inputTads.specialOffsets(),
outputTads.specialShapeInfo(), outputTads.specialOffsets());
} else {
upperAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns,
inputTads.specialShapeInfo(), inputTads.specialOffsets(),
outputTads.specialShapeInfo(), outputTads.specialOffsets());
}
}
void adjointMatrix(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), adjointTriangularMatrix_, (context, input, lower, output), SD_FLOAT_NATIVE);
}
/*
//////////////////////////////////////////////////////////////////////////
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, NDArray const& A, NDArray const& b, bool const lower,
bool const unitsOnDiag, NDArray& x) {
if(A.rankOf() != 2)
throw std::runtime_error("triangularSolve2D: input matrix A must be 2D !");
int temp;
const bool isBvector = b.isCommonVector(temp);
const bool isXvector = x.isCommonVector(temp);
if(A.sizeAt(0) != (isBvector ? b.lengthOf() : b.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: A and b must have the same number of rows !");
if(A.sizeAt(1) != (isXvector ? x.lengthOf() : x.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: columns number of array A must be equal to rows number
of array x !");
if(isBvector) {
if(lower) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
else {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
if(lower) {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i, bCol);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i, bCol);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
}
}
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const&
leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), SD_FLOAT_TYPES);
*/
} // namespace helpers
} // namespace ops
} // namespace sd
|
the_stack
|
#include "thundergbm/builder/exact_tree_builder.h"
#include "thundergbm/util/multi_device.h"
#include "thundergbm/util/cub_wrapper.h"
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/iterator/counting_iterator.h"
#include "thrust/iterator/transform_iterator.h"
#include "thrust/iterator/discard_iterator.h"
#include "thrust/sequence.h"
#include "thrust/binary_search.h"
void ExactTreeBuilder::find_split(int level, int device_id) {
const SparseColumns &columns = shards[device_id].columns;
SyncArray<int> &nid = ins2node_id[device_id];
SyncArray<GHPair> &gh_pair = gradients[device_id];
Tree &tree = trees[device_id];
SyncArray<SplitPoint> &sp = this->sp[device_id];
SyncArray<bool> &ignored_set = shards[device_id].ignored_set;
TIMED_FUNC(timerObj);
int n_max_nodes_in_level = static_cast<int>(pow(2, level));
int nid_offset = static_cast<int>(pow(2, level) - 1);
int n_column = columns.n_column;
int n_partition = n_column * n_max_nodes_in_level;
int nnz = columns.nnz;
int n_block = std::min((nnz / n_column - 1) / 256 + 1, 32 * 56);
LOG(TRACE) << "start finding split";
//find the best split locally
{
using namespace thrust;
//calculate split information for each split
int n_split;
SyncArray<GHPair> gh_prefix_sum(nnz);
SyncArray<GHPair> missing_gh(n_partition);
SyncArray<int_float> rle_key(nnz);
if (nnz * 4 > 1.5 * (1 << 30)) rle_key.resize(int(nnz * 0.1));
auto rle_pid_data = make_transform_iterator(rle_key.device_data(),
[=]__device__(int_float key) { return get<0>(key); });
auto rle_fval_data = make_transform_iterator(rle_key.device_data(),
[=]__device__(int_float key) { return get<1>(key); });
{
//gather g/h pairs and do prefix sum
{
SyncArray<int> fvid2pid(nnz);
SyncArray<int> fvid_new2old(nnz);
{
TIMED_SCOPE(timerObj, "find_split - data partitioning");
{
//input
auto *nid_data = nid.device_data();
const int *iid_data = columns.csc_row_idx.device_data();
LOG(TRACE) << "after using v_stats and columns";
//output
int *fvid2pid_data = fvid2pid.device_data();
device_loop_2d(
n_column, columns.csc_col_ptr.device_data(),
[=]__device__(int col_id, int fvid) {
//feature value id -> instance id -> node id
int nid = nid_data[iid_data[fvid]];
int pid;
//if this node is leaf node, move it to the end
if (nid < nid_offset) pid = INT_MAX;//todo negative
else pid = col_id * n_max_nodes_in_level + nid - nid_offset;
fvid2pid_data[fvid] = pid;
},
n_block);
cudaDeviceSynchronize();
LOG(DEBUG) << "fvid2pid " << fvid2pid;
}
//get feature value id mapping for partition, new -> old
{
// TIMED_SCOPE(timerObj, "fvid_new2old");
sequence(cuda::par, fvid_new2old.device_data(), fvid_new2old.device_end(), 0);
//using prefix sum memory for temporary storage
cub_sort_by_key(fvid2pid, fvid_new2old, -1, true, (void *) gh_prefix_sum.device_data());
LOG(DEBUG) << "sorted fvid2pid " << fvid2pid;
LOG(DEBUG) << "fvid_new2old " << fvid_new2old;
}
cudaDeviceSynchronize();
}
//do prefix sum
{
TIMED_SCOPE(timerObj, "find_split - RLE compression");
//same feature value in the same part has the same key.
auto key_iter = make_zip_iterator(
make_tuple(
fvid2pid.device_data(),
make_permutation_iterator(
columns.csc_val.device_data(),
fvid_new2old.device_data())));//use fvid_new2old to access csc_val
n_split = reduce_by_key(
cuda::par,
key_iter, key_iter + nnz,
make_permutation_iterator( //ins id -> gh pair
gh_pair.device_data(),
make_permutation_iterator( //old fvid -> ins id
columns.csc_row_idx.device_data(),
fvid_new2old.device_data())), //new fvid -> old fvid
rle_key.device_data(),
gh_prefix_sum.device_data()
).first - rle_key.device_data();
CHECK_LE(n_split, rle_key.size());
LOG(DEBUG) << "RLE ratio = " << (float) n_split / nnz;
//prefix sum
inclusive_scan_by_key(
cuda::par,
rle_pid_data, rle_pid_data + n_split,
gh_prefix_sum.device_data(),
gh_prefix_sum.device_data());
LOG(DEBUG) << "gh prefix sum = " << gh_prefix_sum;
cudaDeviceSynchronize();
}
}
}
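        //Illustrative note (not part of ThunderGBM): the RLE + segmented prefix sum above follows
        //the usual thrust pattern. On a toy host example with keys {0,0,1,1,1} and values
        //{1,2,3,4,5}, thrust::reduce_by_key collapses equal consecutive keys into {0,1} with sums
        //{3,12}, while thrust::inclusive_scan_by_key keeps the element count and scans within each
        //segment, giving {1,3,3,7,12}. Here the RLE key is the (partition id, feature value) pair
        //and the value is a GH pair, so gh_prefix_sum[i] ends up holding the running GH sum of the
        //partition that element i belongs to.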
//calculate missing value for each partition
{
        TIMED_SCOPE(timerObj, "find_split - calculate missing value");
SyncArray<int> pid_ptr(n_partition + 1);
counting_iterator<int> search_begin(0);
upper_bound(cuda::par, rle_pid_data, rle_pid_data + n_split, search_begin,
search_begin + n_partition, pid_ptr.device_data() + 1);
LOG(DEBUG) << "pid_ptr = " << pid_ptr;
auto pid_ptr_data = pid_ptr.device_data();
auto rle_key_data = rle_key.device_data();
float_type rt_eps = param.rt_eps;
{
SyncArray<float_type> fval(nnz);
auto fval_data = fval.device_data();
device_loop(n_split, [=]__device__(int i) {
fval_data[i] = rle_fval_data[i];
});
device_loop(n_split, [=]__device__(int i) {
int pid = rle_pid_data[i];
if (pid == INT_MAX) return;
float_type f = fval_data[i];
if ((pid_ptr_data[pid + 1] - 1) == i)//the last RLE
//using "get" to get a modifiable lvalue
get<1>(rle_key_data[i]) = (f - fabsf(fval_data[pid_ptr_data[pid]]) - rt_eps);
else
//FIXME read/write collision
get<1>(rle_key_data[i]) = (f + fval_data[i + 1]) * 0.5f;
});
}
const auto gh_prefix_sum_data = gh_prefix_sum.device_data();
const auto node_data = tree.nodes.device_data();
auto missing_gh_data = missing_gh.device_data();
device_loop(n_partition, [=]__device__(int pid) {
int nid = pid % n_max_nodes_in_level + nid_offset;
if (pid_ptr_data[pid + 1] != pid_ptr_data[pid])
missing_gh_data[pid] =
node_data[nid].sum_gh_pair - gh_prefix_sum_data[pid_ptr_data[pid + 1] - 1];
});
LOG(DEBUG) << "missing gh = " << missing_gh;
cudaDeviceSynchronize();
}
//calculate gain of each split
SyncArray<float_type> gain(nnz);
{
TIMED_SCOPE(timerObj, "find_split - calculate gain");
auto compute_gain = []__device__(GHPair father, GHPair lch, GHPair rch, float_type min_child_weight,
float_type lambda) -> float_type {
if (lch.h >= min_child_weight && rch.h >= min_child_weight)
return (lch.g * lch.g) / (lch.h + lambda) + (rch.g * rch.g) / (rch.h + lambda) -
(father.g * father.g) / (father.h + lambda);
else
return 0;
};
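            //Worked example (illustrative numbers): with lambda = 1, min_child_weight = 0 and a
            //candidate split giving lch = (G=2, H=3), rch = (G=-1, H=2), father = (G=1, H=5),
            //  gain = 2*2/(3+1) + (-1)*(-1)/(2+1) - 1*1/(5+1) = 1.0 + 0.3333 - 0.1667 = 1.1667,
            //i.e. the standard second-order (XGBoost-style) split gain without the complexity
            //penalty; a child whose hessian sum falls below min_child_weight zeroes the gain.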
const Tree::TreeNode *nodes_data = tree.nodes.device_data();
GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data();
float_type *gain_data = gain.device_data();
const auto missing_gh_data = missing_gh.device_data();
auto ignored_set_data = ignored_set.device_data();
//for lambda expression
float_type mcw = param.min_child_weight;
float_type l = param.lambda;
device_loop(n_split, [=]__device__(int i) {
int pid = rle_pid_data[i];
int nid0 = pid % n_max_nodes_in_level;
int fid = pid / n_max_nodes_in_level;
int nid = nid0 + nid_offset;
if (pid != INT_MAX && !ignored_set_data[fid]) {
GHPair father_gh = nodes_data[nid].sum_gh_pair;
GHPair p_missing_gh = missing_gh_data[pid];
GHPair rch_gh = gh_prefix_sum_data[i];
float_type default_to_left_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
rch_gh = rch_gh + p_missing_gh;
float_type default_to_right_gain = max(0.f,
compute_gain(father_gh, father_gh - rch_gh, rch_gh, mcw, l));
if (default_to_left_gain > default_to_right_gain)
gain_data[i] = default_to_left_gain;
else
gain_data[i] = -default_to_right_gain;//negative means default split to right
} else gain_data[i] = 0;
});
LOG(DEBUG) << "gain = " << gain;
cudaDeviceSynchronize();
}
//get best gain and the index of best gain for each feature and each node
SyncArray<int_float> best_idx_gain(n_partition);
int n_nodes_in_level;
{
TIMED_SCOPE(timerObj, "find_split - get best gain");
auto arg_abs_max = []__device__(const int_float &a, const int_float &b) {
if (fabsf(get<1>(a)) == fabsf(get<1>(b)))
return get<0>(a) < get<0>(b) ? a : b;
else
return fabsf(get<1>(a)) > fabsf(get<1>(b)) ? a : b;
};
//reduce to get best split of each node for this feature
SyncArray<int> feature_nodes_pid(n_partition);
int n_feature_with_nodes = reduce_by_key(
cuda::par,
rle_pid_data, rle_pid_data + n_split,
make_zip_iterator(make_tuple(counting_iterator<int>(0), gain.device_data())),
feature_nodes_pid.device_data(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max).second - best_idx_gain.device_data();
LOG(DEBUG) << "aaa = " << n_feature_with_nodes;
LOG(DEBUG) << "f n pid" << feature_nodes_pid;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
auto feature_nodes_pid_data = feature_nodes_pid.device_data();
device_loop(n_feature_with_nodes, [=]__device__(int i) {
feature_nodes_pid_data[i] = feature_nodes_pid_data[i] % n_max_nodes_in_level;
});
LOG(DEBUG) << "f n pid" << feature_nodes_pid;
cub_sort_by_key(feature_nodes_pid, best_idx_gain, n_feature_with_nodes);
LOG(DEBUG) << "f n pid" << feature_nodes_pid;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
n_nodes_in_level = reduce_by_key(
cuda::par,
feature_nodes_pid.device_data(), feature_nodes_pid.device_data() + n_feature_with_nodes,
best_idx_gain.device_data(),
make_discard_iterator(),
best_idx_gain.device_data(),
thrust::equal_to<int>(),
arg_abs_max
).second - best_idx_gain.device_data();
LOG(DEBUG) << "#nodes in level = " << n_nodes_in_level;
LOG(DEBUG) << "best rank & gain = " << best_idx_gain;
cudaDeviceSynchronize();
}
//get split points
const int_float *best_idx_gain_data = best_idx_gain.device_data();
GHPair *gh_prefix_sum_data = gh_prefix_sum.device_data();
const auto missing_gh_data = missing_gh.device_data();
sp.resize(n_max_nodes_in_level);
auto sp_data = sp.device_data();
int column_offset = columns.column_offset;
device_loop(n_max_nodes_in_level, [=]__device__(int i) {
sp_data[i].nid = -1;
});
device_loop(n_nodes_in_level, [=]__device__(int i) {
int_float bst = best_idx_gain_data[i];
float_type best_split_gain = get<1>(bst);
int split_index = get<0>(bst);
int pid = rle_pid_data[split_index];
if (pid != INT_MAX) {
int nid0 = pid % n_max_nodes_in_level;
sp_data[nid0].nid = nid0 + nid_offset;
sp_data[nid0].split_fea_id = pid / n_max_nodes_in_level + column_offset;
sp_data[nid0].gain = fabsf(best_split_gain);
sp_data[nid0].fval = rle_fval_data[split_index];
sp_data[nid0].fea_missing_gh = missing_gh_data[pid];
sp_data[nid0].default_right = best_split_gain < 0;
sp_data[nid0].rch_sum_gh = gh_prefix_sum_data[split_index];
}
});
}
LOG(DEBUG) << "split points (gain/fea_id/nid): " << sp;
cudaDeviceSynchronize();
}
void ExactTreeBuilder::update_ins2node_id() {
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
//set new node id for each instance
SparseColumns &columns = shards[device_id].columns;
SyncArray<bool> has_splittable(1);
{
auto nid_data = ins2node_id[device_id].device_data();
const int *iid_data = columns.csc_row_idx.device_data();
const Tree::TreeNode *nodes_data = trees[device_id].nodes.device_data();
const int *col_ptr_data = columns.csc_col_ptr.device_data();
const float_type *f_val_data = columns.csc_val.device_data();
bool *h_s_data = has_splittable.device_data();
int column_offset = columns.column_offset;
int n_column = columns.n_column;
int nnz = columns.nnz;
int n_block = std::min((nnz / n_column - 1) / 256 + 1, 32 * 56);
LOG(TRACE) << "update ins2node id for each fval";
device_loop_2d(n_column, col_ptr_data, [=]__device__(int col_id, int fvid) {
//feature value id -> instance id
int iid = iid_data[fvid];
//instance id -> node id
int nid = nid_data[iid];
//node id -> node
const Tree::TreeNode &node = nodes_data[nid];
//if the node splits on this feature
if (node.splittable() && node.split_feature_id == col_id + column_offset) {
h_s_data[0] = true;
if (f_val_data[fvid] < node.split_value)
//goes to left child
nid_data[iid] = node.lch_index;
else
//right child
nid_data[iid] = node.rch_index;
}
}, n_block);
}
LOG(DEBUG) << "new tree_id = " << ins2node_id[device_id];
has_split[device_id] = has_splittable.host_data()[0];
});
}
void ExactTreeBuilder::init(const DataSet &dataset, const GBMParam ¶m) {
TreeBuilder::init(dataset, param);
//TODO refactor
//init shards
int n_device = param.n_device;
shards = vector<Shard>(n_device);
vector<std::unique_ptr<SparseColumns>> v_columns(param.n_device);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].reset(&shards[i].columns);
shards[i].ignored_set = SyncArray<bool>(dataset.n_features());
}
SparseColumns columns;
if(dataset.use_cpu)
columns.csr2csc_cpu(dataset, v_columns);
else
columns.csr2csc_gpu(dataset, v_columns);
for (int i = 0; i < param.n_device; ++i) {
v_columns[i].release();
}
// SyncMem::clear_cache();
int gpu_num;
cudaError_t err = cudaGetDeviceCount(&gpu_num);
std::atexit([](){
SyncMem::clear_cache();
});
}
void ExactTreeBuilder::ins2node_id_all_reduce(int depth) {
//get global ins2node id
{
SyncArray<int> local_ins2node_id(n_instances);
auto local_ins2node_id_data = local_ins2node_id.device_data();
auto global_ins2node_id_data = ins2node_id.front().device_data();
for (int d = 1; d < param.n_device; d++) {
local_ins2node_id.copy_from(ins2node_id[d]);
device_loop(n_instances, [=]__device__(int i) {
global_ins2node_id_data[i] = (global_ins2node_id_data[i] > local_ins2node_id_data[i]) ?
global_ins2node_id_data[i] : local_ins2node_id_data[i];
});
}
}
//processing missing value
{
int n_nodes_in_level = 1 << depth;//2^i
int nid_offset = (1 << depth) - 1;//2^i - 1
// TIMED_SCOPE(timerObj, "process missing value");
LOG(TRACE) << "update ins2node id for each missing fval";
auto global_ins2node_id_data = ins2node_id.front().device_data();//essential
auto nodes_data = trees.front().nodes.device_data();//already broadcast above
device_loop(n_instances, [=]__device__(int iid) {
int nid = global_ins2node_id_data[iid];
//if the instance is not on leaf node and not goes down
if (nodes_data[nid].splittable() && nid < nid_offset + n_nodes_in_level) {
//let the instance goes down
const Tree::TreeNode &node = nodes_data[nid];
if (node.default_right)
global_ins2node_id_data[iid] = node.rch_index;
else
global_ins2node_id_data[iid] = node.lch_index;
}
});
LOG(DEBUG) << "new nid = " << ins2node_id.front();
}
DO_ON_MULTI_DEVICES(param.n_device, [&](int device_id){
ins2node_id[device_id].copy_from(ins2node_id.front());
});
}
|
the_stack
|
namespace anakin {
namespace saber {
static void gemm(cublasHandle_t handle,
const bool TransA, const bool TransB,
int m, int n, int k, const float alpha,
const float* a, const float* b,
const float beta, float* c) {
// cout << "(" << m << "," << n << "," << k << ")" << endl;
int lda = (!TransA/* == CblasNoTrans*/) ? k : m;
int ldb = (!TransB/* == CblasNoTrans*/) ? n : k;
cublasOperation_t cuTransA =
(!TransA/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(!TransB/* == CblasNoTrans*/) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasSgemm(handle, cuTransA, cuTransB, m, n, k, &alpha, b, ldb, a, lda, &beta, c, n);
};
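// Note (illustrative): cuBLAS is column-major, so a row-major product C(MxN) = A(MxK) * B(KxN)
// is usually computed as the column-major product C^T = B^T * A^T, i.e. the operands are swapped
// and the row-major leading dimensions are reused; the wrapper above follows the same idea.
// A minimal standalone call for the no-transpose case would look like (d_A, d_B, d_C are
// hypothetical device buffers):
//
//   const float alpha = 1.f, beta = 0.f;
//   cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
//               N, M, K,            // dimensions of C^T
//               &alpha, d_B, N,     // B passed as the first (column-major) operand
//               d_A, K,
//               &beta, d_C, N);     // C stored row-major with leading dimension N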
/* intended optimization: one block computes one sequence, using shared memory to reduce;
   the kernel below currently assigns one thread per sequence */
template <typename Dtype>
__global__ void sequence_softmax(const Dtype* in_data, const int* seq_offset, const int seq_num,
Dtype* out_data) {
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
if (t_id >= seq_num) {
return;
}
int start_id = seq_offset[t_id];
int end_id = seq_offset[t_id + 1];
Dtype max_data = -1e32;
//Dtype max_data = -FLT_MAX;
Dtype sum = 0;
for (int i = start_id; i < end_id; i++) {
max_data = in_data[i] > max_data ? in_data[i] : max_data;
}
for (int i = start_id; i < end_id; i++) {
sum += expf(in_data[i] - max_data);
}
for (int i = start_id; i < end_id; i++) {
out_data[i] = expf(in_data[i] - max_data) / sum;
}
}
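/* Illustrative host reference (not part of saber): the kernel computes a numerically stable
 * softmax independently over each sub-sequence [seq_offset[t], seq_offset[t+1]):
 *
 *   void sequence_softmax_ref(const float* in, const int* offset, int seq_num, float* out) {
 *       for (int s = 0; s < seq_num; ++s) {
 *           float max_v = -FLT_MAX, sum = 0.f;
 *           for (int i = offset[s]; i < offset[s + 1]; ++i) max_v = std::max(max_v, in[i]);
 *           for (int i = offset[s]; i < offset[s + 1]; ++i) sum += std::exp(in[i] - max_v);
 *           for (int i = offset[s]; i < offset[s + 1]; ++i) out[i] = std::exp(in[i] - max_v) / sum;
 *       }
 *   }
 *
 * Subtracting the per-sequence maximum before exponentiating avoids overflow for large inputs.
 */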
template <typename Dtype>
__global__ void relu(const Dtype* in_data, Dtype* out_data, int count) {
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
if (t_id >= count) {
return;
}
out_data[t_id] = in_data[t_id] > 0 ? in_data[t_id] : 0;
}
template <typename Dtype>
__global__ void bias_relu(const Dtype* in_data, const Dtype* bias_data, const int count,
const int bias_size, Dtype* out_data) {
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
if (t_id >= count) {
return;
}
int id = t_id % bias_size;
Dtype data = in_data[t_id] + bias_data[id];
out_data[t_id] = data > 0 ? data : 0;
}
template <typename Dtype>
__global__ void sequence_pool(const Dtype* in_data, const Dtype* scale, const int* seq_offset,
const int seq_num, const int total_num, const int dim, Dtype* out_data) {
int t_id = threadIdx.x + blockIdx.x * blockDim.x;
if (t_id >= seq_num * dim) {
return;
}
int dim_id = t_id % dim;
int seq_id = t_id / dim;
int start_id = seq_offset[seq_id];
int end_id = seq_offset[seq_id + 1];
Dtype sum = 0;
const Dtype* in = in_data + dim_id + start_id * dim;
for (int i = 0; i < end_id - start_id; i++) {
sum += in[0] * scale[i + start_id];
in += dim;
}
out_data[t_id] = sum;
}
template<typename Dtype>
__device__ Dtype sigmoid(Dtype in) {
Dtype out = Dtype(1.0) / (1 + exp(-in));
return out;
}
template<typename Dtype>
__device__ Dtype tanh(Dtype in) {
//Dtype out = (exp(in)- exp(-in)) / (exp(in) + exp(-in));
//Dtype out = 1 - 2.f / (expf(2*in) + 1);
Dtype a = expf(in);
Dtype b = expf(-in);
return (a - b) / (a + b);
}
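/* The kernel below applies one LSTM cell update to precomputed gate pre-activations.
 * Judging from the indices used, the 4*hidden_size slab is laid out as
 * [input gate i | forget gate f | cell candidate g | output gate o]:
 *   g  = tanh(x_g + b_g)      i = sigmoid(x_i + b_i)
 *   f  = sigmoid(x_f + b_f)   o = sigmoid(x_o + b_o)
 *   c' = i * g + f * c        h = o * tanh(c')
 */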
template <typename Dtype>
__global__ void lstm_bias_and_act(const Dtype* in_data, const Dtype* bias_data, Dtype* out_data,
Dtype* cell_data, int batch_size, int hidden_size) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= batch_size * hidden_size) {
return;
}
int dim_id = tid % hidden_size;
int batch_id = tid / hidden_size;
int offset = batch_id * hidden_size + dim_id;
const Dtype* tmp_in = in_data + batch_id * 4 * hidden_size + dim_id;
Dtype* tmp_cell = cell_data + offset;
const Dtype* tmp_bias = bias_data + dim_id;
Dtype ct = tanh(tmp_in[2 * hidden_size] + tmp_bias[2 * hidden_size]);
Dtype ig = sigmoid(tmp_in[0 * hidden_size] + tmp_bias[0 * hidden_size]);
Dtype fg = sigmoid(tmp_in[1 * hidden_size] + tmp_bias[1 * hidden_size]);
Dtype og = sigmoid(tmp_in[3 * hidden_size] + tmp_bias[3 * hidden_size]);
tmp_cell[0] = ig * ct + fg * tmp_cell[0];
out_data[offset] = og * tanh(tmp_cell[0]);
}
template <typename Dtype>
__global__ void sequence_bias_relu(const Dtype* in_data, const Dtype* seq_bias,
const Dtype* bias_data, const int* seq_id, const int num, const int dim,
Dtype* out_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= dim * num) {
return;
}
int dim_id = tid % dim;
int word_id = tid / dim;
int cur_seq_id = seq_id[word_id];
Dtype data = in_data[tid] + seq_bias[cur_seq_id * dim + dim_id] + bias_data[dim_id];
//printf("%d, in:%f, seq_bias:%f, bias:%f\n", tid, in_data[tid], seq_bias[cur_seq_id * dim + dim_id], bias_data[dim_id]);
out_data[tid] = data > 0 ? data : 0;
}
template <typename Dtype>
__global__ void lstm_result_to_sequence(const Dtype* in_data, const int* seq_id_map,
const int* offset, const int seq_num,
const int word_num, const int hidden_size, Dtype* out_data) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid >= hidden_size * word_num) {
return;
}
int dim_id = tid % hidden_size;
int word_id = tid / hidden_size;
int seq_id = seq_id_map[word_id];
int word_id_in_seq = word_id - offset[seq_id];
out_data[tid] = in_data[(word_id_in_seq * seq_num + seq_id) * hidden_size + dim_id];
}
template<>
SaberStatus SaberAttensionLstm<NV, AK_FLOAT>::create(const std::vector<OpTensor*>& inputs, \
std::vector<OpTensor*>& outputs, \
AttensionLstmParam<NV>& attension_lstm_param, Context<NV>& ctx) {
if(inputs[0]->get_seq_offset().size()>0) {
int batch_size = inputs[0]->get_seq_offset()[0].size() - 1;
int sequence = inputs[0]->num();
_gemm_wx = saber_find_fast_sass_gemm(false, false,
sequence, 4 * _hidden_size, _word_size);
_gemm_wh = saber_find_fast_sass_gemm(false, false, batch_size, 4 * _hidden_size, _hidden_size);
}
return SaberSuccess;
}
template<>
SaberStatus SaberAttensionLstm<NV, AK_FLOAT>::init(const std::vector<OpTensor*>& inputs, \
std::vector<OpTensor*>& outputs, \
AttensionLstmParam <NV>& attension_lstm_param, Context<NV>& ctx) {
this->_ctx = &ctx;
auto cuda_stream = ctx.get_compute_stream();
CUBLAS_CHECK(cublasCreate(&_handle));
CUBLAS_CHECK(cublasSetStream(_handle, cuda_stream));
auto lstm_param = attension_lstm_param.lstm_param;
_hidden_size = lstm_param.bias()->valid_size() / 4 / lstm_param.num_layers;
int weights_h2h_size = _hidden_size * _hidden_size * 4 * (2 * lstm_param.num_layers - 1);
int weights_i2h_size = lstm_param.weight()->valid_size() - weights_h2h_size;
_word_size = weights_i2h_size / (4 * _hidden_size);
auto fc_vec = attension_lstm_param.attension_param.fc_vec;
_attn_outs.resize(fc_vec.size());
_max_seq_len = 100;
for (int i = 0; i < fc_vec.size(); i++) {
Shape shape ({inputs[0]->num(), fc_vec[i].num_output, 1, 1});
_attn_outs[i] = new OpTensor(shape);
}
return create(inputs, outputs, attension_lstm_param, ctx);
}
template<>
SaberStatus SaberAttensionLstm<NV, AK_FLOAT>::dispatch(\
const std::vector<OpTensor*>& inputs,
std::vector<OpTensor*>& outputs,
AttensionLstmParam <NV>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
auto attn_param = param.attension_param;
auto lstm_param = param.lstm_param;
int hidden_size = lstm_param.with_peephole ? lstm_param.bias()->valid_size() / 7 :
lstm_param.bias()->valid_size() / 4;
OpTensor* input = inputs[0];
_attn_outs.resize(attn_param.fc_vec.size());
auto seq_offset = inputs[0]->get_seq_offset()[0];
int seq_num = seq_offset.size() - 1;
int word_num = inputs[0]->num();
Shape softmax_out_shape ({word_num, 1, 1, 1});
Shape dev_seq_id_shape ({seq_num, 1, 1, 1});
_softmax_out.reshape(softmax_out_shape);
_dev_seq_id_map.reshape(dev_seq_id_shape);
std::vector<int> id_map;
int seq_id = 0;
int max_len = 0;
for (int i = 0; i < seq_num; i++) {
for (int j = seq_offset[i]; j < seq_offset[i + 1]; j++) {
id_map.push_back(i);
}
}
for (int i = 0; i < seq_num; i++) {
int cur_len = seq_offset[i + 1] - seq_offset[i];
max_len = max_len < cur_len ? cur_len : max_len;
}
cudaMemcpyAsync(_dev_seq_id_map.mutable_data(), &id_map[0], sizeof(int) * seq_num,
cudaMemcpyHostToDevice, stream);
Shape offset_shape ( {seq_num + 1, 1, 1, 1});
_dev_offset.reshape(offset_shape);
cudaMemcpyAsync(_dev_offset.mutable_data(), &seq_offset[0],
sizeof(int) * seq_offset.size(),
cudaMemcpyHostToDevice, stream);
/*for first fc*/
int M_0 = input->num();
int N_0 = attn_param.fc_vec[0].num_output;
int K_0 = input->valid_size() / input->num();
Shape first_fc_out_0_shape ({M_0, N_0, 1, 1});
_first_fc_out_0.reshape(first_fc_out_0_shape);
auto data_in = static_cast<const OpDataType*>(input->data());
auto data_out = static_cast<OpDataType*>(_first_fc_out_0.mutable_data());
auto fc_vec = attn_param.fc_vec;
//auto first_fc_0_kernel = saber_find_fast_sass_gemm(false, !fc_vec[0].is_transpose_weights, M_0, N_0, K_0);
auto first_fc_0_kernel = saber_find_fast_sass_gemm(false, false, M_0, N_0, K_0);
first_fc_0_kernel(M_0, N_0, K_0, 1.0f, data_in, 0.f, static_cast<const OpDataType*>(fc_vec[0].weights->data()), data_out, stream);
Shape cell_shape ({seq_num, hidden_size, 1, 1});
_cell_out.reshape(cell_shape);
cudaMemsetAsync(_cell_out.mutable_data(), 0, sizeof(float) * _cell_out.valid_size(), stream);
Shape lstm_mid_shape( {seq_num, 4 * hidden_size, 1, 1});
_hidden_out.reshape(lstm_mid_shape);
Shape lstm_shape ({max_len * seq_num, hidden_size, 1, 1});
_lstm_out.reshape(lstm_shape);
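    /* Sketch of the per-timestep attention + LSTM loop below, as read from the code:
     *  1. combine the per-word projection _first_fc_out_0 (computed once above) with,
     *     for word_id > 0, a per-sequence projection of the previous cell state,
     *     then apply bias + ReLU to obtain the first attention FC output;
     *  2. run the remaining layers of fc_vec to get one attention score per word;
     *  3. sequence_softmax normalizes the scores within each sequence and
     *     sequence_pool builds the attention-weighted input vector per sequence;
     *  4. the W_x GEMM (plus, after the first step, the W_h GEMM on the previous
     *     hidden output) and lstm_bias_and_act advance the LSTM state, writing h_t
     *     into the word_id-th slice of _lstm_out.
     */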
/*for other fc*/
for (int word_id = 0; word_id < max_len; word_id++) {
_attn_outs[0]->reshape(first_fc_out_0_shape);
if (word_id > 0) {
Shape h_shape ({seq_num, N_0, 1, 1});
_first_fc_out_1.reshape(h_shape);
auto kernel_1 = saber_find_fast_sass_gemm(false, false, seq_num, N_0, hidden_size);
kernel_1(seq_num, N_0, hidden_size, 1.0f,
static_cast<const OpDataType*>(_cell_out.data()), 0.f,
static_cast<const OpDataType*>(fc_vec[0].weights->data()) + K_0 * N_0, static_cast<OpDataType*>(_first_fc_out_1.mutable_data()), stream);
sequence_bias_relu <<< CUDA_GET_BLOCKS(_attn_outs[0]->valid_size()), CUDA_NUM_THREADS, 0,
stream >>> (static_cast<const OpDataType*>(_first_fc_out_0.data()), static_cast<const OpDataType*>(_first_fc_out_1.data()), static_cast<const OpDataType*>(fc_vec[0].bias->data()),
static_cast<const int*>(_dev_seq_id_map.data()), M_0, N_0, static_cast<OpDataType*>(_attn_outs[0]->mutable_data()));
} else {
cudaMemcpyAsync((void*)_attn_outs[0]->mutable_data(), (void*) _first_fc_out_0.data(),
sizeof(float) * _attn_outs[0]->valid_size(),
cudaMemcpyDeviceToDevice, stream);
bias_relu <<< CUDA_GET_BLOCKS(_attn_outs[0]->valid_size()), CUDA_NUM_THREADS, 0,
stream >>> (data_out, static_cast<const OpDataType*>(fc_vec[0].bias->data()), _attn_outs[0]->valid_size(), N_0,
static_cast<OpDataType*>(_attn_outs[0]->mutable_data()));
}
for (int i = 1; i < attn_param.fc_vec.size(); i++) {
int M = input->num();
int N = attn_param.fc_vec[i].num_output;
int K = attn_param.fc_vec[i - 1].num_output;
Shape attn_out_shape ( {M, N, 1, 1});
_attn_outs[i]->reshape(attn_out_shape);
auto fc_in_data = static_cast<const OpDataType*>(_attn_outs[i - 1]->data());
auto fc_out_data = static_cast<OpDataType*>(_attn_outs[i]->mutable_data());
auto kernel = saber_find_fast_sass_gemm(false, false, M, N, K);
kernel(M, N, K, 1.0f, fc_in_data, 0.0f, static_cast<const OpDataType*>(fc_vec[i].weights->data()), fc_out_data, stream);
bias_relu <<< CUDA_GET_BLOCKS(_attn_outs[i]->valid_size()), CUDA_NUM_THREADS, 0,
stream >>> (fc_out_data, static_cast<const OpDataType*>(fc_vec[i].bias->data()), _attn_outs[i]->valid_size(), N, fc_out_data);
}
int fc_num = attn_param.fc_vec.size();
int dim = inputs[0]->valid_size() / inputs[0]->num();
Shape pool_shape( {seq_num, dim, 1, 1});
_pool_out.reshape(pool_shape);
sequence_softmax <<< CUDA_GET_BLOCKS(seq_num), CUDA_NUM_THREADS, 0, stream>>>
(static_cast<const OpDataType*>(_attn_outs[fc_num - 1]->data()), static_cast<const int*>(_dev_offset.data()), seq_num, static_cast<OpDataType*>(_softmax_out.mutable_data()));
sequence_pool <<< CUDA_GET_BLOCKS(seq_num* dim), CUDA_NUM_THREADS, 0, stream>>>(static_cast<const OpDataType*>(input->data()),
static_cast<const OpDataType*>(_softmax_out.data()), static_cast<const int*>(_dev_offset.data()), seq_num, inputs[0]->num(), dim, static_cast<OpDataType*>(_pool_out.mutable_data()));
auto x_data = static_cast<const OpDataType*>(_pool_out.data());
auto _wx_data = static_cast<const OpDataType*>(lstm_param.weight()->data());
auto _bias_data = static_cast<const OpDataType*>(lstm_param.bias()->data());
int word_size = dim;
auto _wh_data = static_cast<const OpDataType*>(lstm_param.weight()->data()) + 4 * hidden_size * word_size;
_gemm_wx(seq_num, 4 * hidden_size, word_size, 1.0, x_data, 0.0, static_cast<const OpDataType*>(lstm_param.weight()->data()),
static_cast<OpDataType*>(_hidden_out.mutable_data()), stream);
if (word_id > 0) {
_gemm_wh(seq_num, 4 * hidden_size, hidden_size, 1.0,
static_cast<const OpDataType*>(_lstm_out.data()) + (word_id - 1) * seq_num * hidden_size, 1.0, _wh_data, static_cast<OpDataType*>(_hidden_out.mutable_data()),
stream);
}
auto cell_data = static_cast<OpDataType*>(_cell_out.mutable_data());
lstm_bias_and_act <<< CUDA_GET_BLOCKS(seq_num* hidden_size), CUDA_NUM_THREADS, 0, stream>>>
                     (static_cast<const OpDataType*>(_hidden_out.data()), _bias_data, static_cast<OpDataType*>(_lstm_out.mutable_data()) + word_id * seq_num * hidden_size,
cell_data, seq_num, hidden_size);
}
lstm_result_to_sequence <<< CUDA_GET_BLOCKS(word_num* hidden_size), CUDA_NUM_THREADS, 0, stream>>>
(static_cast<OpDataType*>(_lstm_out.mutable_data()), static_cast<const int*>(_dev_seq_id_map.data()),
static_cast<const int*>(_dev_offset.data()), seq_num, word_num, hidden_size, static_cast<OpDataType*>(outputs[0]->mutable_data()));
outputs[0]->set_seq_offset(inputs[0]->get_seq_offset());
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberAttensionLstm, AttensionLstmParam, NV, AK_HALF);
DEFINE_OP_TEMPLATE(SaberAttensionLstm, AttensionLstmParam, NV, AK_INT8);
}
}
|
the_stack
|
#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/functional.h>
#include <thrust/iterator/reverse_iterator.h>
#include <thrust/scan.h>
#include <thrust/tabulate.h>
#include <thrust/transform.h>
namespace cudf {
namespace groupby {
namespace detail {
namespace {
/**
* @brief Functor to compare two rows of a table in given permutation order
 * This is useful for identifying unique elements in a table traversed in sorted order,
 * i.e. when the permutation is the sorted order of the table.
*
*/
template <typename Iterator>
struct permuted_comparator {
/**
* @brief comparator object which compares two rows of the table in given permutation order
*
* @param device_table Device table to compare
* @param permutation The permutation order, integer type column.
* @param has_nulls whether the table has nulls
*/
permuted_comparator(table_device_view device_table, Iterator const permutation, bool has_nulls)
: comparator(nullate::DYNAMIC{has_nulls}, device_table, device_table, null_equality::EQUAL),
permutation(permutation)
{
}
__device__ bool operator()(size_type index1, size_type index2) const
{
return comparator(permutation[index1], permutation[index2]);
};
private:
row_equality_comparator<nullate::DYNAMIC> comparator;
Iterator const permutation;
};
/**
 * @brief Generate grouped row ranks or dense ranks using a row comparison, then scan the results
*
* @tparam forward true if the rank scan computation should use forward iterator traversal (default)
* else reverse iterator traversal
* @tparam value_resolver flag value resolver function with boolean first and row number arguments
 * @tparam scan_operator scan function run on the flag values
* @param grouped_values input column to generate ranks for
* @param value_order column of type INT32 that contains the order of the values in the
* grouped_values column
* @param group_labels ID of group that the corresponding value belongs to
* @param group_offsets group index offsets with group ID indices
* @param resolver flag value resolver
 * @param scan_op scan operation run on the flag results
* @param has_nulls true if nulls are included in the `grouped_values` column
* @param stream CUDA stream used for device memory operations and kernel launches
* @param mr Device memory resource used to allocate the returned column's device memory
* @return std::unique_ptr<column> rank values
*/
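// Illustrative example of the flag + segmented-scan scheme (hypothetical values):
//   one group with values [3, 3, 5]
//   min-rank flags [1, 0, 3] --max-scan--> ranks [1, 1, 3]
//   dense flags    [1, 0, 1] --sum-scan--> ranks [1, 1, 2]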
template <bool forward, typename value_resolver, typename scan_operator>
std::unique_ptr<column> rank_generator(column_view const& grouped_values,
column_view const& value_order,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
value_resolver resolver,
scan_operator scan_op,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto const flattened = cudf::structs::detail::flatten_nested_columns(
table_view{{grouped_values}}, {}, {}, structs::detail::column_nullability::MATCH_INCOMING);
auto const d_flat_order = table_device_view::create(flattened, stream);
auto sorted_index_order = value_order.begin<size_type>();
auto comparator = permuted_comparator(*d_flat_order, sorted_index_order, has_nulls);
auto ranks = make_fixed_width_column(data_type{type_to_id<size_type>()},
flattened.flattened_columns().num_rows(),
mask_state::UNALLOCATED,
stream,
mr);
auto mutable_ranks = ranks->mutable_view();
auto unique_identifier = [labels = group_labels.begin(),
offsets = group_offsets.begin(),
comparator,
resolver] __device__(size_type row_index) {
auto const group_start = offsets[labels[row_index]];
if constexpr (forward) {
// First value of equal values is 1.
return resolver(row_index == group_start || !comparator(row_index, row_index - 1),
row_index - group_start);
} else {
auto const group_end = offsets[labels[row_index] + 1];
// Last value of equal values is 1.
return resolver(row_index + 1 == group_end || !comparator(row_index, row_index + 1),
row_index - group_start);
}
};
thrust::tabulate(rmm::exec_policy(stream),
mutable_ranks.begin<size_type>(),
mutable_ranks.end<size_type>(),
unique_identifier);
auto [group_labels_begin, mutable_rank_begin] = [&]() {
if constexpr (forward) {
return thrust::pair{group_labels.begin(), mutable_ranks.begin<size_type>()};
} else {
return thrust::pair{thrust::reverse_iterator(group_labels.end()),
thrust::reverse_iterator(mutable_ranks.end<size_type>())};
}
}();
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
group_labels_begin,
group_labels_begin + group_labels.size(),
mutable_rank_begin,
mutable_rank_begin,
thrust::equal_to{},
scan_op);
return ranks;
}
} // namespace
std::unique_ptr<column> min_rank_scan(column_view const& grouped_values,
column_view const& value_order,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return rank_generator<true>(
grouped_values,
value_order,
group_labels,
group_offsets,
[] __device__(bool unequal, auto row_index_in_group) {
return unequal ? row_index_in_group + 1 : 0;
},
DeviceMax{},
has_nested_nulls(table_view{{grouped_values}}),
stream,
mr);
}
std::unique_ptr<column> max_rank_scan(column_view const& grouped_values,
column_view const& value_order,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return rank_generator<false>(
grouped_values,
value_order,
group_labels,
group_offsets,
[] __device__(bool unequal, auto row_index_in_group) {
return unequal ? row_index_in_group + 1 : std::numeric_limits<size_type>::max();
},
DeviceMin{},
has_nested_nulls(table_view{{grouped_values}}),
stream,
mr);
}
std::unique_ptr<column> first_rank_scan(column_view const& grouped_values,
column_view const&,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto ranks = make_fixed_width_column(
data_type{type_to_id<size_type>()}, group_labels.size(), mask_state::UNALLOCATED, stream, mr);
auto mutable_ranks = ranks->mutable_view();
thrust::tabulate(rmm::exec_policy(stream),
mutable_ranks.begin<size_type>(),
mutable_ranks.end<size_type>(),
[labels = group_labels.begin(),
offsets = group_offsets.begin()] __device__(size_type row_index) {
auto group_start = offsets[labels[row_index]];
return row_index - group_start + 1;
});
return ranks;
}
std::unique_ptr<column> average_rank_scan(column_view const& grouped_values,
column_view const& value_order,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto max_rank = max_rank_scan(grouped_values,
value_order,
group_labels,
group_offsets,
stream,
rmm::mr::get_current_device_resource());
auto min_rank = min_rank_scan(grouped_values,
value_order,
group_labels,
group_offsets,
stream,
rmm::mr::get_current_device_resource());
auto ranks = make_fixed_width_column(
data_type{type_to_id<double>()}, group_labels.size(), mask_state::UNALLOCATED, stream, mr);
auto mutable_ranks = ranks->mutable_view();
thrust::transform(rmm::exec_policy(stream),
max_rank->view().begin<size_type>(),
max_rank->view().end<size_type>(),
min_rank->view().begin<size_type>(),
mutable_ranks.begin<double>(),
[] __device__(auto max_rank, auto min_rank) -> double {
return min_rank + (max_rank - min_rank) / 2.0;
});
return ranks;
}
std::unique_ptr<column> dense_rank_scan(column_view const& grouped_values,
column_view const& value_order,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
return rank_generator<true>(
grouped_values,
value_order,
group_labels,
group_offsets,
[] __device__(bool const unequal, size_type const) { return unequal ? 1 : 0; },
DeviceSum{},
has_nested_nulls(table_view{{grouped_values}}),
stream,
mr);
}
std::unique_ptr<column> group_rank_to_percentage(rank_method const method,
rank_percentage const percentage,
column_view const& rank,
column_view const& count,
device_span<size_type const> group_labels,
device_span<size_type const> group_offsets,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(percentage != rank_percentage::NONE, "Percentage cannot be NONE");
auto ranks = make_fixed_width_column(
data_type{type_to_id<double>()}, group_labels.size(), mask_state::UNALLOCATED, stream, mr);
ranks->set_null_mask(copy_bitmask(rank, stream, mr));
auto mutable_ranks = ranks->mutable_view();
auto one_normalized = [] __device__(auto const rank, auto const group_size) {
return group_size == 1 ? 0.0 : ((rank - 1.0) / (group_size - 1));
};
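  // The two percentage modes used below, for a denominator d (the group size, or the
  // last rank of the group for the dense method):
  //   ZERO_NORMALIZED: rank / d
  //   ONE_NORMALIZED : (rank - 1) / (d - 1), defined as 0 when d == 1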
if (method == rank_method::DENSE) {
thrust::tabulate(rmm::exec_policy(stream),
mutable_ranks.begin<double>(),
mutable_ranks.end<double>(),
[percentage,
one_normalized,
is_double = rank.type().id() == type_id::FLOAT64,
dcount = count.begin<size_type>(),
labels = group_labels.begin(),
offsets = group_offsets.begin(),
d_rank = rank.begin<double>(),
s_rank = rank.begin<size_type>()] __device__(size_type row_index) -> double {
double const r = is_double ? d_rank[row_index] : s_rank[row_index];
auto const count = dcount[labels[row_index]];
size_type const last_rank_index = offsets[labels[row_index]] + count - 1;
auto const last_rank = s_rank[last_rank_index];
return percentage == rank_percentage::ZERO_NORMALIZED
? r / last_rank
: one_normalized(r, last_rank);
});
} else {
thrust::tabulate(rmm::exec_policy(stream),
mutable_ranks.begin<double>(),
mutable_ranks.end<double>(),
[percentage,
one_normalized,
is_double = rank.type().id() == type_id::FLOAT64,
dcount = count.begin<size_type>(),
labels = group_labels.begin(),
d_rank = rank.begin<double>(),
s_rank = rank.begin<size_type>()] __device__(size_type row_index) -> double {
double const r = is_double ? d_rank[row_index] : s_rank[row_index];
auto const count = dcount[labels[row_index]];
return percentage == rank_percentage::ZERO_NORMALIZED
? r / count
: one_normalized(r, count);
});
}
return ranks;
}
} // namespace detail
} // namespace groupby
} // namespace cudf
|
the_stack
|
#include "gpu/coredepth/sweep.hpp"
#include "backend/common/coredepth/sphereSweepParams.h"
#include "../surface.hpp"
#include "gpu/memcpy.hpp"
#include "core/transformGeoParams.hpp"
#include "libvideostitch/geometryDef.hpp"
#include "libvideostitch/panoDef.hpp"
#include "backend/cuda/deviceBuffer.hpp"
#include "backend/cuda/deviceBuffer2D.hpp"
#include "backend/cuda/surface.hpp"
#include "backend/cuda/deviceStream.hpp"
#include "cuda/util.hpp"
#include "gpu/buffer.hpp"
#include "kernels/sphereSweepKernel.cu"
#include <math.h>
static const int CudaBlockSize = 16;
namespace VideoStitch {
namespace GPU {
static int numCall = 0;
PotentialValue<struct InputParams6> prepareInputParams(const Core::PanoDefinition& panoDef, int time,
float scale = 1.f) {
struct InputParams6 inputParamsArray;
for (videoreaderid_t videoInputID = 0; videoInputID < panoDef.numVideoInputs(); videoInputID++) {
const Core::InputDefinition& input = panoDef.getVideoInput(videoInputID);
const Core::GeometryDefinition geometry = input.getGeometries().at(time);
Core::TransformGeoParams params(input, geometry, panoDef);
if (geometry.hasDistortion()) {
return PotentialValue<struct InputParams6>({Origin::Stitcher, ErrType::ImplementationError,
"Sphere sweep does not handle distortion parameters in inputs"});
}
float2 center, iscale;
center.x = (float)input.getCenterX(geometry) / scale;
center.y = (float)input.getCenterY(geometry) / scale;
iscale.x = (float)geometry.getHorizontalFocal() / scale;
iscale.y = (float)geometry.getVerticalFocal() / scale;
InputParams& inputParams = inputParamsArray.params[videoInputID];
inputParams.distortion = params.getDistortion();
inputParams.transform = params.getPose();
inputParams.inverseTransform = params.getPoseInverse();
inputParams.scale = iscale;
inputParams.centerShift = center;
inputParams.texWidth = (int)(input.getWidth() / scale);
inputParams.texHeight = (int)(input.getHeight() / scale);
inputParams.cropLeft = (int)(input.getCropLeft() / scale);
inputParams.cropRight = (int)(input.getCropRight() / scale);
inputParams.cropTop = (int)(input.getCropTop() / scale);
inputParams.cropBottom = (int)(input.getCropBottom() / scale);
}
return PotentialValue<struct InputParams6>(inputParamsArray);
}
static read_only image2d_t getSurfaceFromMap(const videoreaderid_t index,
const std::map<videoreaderid_t, Core::SourceSurface*>& surfaces) {
  auto it = surfaces.find(index);
  return (it != surfaces.end()) ? it->second->pimpl->surface->get().texture() : 0;
}
Status splatInputWithDepthIntoPano(const Core::PanoDefinition& panoDef, Core::PanoSurface& pano,
const GPU::Surface& depthSurface,
const std::map<videoreaderid_t, Core::SourceSurface*>& inputSurfaces,
GPU::Stream stream) {
// TODO
int time = 0;
const videoreaderid_t inputID = 0;
Buffer<uint32_t> panoBuffer = pano.pimpl->buffer;
float2 pscale;
pscale.x = Core::TransformGeoParams::computePanoScale(Core::PanoProjection::Equirectangular, pano.getWidth(), 360.f);
pscale.y =
2 * Core::TransformGeoParams::computePanoScale(Core::PanoProjection::Equirectangular, pano.getHeight(), 360.f);
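  // pscale maps equirectangular pixels to angles: the width covers 360 degrees while the
  // height covers only 180 degrees, which the extra factor of 2 on pscale.y appears to
  // account for.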
auto potInputParamsArray = prepareInputParams(panoDef, time);
FAIL_RETURN(potInputParamsArray.status());
const InputParams& referenceInput = potInputParamsArray.value().params[inputID];
const float offset = cosf(numCall++ / 20.f * (float)M_PI / 2.f) * 0.2f;
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(referenceInput.texWidth, dimBlock.x),
(unsigned)Cuda::ceilDiv(referenceInput.texHeight, dimBlock.y), 1);
Core::splatInputWithDepthIntoPano<<<dimGrid, dimBlock, 0, stream.get()>>>(
panoBuffer.get(), (unsigned)pano.getWidth(), (unsigned)pano.getHeight(), pscale,
getSurfaceFromMap(inputID, inputSurfaces), depthSurface.get().surface(), referenceInput, panoDef.numVideoInputs(),
offset);
Logger::get(Logger::Info) << "SphereSweep frame " << numCall << std::endl;
return Status::OK();
}
Status sphereSweepInput(videoreaderid_t sourceID, int frame, GPU::Surface& dst,
const std::map<videoreaderid_t, Core::SourceSurface*>& inputSurfaces,
const Core::PanoDefinition& panoDef, GPU::Stream& stream, const float scale) {
// debug command line pipeline: just copy input surface to output surface
// via temporary buffer as we don't have a surface->surface copy function
// auto tmpBuf = GPU::uniqueBuffer<uint32_t>(inputDef.getWidth() * inputDef.getHeight(), "tmp bfu");
// GPU::memcpyAsync(tmpBuf.borrow(), *gpuSurf, stream);
// GPU::memcpyAsync(dst, tmpBuf.borrow_const(), stream);
// stream.synchronize()
if (panoDef.numVideoInputs() > maxDepthInputs()) {
return Status{Origin::Stitcher, ErrType::ImplementationError,
"Sphere sweep only implemented up to 6 inputs (hardcoded)"};
}
auto potInputParamsArray = prepareInputParams(panoDef, frame, scale);
FAIL_RETURN(potInputParamsArray.status());
const struct InputParams6 inputParamsArray = potInputParamsArray.releaseValue();
const InputParams& referenceInput = inputParamsArray.params[sourceID];
// Running a kernel that takes > 1s destabilizes the system
// (Display manager resets or kernel panic)
// As the current version is not optimised and works at full resolution it can take several seconds to complete
// --> Tile the work. Each tile should complete in less than 1 second.
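  // With numBlocks = 16 the image is swept as a 16x16 grid of tiles; ceilDiv pads the
  // texture size to a multiple of numBlocks so each tile covers exactly
  // (paddedTexWidth / 16) x (paddedTexHeight / 16) pixels. For a hypothetical 1920x1080
  // input this means padding to 1920x1088 and launching 120x68-pixel tiles.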
const int numBlocks = 16;
// Make sure texture width is a multiple of numBlocks
const int paddedTexWidth = (int)Cuda::ceilDiv(referenceInput.texWidth, numBlocks) * numBlocks;
const int paddedTexHeight = (int)Cuda::ceilDiv(referenceInput.texHeight, numBlocks) * numBlocks;
for (int cx = 0; cx < numBlocks; cx++) {
for (int cy = 0; cy < numBlocks; cy++) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(paddedTexWidth / numBlocks, dimBlock.x),
(unsigned)Cuda::ceilDiv(paddedTexHeight / numBlocks, dimBlock.y), 1);
Core::sphereSweepInputKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
dst.get().surface(), (unsigned)dst.width(), (unsigned)dst.height(), nullptr,
getSurfaceFromMap(0, inputSurfaces), getSurfaceFromMap(1, inputSurfaces), getSurfaceFromMap(2, inputSurfaces),
getSurfaceFromMap(3, inputSurfaces), getSurfaceFromMap(4, inputSurfaces), getSurfaceFromMap(5, inputSurfaces),
inputParamsArray, sourceID, panoDef.numVideoInputs(), cx, cy, paddedTexWidth / numBlocks,
paddedTexHeight / numBlocks);
// Force synchronization after tile computation for system stability
stream.synchronize();
}
}
Logger::get(Logger::Info) << "SphereSweep frame " << frame << " input " << sourceID << std::endl;
return Status::OK();
}
Status sphereSweepInputSGM(videoreaderid_t sourceID, int frame, GPU::Surface& dst,
GPU::HostBuffer<unsigned short>& hostCostVolume,
const std::map<videoreaderid_t, Core::SourceSurface*>& inputSurfaces,
const Core::PanoDefinition& panoDef, GPU::Stream& stream, const float scale) {
// debug command line pipeline: just copy input surface to output surface
// via temporary buffer as we don't have a surface->surface copy function
// auto tmpBuf = GPU::uniqueBuffer<uint32_t>(inputDef.getWidth() * inputDef.getHeight(), "tmp bfu");
// GPU::memcpyAsync(tmpBuf.borrow(), *gpuSurf, stream);
// GPU::memcpyAsync(dst, tmpBuf.borrow_const(), stream);
// stream.synchronize()
if (panoDef.numVideoInputs() > maxDepthInputs()) {
return Status{Origin::Stitcher, ErrType::ImplementationError,
"Sphere sweep only implemented up to 6 inputs (hardcoded)"};
}
auto potInputParamsArray = prepareInputParams(panoDef, frame, scale);
FAIL_RETURN(potInputParamsArray.status());
const struct InputParams6 inputParamsArray = potInputParamsArray.releaseValue();
const InputParams& referenceInput = inputParamsArray.params[sourceID];
GPU::UniqueBuffer<unsigned short> devCostVolume;
PROPAGATE_FAILURE_STATUS(
devCostVolume.alloc(referenceInput.texWidth * referenceInput.texHeight * numSphereSweeps(), "SGM score volume"));
// Running a kernel that takes > 1s destabilizes the system
// (Display manager resets or kernel panic)
// As the current version is not optimised and works at full resolution it can take several seconds to complete
// --> Tile the work. Each tile should complete in less than 1 second.
const int numBlocks = 16;
// Make sure texture width is a multiple of numBlocks
const int paddedTexWidth = (int)Cuda::ceilDiv(referenceInput.texWidth, numBlocks) * numBlocks;
const int paddedTexHeight = (int)Cuda::ceilDiv(referenceInput.texHeight, numBlocks) * numBlocks;
for (int cx = 0; cx < numBlocks; cx++) {
for (int cy = 0; cy < numBlocks; cy++) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(paddedTexWidth / numBlocks, dimBlock.x),
(unsigned)Cuda::ceilDiv(paddedTexHeight / numBlocks, dimBlock.y), 1);
Core::sphereSweepInputKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
dst.get().surface(), (unsigned)dst.width(), (unsigned)dst.height(), devCostVolume.borrow().devicePtr(),
getSurfaceFromMap(0, inputSurfaces), getSurfaceFromMap(1, inputSurfaces), getSurfaceFromMap(2, inputSurfaces),
getSurfaceFromMap(3, inputSurfaces), getSurfaceFromMap(4, inputSurfaces), getSurfaceFromMap(5, inputSurfaces),
inputParamsArray, sourceID, panoDef.numVideoInputs(), cx, cy, paddedTexWidth / numBlocks,
paddedTexHeight / numBlocks);
// Force synchronization after tile computation for system stability
FAIL_RETURN(stream.synchronize());
}
}
Logger::get(Logger::Info) << "SphereSweep frame " << frame << " input " << sourceID << std::endl;
// copy scoreVolume to host
FAIL_RETURN(GPU::memcpyAsync(
hostCostVolume.hostPtr(), devCostVolume.borrow_const(),
referenceInput.texWidth * referenceInput.texHeight * numSphereSweeps() * sizeof(unsigned short), stream));
stream.synchronize();
Logger::get(Logger::Info) << "SphereSweep score volume copied back to host" << std::endl;
return Status::OK();
}
Status sphereSweepInputDisparityToDepth(videoreaderid_t sourceID, int frame, GPU::Surface& dst,
GPU::HostBuffer<short>& hostDisparity, bool useHostDisparity,
const std::map<videoreaderid_t, Core::SourceSurface*>& surfaces,
const Core::PanoDefinition& panoDef, GPU::Stream& stream, const float scale) {
if (panoDef.numVideoInputs() > maxDepthInputs()) {
return Status{Origin::Stitcher, ErrType::ImplementationError,
"Sphere sweep only implemented up to 6 inputs (hardcoded)"};
}
auto potInputParamsArray = prepareInputParams(panoDef, frame, scale);
FAIL_RETURN(potInputParamsArray.status());
const struct InputParams6 inputParamsArray = potInputParamsArray.releaseValue();
const InputParams& referenceInput = inputParamsArray.params[sourceID];
// copy host disparity to GPU buffer
PotentialValue<GPU::Buffer<short>> potDevBuf =
GPU::Buffer<short>::allocate(referenceInput.texWidth * referenceInput.texHeight, "SGM output disparity");
FAIL_RETURN(potDevBuf.status());
GPU::Buffer<short> devDisparity(potDevBuf.releaseValue());
FAIL_RETURN(GPU::memcpyAsync(devDisparity, hostDisparity.hostPtr(),
referenceInput.texWidth * referenceInput.texHeight * sizeof(short), stream));
stream.synchronize();
// Running a kernel that takes > 1s destabilizes the system
// (Display manager resets or kernel panic)
// As the current version is not optimised and works at full resolution it can take several seconds to complete
// --> Tile the work. Each tile should complete in less than 1 second.
const int numBlocks = 4;
// Make sure texture width is a multiple of numBlocks
const int paddedTexWidth = (int)Cuda::ceilDiv(referenceInput.texWidth, numBlocks) * numBlocks;
const int paddedTexHeight = (int)Cuda::ceilDiv(referenceInput.texHeight, numBlocks) * numBlocks;
for (int cx = 0; cx < numBlocks; cx++) {
for (int cy = 0; cy < numBlocks; cy++) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(paddedTexWidth / numBlocks, dimBlock.x),
(unsigned)Cuda::ceilDiv(paddedTexHeight / numBlocks, dimBlock.y), 1);
Core::sphereSweepInputDisparityToDepthKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(
dst.get().surface(), (unsigned)dst.width(), (unsigned)dst.height(),
(useHostDisparity) ? devDisparity.devicePtr() : nullptr, getSurfaceFromMap(sourceID, surfaces), cx, cy,
paddedTexWidth / numBlocks, paddedTexHeight / numBlocks);
// Force synchronization after tile computation for system stability
stream.synchronize();
}
}
Logger::get(Logger::Info) << "SphereSweep disparity to depth on input " << sourceID << std::endl;
FAIL_RETURN(devDisparity.release());
return Status();
}
Status sphereSweepInputStep(videoreaderid_t sourceID, int frame, GPU::Surface& dst, GPU::Surface& depthSrcNextLevel,
const std::map<videoreaderid_t, Core::SourceSurface*>& inputSurfaces,
const Core::PanoDefinition& panoDef, GPU::Stream& stream, const float scale) {
// debug command line pipeline: just copy input surface to output surface
// via temporary buffer as we don't have a surface->surface copy function
// auto tmpBuf = GPU::uniqueBuffer<uint32_t>(inputDef.getWidth() * inputDef.getHeight(), "tmp bfu");
// GPU::memcpyAsync(tmpBuf.borrow(), *gpuSurf, stream);
// GPU::memcpyAsync(dst, tmpBuf.borrow_const(), stream);
// stream.synchronize()
if (panoDef.numVideoInputs() > 6) {
return Status{Origin::Stitcher, ErrType::ImplementationError,
"Sphere sweep only implemented for 6 inputs maximum (hardcoded)"};
}
auto potInputParamsArray = prepareInputParams(panoDef, frame, scale);
FAIL_RETURN(potInputParamsArray.status());
const struct InputParams6 inputParamsArray = potInputParamsArray.releaseValue();
const InputParams& referenceInput = inputParamsArray.params[sourceID];
// Running a kernel that takes > 1s destabilizes the system
// (Display manager resets or kernel panic)
// As the current version is not optimised and works at full resolution it can take several seconds to complete
// --> Tile the work. Each tile should complete in less than 1 second.
const int numBlocks = 4;
const int paddedTexWidth = (int)Cuda::ceilDiv(referenceInput.texWidth, numBlocks) * numBlocks;
const int paddedTexHeight = (int)Cuda::ceilDiv(referenceInput.texHeight, numBlocks) * numBlocks;
// search around best depth from lower level pyramid
// search span is in log2, covers [log2(bestDepth) - searchSpan, log2(bestDepth) + searchSpan]
const float searchSpan = scale / 8.f; // decrease searched depths on upper levels
for (int cx = 0; cx < numBlocks; cx++) {
for (int cy = 0; cy < numBlocks; cy++) {
const dim3 dimBlock(CudaBlockSize, CudaBlockSize, 1);
const dim3 dimGrid((unsigned)Cuda::ceilDiv(paddedTexWidth / numBlocks, dimBlock.x),
(unsigned)Cuda::ceilDiv(paddedTexHeight / numBlocks, dimBlock.y), 1);
Core::sphereSweepInputKernelStep<<<dimGrid, dimBlock, 0, stream.get()>>>(
dst.get().surface(), (unsigned)dst.width(), (unsigned)dst.height(), depthSrcNextLevel.get().surface(),
(unsigned)depthSrcNextLevel.width(), (unsigned)depthSrcNextLevel.height(),
getSurfaceFromMap(0, inputSurfaces), getSurfaceFromMap(1, inputSurfaces), getSurfaceFromMap(2, inputSurfaces),
getSurfaceFromMap(3, inputSurfaces), getSurfaceFromMap(4, inputSurfaces), getSurfaceFromMap(5, inputSurfaces),
inputParamsArray, sourceID, panoDef.numVideoInputs(), cx, cy, paddedTexWidth / numBlocks,
paddedTexHeight / numBlocks, searchSpan);
// Force synchronization after tile computation for system stability
stream.synchronize();
}
}
Logger::get(Logger::Info) << "SphereSweep step frame " << frame << " input " << sourceID
<< " search span: " << searchSpan << std::endl;
return Status::OK();
}
int numSphereSweeps() { return numSphereScales; }
int maxDepthInputs() { return NUM_INPUTS; }
} // namespace GPU
} // namespace VideoStitch
|
the_stack
|
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/limits.hpp"
#include "mog2.hpp"
namespace cv
{
namespace cuda
{
namespace device
{
namespace mog2
{
///////////////////////////////////////////////////////////////
// Utility
__device__ __forceinline__ float cvt(uchar val)
{
return val;
}
__device__ __forceinline__ float3 cvt(const uchar3 &val)
{
return make_float3(val.x, val.y, val.z);
}
__device__ __forceinline__ float4 cvt(const uchar4 &val)
{
return make_float4(val.x, val.y, val.z, val.w);
}
__device__ __forceinline__ float sqr(float val)
{
return val * val;
}
__device__ __forceinline__ float sqr(const float3 &val)
{
return val.x * val.x + val.y * val.y + val.z * val.z;
}
__device__ __forceinline__ float sqr(const float4 &val)
{
return val.x * val.x + val.y * val.y + val.z * val.z;
}
__device__ __forceinline__ float sum(float val)
{
return val;
}
__device__ __forceinline__ float sum(const float3 &val)
{
return val.x + val.y + val.z;
}
__device__ __forceinline__ float sum(const float4 &val)
{
return val.x + val.y + val.z;
}
template <class Ptr2D>
__device__ __forceinline__ void swap(Ptr2D &ptr, int x, int y, int k, int rows)
{
typename Ptr2D::elem_type val = ptr(k * rows + y, x);
ptr(k * rows + y, x) = ptr((k + 1) * rows + y, x);
ptr((k + 1) * rows + y, x) = val;
}
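            // Layout assumed by the indexing below: each per-pixel GMM parameter
            // (weight, variance, mean) is stored as nmixtures images stacked
            // vertically, so mode k of pixel (x, y) lives at row (k * rows + y),
            // column x. swap() above exchanges modes k and k+1 for one pixel.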
///////////////////////////////////////////////////////////////
// MOG2
template <bool detectShadows, typename SrcT, typename WorkT>
__global__ void mog2(const PtrStepSz<SrcT> frame, PtrStepb fgmask, PtrStepb modesUsed,
PtrStepf gmm_weight, PtrStepf gmm_variance, PtrStep<WorkT> gmm_mean,
const float alphaT, const float alpha1, const float prune, const Constants *const constants)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < frame.cols && y < frame.rows)
{
WorkT pix = cvt(frame(y, x));
//calculate distances to the modes (+ sort)
//here we need to go in descending order!!!
bool background = false; // true - the pixel classified as background
//internal:
bool fitsPDF = false; //if it remains zero a new GMM mode will be added
int nmodes = modesUsed(y, x);
const int nNewModes = nmodes; //current number of modes in GMM
float totalWeight = 0.0f;
//go through all modes
for (int mode = 0; mode < nmodes; ++mode)
{
//need only weight if fit is found
float weight = alpha1 * gmm_weight(mode * frame.rows + y, x) + prune;
int swap_count = 0;
//fit not found yet
if (!fitsPDF)
{
//check if it belongs to some of the remaining modes
const float var = gmm_variance(mode * frame.rows + y, x);
const WorkT mean = gmm_mean(mode * frame.rows + y, x);
//calculate difference and distance
const WorkT diff = mean - pix;
const float dist2 = sqr(diff);
//background? - Tb - usually larger than Tg
if (totalWeight < constants->TB_ && dist2 < constants->Tb_ * var)
background = true;
//check fit
if (dist2 < constants->Tg_ * var)
{
//belongs to the mode
fitsPDF = true;
//update distribution
//update weight
weight += alphaT;
float k = alphaT / weight;
//update mean
gmm_mean(mode * frame.rows + y, x) = mean - k * diff;
//update variance
float varnew = var + k * (dist2 - var);
//limit the variance
varnew = ::fmaxf(varnew, constants->varMin_);
varnew = ::fminf(varnew, constants->varMax_);
gmm_variance(mode * frame.rows + y, x) = varnew;
//sort
//all other weights are at the same place and
//only the matched (iModes) is higher -> just find the new place for it
for (int i = mode; i > 0; --i)
{
//check one up
if (weight < gmm_weight((i - 1) * frame.rows + y, x))
break;
swap_count++;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
//belongs to the mode - bFitsPDF becomes 1
}
} // !fitsPDF
//check prune
if (weight < -prune)
{
weight = 0.0f;
nmodes--;
}
gmm_weight((mode - swap_count) * frame.rows + y, x) = weight; //update weight by the calculated value
totalWeight += weight;
}
//renormalize weights
totalWeight = 1.f / totalWeight;
for (int mode = 0; mode < nmodes; ++mode)
gmm_weight(mode * frame.rows + y, x) *= totalWeight;
nmodes = nNewModes;
//make new mode if needed and exit
if (!fitsPDF)
{
// replace the weakest or add a new one
const int mode = nmodes == constants->nmixtures_ ? constants->nmixtures_ - 1 : nmodes++;
if (nmodes == 1)
gmm_weight(mode * frame.rows + y, x) = 1.f;
else
{
gmm_weight(mode * frame.rows + y, x) = alphaT;
// renormalize all other weights
for (int i = 0; i < nmodes - 1; ++i)
gmm_weight(i * frame.rows + y, x) *= alpha1;
}
// init
gmm_mean(mode * frame.rows + y, x) = pix;
gmm_variance(mode * frame.rows + y, x) = constants->varInit_;
//sort
//find the new place for it
for (int i = nmodes - 1; i > 0; --i)
{
// check one up
if (alphaT < gmm_weight((i - 1) * frame.rows + y, x))
break;
//swap one up
swap(gmm_weight, x, y, i - 1, frame.rows);
swap(gmm_variance, x, y, i - 1, frame.rows);
swap(gmm_mean, x, y, i - 1, frame.rows);
}
}
//set the number of modes
modesUsed(y, x) = nmodes;
bool isShadow = false;
if (detectShadows && !background)
{
float tWeight = 0.0f;
// check all the components marked as background:
for (int mode = 0; mode < nmodes; ++mode)
{
const WorkT mean = gmm_mean(mode * frame.rows + y, x);
const WorkT pix_mean = pix * mean;
const float numerator = sum(pix_mean);
const float denominator = sqr(mean);
// no division by zero allowed
if (denominator == 0)
break;
// if tau < a < 1 then also check the color distortion
else if (numerator <= denominator && numerator >= constants->tau_ * denominator)
{
const float a = numerator / denominator;
WorkT dD = a * mean - pix;
if (sqr(dD) < constants->Tb_ * gmm_variance(mode * frame.rows + y, x) * a * a)
{
isShadow = true;
break;
}
};
tWeight += gmm_weight(mode * frame.rows + y, x);
if (tWeight > constants->TB_)
break;
}
}
fgmask(y, x) = background ? 0 : isShadow ? constants->shadowVal_ : 255;
}
}
template <typename SrcT, typename WorkT>
void mog2_caller(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, const Constants *const constants, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(frame.cols, block.x), divUp(frame.rows, block.y));
const float alpha1 = 1.0f - alphaT;
if (detectShadows)
{
cudaSafeCall(cudaFuncSetCacheConfig(mog2<true, SrcT, WorkT>, cudaFuncCachePreferL1));
mog2<true, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>)frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>)mean,
alphaT, alpha1, prune, constants);
}
else
{
cudaSafeCall(cudaFuncSetCacheConfig(mog2<false, SrcT, WorkT>, cudaFuncCachePreferL1));
mog2<false, SrcT, WorkT><<<grid, block, 0, stream>>>((PtrStepSz<SrcT>)frame, fgmask, modesUsed,
weight, variance, (PtrStepSz<WorkT>)mean,
alphaT, alpha1, prune, constants);
}
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void mog2_gpu(PtrStepSzb frame, int cn, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean,
float alphaT, float prune, bool detectShadows, const Constants *const constants, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb frame, PtrStepSzb fgmask, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzf variance, PtrStepSzb mean, float alphaT, float prune, bool detectShadows, const Constants *const constants, cudaStream_t stream);
static const func_t funcs[] =
{
0, mog2_caller<uchar, float>, 0, mog2_caller<uchar3, float3>, mog2_caller<uchar4, float4>};
funcs[cn](frame, fgmask, modesUsed, weight, variance, mean, alphaT, prune, detectShadows, constants, stream);
}
template <typename WorkT, typename OutT>
__global__ void getBackgroundImage2(const PtrStepSzb modesUsed, const PtrStepf gmm_weight, const PtrStep<WorkT> gmm_mean, PtrStep<OutT> dst, const Constants *const constants)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= modesUsed.cols || y >= modesUsed.rows)
return;
int nmodes = modesUsed(y, x);
WorkT meanVal = VecTraits<WorkT>::all(0.0f);
float totalWeight = 0.0f;
for (int mode = 0; mode < nmodes; ++mode)
{
float weight = gmm_weight(mode * modesUsed.rows + y, x);
WorkT mean = gmm_mean(mode * modesUsed.rows + y, x);
meanVal = meanVal + weight * mean;
totalWeight += weight;
if (totalWeight > constants->TB_)
break;
}
meanVal = meanVal * (1.f / totalWeight);
dst(y, x) = saturate_cast<OutT>(meanVal);
}
template <typename WorkT, typename OutT>
void getBackgroundImage2_caller(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, const Constants *const constants, cudaStream_t stream)
{
dim3 block(32, 8);
dim3 grid(divUp(modesUsed.cols, block.x), divUp(modesUsed.rows, block.y));
cudaSafeCall(cudaFuncSetCacheConfig(getBackgroundImage2<WorkT, OutT>, cudaFuncCachePreferL1));
getBackgroundImage2<WorkT, OutT><<<grid, block, 0, stream>>>(modesUsed, weight, (PtrStepSz<WorkT>)mean, (PtrStepSz<OutT>)dst, constants);
cudaSafeCall(cudaGetLastError());
if (stream == 0)
cudaSafeCall(cudaDeviceSynchronize());
}
void getBackgroundImage2_gpu(int cn, PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, const Constants *const constants, cudaStream_t stream)
{
typedef void (*func_t)(PtrStepSzb modesUsed, PtrStepSzf weight, PtrStepSzb mean, PtrStepSzb dst, const Constants *const constants, cudaStream_t stream);
static const func_t funcs[] =
{
0, getBackgroundImage2_caller<float, uchar>, 0, getBackgroundImage2_caller<float3, uchar3>, getBackgroundImage2_caller<float4, uchar4>};
funcs[cn](modesUsed, weight, mean, dst, constants, stream);
}
} // namespace mog2
} // namespace device
} // namespace cuda
} // namespace cv
#endif /* CUDA_DISABLER */
|
the_stack
|
// HACK TESTING
#include <iostream>
using std::cout;
using std::endl;
// 2x 4-bit --> 2x 8-bit (unsigned)
inline __device__ void gunpack(uint8_t ival,
uint16_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
// Note: Ignores conjugate
if( byte_reverse ) {
// ........ABCDEFGH
// EFGH....ABCD....
oval = ival;
oval = (oval | (oval << 12)) & 0xF0F0;
} else {
// ....ABCDEFGH....
// ABCD....EFGH....
oval = ival << 4;
oval = (oval | (oval << 4)) & 0xF0F0;
}
if( !align_msb ) {
// >>>>ABCD>>>>EFGH
oval >>= 4;
}
}
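// Worked example (hypothetical input) for the non-reversed, LSB-aligned path:
//   ival = 0xAB -> ival << 4 = 0x0AB0 -> (oval | oval << 4) & 0xF0F0 = 0xA0B0 -> >> 4 = 0x0A0B
// i.e. the high nibble A lands in the high output byte and the low nibble B in the low
// output byte, each zero-extended to 8 bits.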
// 4x 2-bit --> 4x 8-bit (unsigned)
inline __device__ void gunpack(uint8_t ival,
uint32_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
// Note: Ignores conjugate
// ..................ABCDEFGH......
// ......ABCD............EFGH......
// AB......CD......EF......GH......
oval = ival << 6;
oval = (oval | (oval << 12)) & 0x03C003C0;
oval = (oval | (oval << 6)) & 0xC0C0C0C0;
if( byte_reverse) {
byteswap_gpu(oval, &oval);
}
if( !align_msb ) {
// >>>>>>AB>>>>>>CD>>>>>>EF>>>>>>GH
oval >>= 6;
}
}
// 8x 1-bit --> 8x 8-bit (unsigned)
inline __device__ void gunpack(uint8_t ival,
uint64_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
// Note: Ignores conjugate
// .................................................ABCDEFGH.......
// .....................ABCD............................EFGH.......
// .......AB..............CD..............EF..............GH.......
// A.......B.......C.......D.......E.......F.......G.......H.......
oval = ival << 7;
oval = (oval | (oval << 28)) & 0x0000078000000780;
oval = (oval | (oval << 14)) & 0x0180018001800180;
oval = (oval | (oval << 7)) & 0x8080808080808080;
if( byte_reverse) {
byteswap_gpu(oval, &oval);
}
if( !align_msb ) {
// >>>>>>>A>>>>>>>B>>>>>>>C>>>>>>>D
oval >>= 7;
}
}
// 2x 4-bit --> 2x 8-bit (signed)
inline __device__ void gunpack(uint8_t ival,
int16_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
if( byte_reverse ) {
// ........ABCDEFGH
// EFGH....ABCD....
oval = ival;
oval = (oval | (oval << 12)) & 0xF0F0;
} else {
// ....ABCDEFGH....
// ABCD....EFGH....
oval = ival << 4;
oval = (oval | (oval << 4)) & 0xF0F0;
}
if( !align_msb ) {
// >>>>ABCD>>>>EFGH
rshift_subwords_gpu<4,int8_t>(oval);
}
if( conjugate ) {
conjugate_subwords_gpu<int8_t>(oval);
}
}
// 4x 2-bit --> 4x 8-bit (signed)
inline __device__ void gunpack(uint8_t ival,
int32_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
// ..................ABCDEFGH......
// ......ABCD............EFGH......
// AB......CD......EF......GH......
oval = ival << 6;
oval = (oval | (oval << 12)) & 0x03C003C0;
oval = (oval | (oval << 6)) & 0xC0C0C0C0;
if( byte_reverse) {
byteswap_gpu(oval, &oval);
}
if( !align_msb ) {
// >>>>>>AB>>>>>>CD>>>>>>EF>>>>>>GH
rshift_subwords_gpu<6,int8_t>(oval);
}
if( conjugate ) {
conjugate_subwords_gpu<int8_t>(oval);
}
}
// 8x 1-bit --> 8x 8-bit (signed)
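// The signed 1-bit path appears to map each input bit b to a two-level value: spreading
// ~b into bit 7 of every output byte and OR-ing in 0x40 gives 0x40 for b = 1 and 0xC0 for
// b = 0, so the arithmetic right shift by 6 below yields +1 and -1 respectively.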
inline __device__ void gunpack(uint8_t ival,
int64_t& oval,
bool byte_reverse,
bool align_msb,
bool conjugate) {
// .................................................ABCDEFGH.......
// .....................ABCD............................EFGH.......
// .......AB..............CD..............EF..............GH.......
// A.......B.......C.......D.......E.......F.......G.......H.......
oval = (~ival) << 7;
oval = (oval | (oval << 28)) & 0x0000078000000780;
oval = (oval | (oval << 14)) & 0x0180018001800180;
oval = (oval | (oval << 7)) & 0x8080808080808080;
oval |= 0x4040404040404040;
if( byte_reverse) {
byteswap_gpu(oval, &oval);
}
if( !align_msb ) {
        // arithmetic right shift of each byte by 6: 0x40 -> +1, 0xC0 -> -1
rshift_subwords_gpu<6,int8_t>(oval);
}
if( conjugate ) {
conjugate_subwords_gpu<int8_t>(oval);
}
}
template<typename IType, typename OType>
struct GunpackFunctor {
bool byte_reverse;
bool align_msb;
bool conjugate;
GunpackFunctor(bool byte_reverse_,
bool align_msb_,
bool conjugate_)
: byte_reverse(byte_reverse_),
align_msb(align_msb_),
conjugate(conjugate_) {}
__device__ void operator()(IType ival, OType& oval) const {
gunpack(ival, oval, byte_reverse, align_msb, conjugate);
}
};
template<typename T, typename U, typename Func, typename Size>
__global__ void foreach_simple_gpu(T const* in,
U* out,
Size nelement,
Func func) {
Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x;
if( v0 < nelement ) {
func(in[v0], out[v0]);
}
}
template<typename T, typename U, typename Func, typename Size>
inline void launch_foreach_simple_gpu(T const* in,
U* out,
Size nelement,
Func func,
cudaStream_t stream) {
dim3 block(512, 1); // TODO: Tune this
Size first = std::min((nelement-1)/block.x+1, 65535ul);
Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul);
if( block.x*first > nelement ) {
secnd = 1;
}
dim3 grid(first, secnd);
/*
cout << " Block size is " << block.x << " by " << block.y << endl;
cout << " Grid size is " << grid.x << " by " << grid.y << endl;
cout << " Maximum size is " << block.x*grid.x*grid.y << endl;
if( block.x*grid.x*grid.y >= nelement ) {
cout << " -> Valid" << endl;
}
*/
void* args[] = {&in,
&out,
&nelement,
&func};
BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_simple_gpu<T,U,Func,Size>,
grid, block,
&args[0], 0, stream),
BF_STATUS_INTERNAL_ERROR);
}
template<typename T, typename U, typename V, typename Func, typename Size>
__global__ void foreach_promote_gpu(T const* in,
V* out,
Size nelement,
Func func) {
Size v0 = threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x;
if( v0 < nelement ) {
U tmp2 = 0;
func(in[v0], tmp2);
for( Size j=0; j<sizeof(U)/sizeof(T); j++ ) {
out[v0*sizeof(U)/sizeof(T) + j] = int8_t((tmp2 >> j*8) & 0xFF);
}
}
}
template<typename T, typename U, typename V, typename Func, typename Size>
inline void launch_foreach_promote_gpu(T const* in,
U* tmp,
V* out,
Size nelement,
Func func,
cudaStream_t stream) {
dim3 block(512, 1); // TODO: Tune this
Size first = std::min((nelement-1)/block.x+1, 65535ul);
Size secnd = std::min((nelement - first*block.x) / first + 1, 65535ul);
if( block.x*first > nelement ) {
secnd = 1;
}
dim3 grid(first, secnd);
/*
cout << " Block size is " << block.x << " by " << block.y << endl;
cout << " Grid size is " << grid.x << " by " << grid.y << endl;
cout << " Maximum size is " << block.x*grid.x*grid.y << endl;
if( block.x*grid.x*grid.y >= nelement ) {
cout << " -> Valid" << endl;
}
*/
void* args[] = {&in,
&out,
&nelement,
&func};
BF_CHECK_CUDA_EXCEPTION(cudaLaunchKernel((void*)foreach_promote_gpu<T,U,V,Func,Size>,
grid, block,
&args[0], 0, stream),
BF_STATUS_INTERNAL_ERROR);
}
// Instantiation - Gunpack functors used in unpack.cpp
//// unsigned
template class GunpackFunctor<uint8_t,uint16_t>;
template class GunpackFunctor<uint8_t,uint32_t>;
template class GunpackFunctor<uint8_t,uint64_t>;
//// signed
template class GunpackFunctor<uint8_t,int16_t>;
template class GunpackFunctor<uint8_t,int32_t>;
template class GunpackFunctor<uint8_t,int64_t>;
// Instantiation - launch_foreach_simple_gpu calls used in unpack.cpp
//// unsigned
template void launch_foreach_simple_gpu<uint8_t,uint16_t,GunpackFunctor<uint8_t,uint16_t>,size_t>(uint8_t const* in,
uint16_t* out,
size_t nelement,
GunpackFunctor<uint8_t,uint16_t> func,
cudaStream_t stream);
template void launch_foreach_simple_gpu<uint8_t,uint32_t,GunpackFunctor<uint8_t,uint32_t>,size_t>(uint8_t const* in,
uint32_t* out,
size_t nelement,
GunpackFunctor<uint8_t,uint32_t> func,
cudaStream_t stream);
template void launch_foreach_simple_gpu<uint8_t,uint64_t,GunpackFunctor<uint8_t,uint64_t>,size_t>(uint8_t const* in,
uint64_t* out,
size_t nelement,
GunpackFunctor<uint8_t,uint64_t> func,
cudaStream_t stream);
//// signed
template void launch_foreach_simple_gpu<uint8_t,int16_t,GunpackFunctor<uint8_t,int16_t>,size_t>(uint8_t const* in,
int16_t* out,
size_t nelement,
GunpackFunctor<uint8_t,int16_t> func,
cudaStream_t stream);
template void launch_foreach_simple_gpu<uint8_t,int32_t,GunpackFunctor<uint8_t,int32_t>,size_t>(uint8_t const* in,
int32_t* out,
size_t nelement,
GunpackFunctor<uint8_t,int32_t> func,
cudaStream_t stream);
template void launch_foreach_simple_gpu<uint8_t,int64_t,GunpackFunctor<uint8_t,int64_t>,size_t>(uint8_t const *in,
int64_t* out,
size_t nelement,
GunpackFunctor<uint8_t,int64_t> func,
cudaStream_t stream);
// Instantiation - launch_foreach_promote_gpu calls used in unpack.cpp
//// promote to float
template void launch_foreach_promote_gpu<uint8_t,int16_t,float,GunpackFunctor<uint8_t,int16_t>,size_t>(uint8_t const* in,
int16_t* tmp,
float* out,
size_t nelement,
GunpackFunctor<uint8_t,int16_t> func,
cudaStream_t stream);
template void launch_foreach_promote_gpu<uint8_t,int32_t,float,GunpackFunctor<uint8_t,int32_t>,size_t>(uint8_t const* in,
int32_t* tmp,
float* out,
size_t nelement,
GunpackFunctor<uint8_t,int32_t> func,
cudaStream_t stream);
template void launch_foreach_promote_gpu<uint8_t,int64_t,float,GunpackFunctor<uint8_t,int64_t>,size_t>(uint8_t const* in,
int64_t* tmp,
float* out,
size_t nelement,
GunpackFunctor<uint8_t,int64_t> func,
cudaStream_t stream);
//// promote to double
template void launch_foreach_promote_gpu<uint8_t,int16_t,double,GunpackFunctor<uint8_t,int16_t>,size_t>(uint8_t const* in,
int16_t* tmp,
double* out,
size_t nelement,
GunpackFunctor<uint8_t,int16_t> func,
cudaStream_t stream);
template void launch_foreach_promote_gpu<uint8_t,int32_t,double,GunpackFunctor<uint8_t,int32_t>,size_t>(uint8_t const* in,
int32_t* tmp,
double* out,
size_t nelement,
GunpackFunctor<uint8_t,int32_t> func,
cudaStream_t stream);
template void launch_foreach_promote_gpu<uint8_t,int64_t,double,GunpackFunctor<uint8_t,int64_t>,size_t>(uint8_t const* in,
int64_t* tmp,
double* out,
size_t nelement,
GunpackFunctor<uint8_t,int64_t> func,
cudaStream_t stream);
|
the_stack
|
// HIP not support cusolver
#include <thrust/device_vector.h>
#include <algorithm>
#include <vector>
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/qr_op.h"
#include "paddle/fluid/platform/dynload/cusolver.h"
// Reuse some helper functions from svd
#include "paddle/fluid/operators/svd_helper.h"
namespace paddle {
namespace operators {
template <typename T>
class QrGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
bool compute_q;
bool reduced_mode;
auto& dev_ctx =
context.template device_context<platform::CUDADeviceContext>();
const Tensor& x = *context.Input<Tensor>("X");
Tensor& q = *context.Output<Tensor>("Q");
Tensor& r = *context.Output<Tensor>("R");
const std::string mode = context.Attr<std::string>("mode");
std::tie(compute_q, reduced_mode) = _parse_qr_mode(mode);
auto numel = x.numel();
PADDLE_ENFORCE_GT(numel, 0, platform::errors::PreconditionNotMet(
"The input of QR is empty."));
auto x_dims = x.dims();
int x_rank = x_dims.size();
int m = x_dims[x_rank - 2];
int n = x_dims[x_rank - 1];
int min_mn = std::min(m, n);
int k = reduced_mode ? min_mn : m;
int batch_size = numel / (m * n);
int qr_stride = m * n;
int tau_stride = min_mn;
if (compute_q) {
q.mutable_data<math::Real<T>>(
context.GetPlace(),
size_t(batch_size * m * k * sizeof(math::Real<T>)));
}
r.mutable_data<math::Real<T>>(
context.GetPlace(), size_t(batch_size * k * n * sizeof(math::Real<T>)));
auto dito =
math::DeviceIndependenceTensorOperations<platform::CUDADeviceContext,
T>(context);
// Note: allocate temporary tensors due to the lack of in-place operations.
// Prepare qr
Tensor qr;
qr.mutable_data<math::Real<T>>(
context.GetPlace(), size_t(batch_size * m * n * sizeof(math::Real<T>)));
// BatchedGeqrf performs computation in-place and 'qr' must be a copy of
// input
TensorCopy(x, context.GetPlace(), &qr);
// Prepare tau
auto tau_dims_vec = framework::vectorize<int>(x_dims);
tau_dims_vec.pop_back();
tau_dims_vec[tau_dims_vec.size() - 1] = min_mn;
Tensor tau = dito.Fill(tau_dims_vec, 0);
// Transpose 'qr' to conform to the column-major order
auto tmp_qr = dito.Transpose(qr);
framework::TensorCopy(tmp_qr, qr.place(), &qr);
auto qr_data = qr.mutable_data<T>(context.GetPlace());
auto tau_data = tau.mutable_data<T>(context.GetPlace());
BatchedGeqrf(dev_ctx, batch_size, m, n, qr_data, m, tau_data, qr_stride,
tau_stride);
if (reduced_mode) {
auto trans_qr = dito.Transpose(qr);
auto sliced_qr = dito.Slice(trans_qr, {-2}, {0}, {min_mn});
auto tmp_r = dito.TrilTriu(sliced_qr, 0, false);
// Transpose 'tmp_r' to restore the original row-major order
framework::TensorCopy(tmp_r, r.place(), &r);
} else {
auto trans_qr = dito.Transpose(qr);
auto tmp_r = dito.TrilTriu(trans_qr, 0, false);
// Transpose 'tmp_r' to restore the original row-major order
framework::TensorCopy(tmp_r, r.place(), &r);
}
if (compute_q) {
// Perform ORGQR for Q using the result from GEQRF
// Transpose 'q' to restore the original row-major order
if (reduced_mode) {
BatchedOrgqr(dev_ctx, batch_size, m, min_mn, min_mn, qr_data, m,
tau_data, qr_stride, tau_stride);
auto trans_q = dito.Transpose(qr);
auto sliced_q = dito.Slice(trans_q, {-1}, {0}, {min_mn});
framework::TensorCopy(sliced_q, q.place(), &q);
} else {
if (m > n) {
auto new_qr_dims_vec = framework::vectorize<int>(x_dims);
new_qr_dims_vec[new_qr_dims_vec.size() - 1] = m;
Tensor new_qr = dito.Fill(new_qr_dims_vec, 0);
auto new_qr_data = new_qr.mutable_data<T>(context.GetPlace());
auto new_qr_stride = m * m;
for (int i = 0; i < batch_size; ++i) {
memory::Copy(
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
(new_qr_data + i * new_qr_stride),
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
(qr_data + i * qr_stride), qr_stride * sizeof(math::Real<T>),
dev_ctx.stream());
}
BatchedOrgqr(dev_ctx, batch_size, m, m, min_mn, new_qr_data, m,
tau_data, new_qr_stride, tau_stride);
auto trans_q = dito.Transpose(new_qr);
framework::TensorCopy(trans_q, q.place(), &q);
} else {
BatchedOrgqr(dev_ctx, batch_size, m, m, min_mn, qr_data, m, tau_data,
qr_stride, tau_stride);
auto trans_q = dito.Transpose(qr);
auto sliced_q = dito.Slice(trans_q, {-1}, {0}, {m});
framework::TensorCopy(sliced_q, q.place(), &q);
}
}
}
}
void BatchedGeqrf(const platform::CUDADeviceContext& dev_ctx, int batch_size,
int m, int n, float* a, int lda, float* tau, int a_stride,
int tau_stride) const;
void BatchedGeqrf(const platform::CUDADeviceContext& dev_ctx, int batch_size,
int m, int n, double* a, int lda, double* tau, int a_stride,
int tau_stride) const;
void BatchedOrgqr(const platform::CUDADeviceContext& dev_ctx, int batch_size,
int m, int n, int k, float* a, int lda, float* tau,
int a_stride, int tau_stride) const;
void BatchedOrgqr(const platform::CUDADeviceContext& dev_ctx, int batch_size,
int m, int n, int k, double* a, int lda, double* tau,
int a_stride, int tau_stride) const;
};
template <>
void QrGPUKernel<float>::BatchedGeqrf(
const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
float* a, int lda, float* tau, int a_stride, int tau_stride) const {
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgeqrf_bufferSize(
handle, m, n, a, lda, &lwork));
auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
auto info = memory::Alloc(dev_ctx, sizeof(int));
int* info_d = reinterpret_cast<int*>(info->ptr());
for (int i = 0; i < batch_size; ++i) {
float* a_working_ptr = &a[i * a_stride];
float* tau_working_ptr = &tau[i * tau_stride];
// compute geqrf
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSgeqrf(
handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork,
info_d));
// Do we need to synchronize here?
// check the error info
int info_h;
memory::Copy(platform::CPUPlace(), &info_h,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_d, sizeof(int), dev_ctx.stream());
PADDLE_ENFORCE_EQ(
info_h, 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: CUSolver geqrf is not zero. [%d]", i, info_h));
}
}
template <>
void QrGPUKernel<double>::BatchedGeqrf(
const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
double* a, int lda, double* tau, int a_stride, int tau_stride) const {
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgeqrf_bufferSize(
handle, m, n, a, lda, &lwork));
auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
auto info = memory::Alloc(dev_ctx, sizeof(int));
int* info_d = reinterpret_cast<int*>(info->ptr());
for (int i = 0; i < batch_size; ++i) {
double* a_working_ptr = &a[i * a_stride];
double* tau_working_ptr = &tau[i * tau_stride];
// compute geqrf
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDgeqrf(
handle, m, n, a_working_ptr, lda, tau_working_ptr, workspace_ptr, lwork,
info_d));
// Do we need to synchronize here?
// check the error info
int info_h;
memory::Copy(platform::CPUPlace(), &info_h,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_d, sizeof(int), dev_ctx.stream());
PADDLE_ENFORCE_EQ(
info_h, 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: CUSolver geqrf is not zero. [%d]", i, info_h));
}
}
template <>
void QrGPUKernel<float>::BatchedOrgqr(
const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
int k, float* a, int lda, float* tau, int a_stride, int tau_stride) const {
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSorgqr_bufferSize(
handle, m, n, k, a, lda, tau, &lwork));
auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(float));
float* workspace_ptr = reinterpret_cast<float*>(workspace->ptr());
auto info = memory::Alloc(dev_ctx, sizeof(int));
int* info_d = reinterpret_cast<int*>(info->ptr());
for (int i = 0; i < batch_size; ++i) {
float* a_working_ptr = &a[i * a_stride];
float* tau_working_ptr = &tau[i * tau_stride];
// compute orgqr
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnSorgqr(
handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr,
lwork, info_d));
// Do we need to synchronize here?
// check the error info
int info_h;
memory::Copy(platform::CPUPlace(), &info_h,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_d, sizeof(int), dev_ctx.stream());
PADDLE_ENFORCE_EQ(
info_h, 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: CUSolver QR is not zero. [%d]", i, info_h));
}
}
template <>
void QrGPUKernel<double>::BatchedOrgqr(
const platform::CUDADeviceContext& dev_ctx, int batch_size, int m, int n,
int k, double* a, int lda, double* tau, int a_stride,
int tau_stride) const {
int lwork = 0;
auto handle = dev_ctx.cusolver_dn_handle();
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDorgqr_bufferSize(
handle, m, n, k, a, lda, tau, &lwork));
auto workspace = memory::Alloc(dev_ctx, lwork * sizeof(double));
double* workspace_ptr = reinterpret_cast<double*>(workspace->ptr());
auto info = memory::Alloc(dev_ctx, sizeof(int));
int* info_d = reinterpret_cast<int*>(info->ptr());
for (int i = 0; i < batch_size; ++i) {
double* a_working_ptr = &a[i * a_stride];
double* tau_working_ptr = &tau[i * tau_stride];
// compute orgqr
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cusolverDnDorgqr(
handle, m, n, k, a_working_ptr, lda, tau_working_ptr, workspace_ptr,
lwork, info_d));
// Do we need to synchronize here?
// check the error info
int info_h;
memory::Copy(platform::CPUPlace(), &info_h,
BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()),
info_d, sizeof(int), dev_ctx.stream());
PADDLE_ENFORCE_EQ(
info_h, 0,
platform::errors::PreconditionNotMet(
"For batch [%d]: CUSolver QR is not zero. [%d]", i, info_h));
}
}
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(qr, ops::QrGPUKernel<float>, ops::QrGPUKernel<double>);
REGISTER_OP_CUDA_KERNEL(
qr_grad, ops::QrGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::QrGradKernel<paddle::platform::CUDADeviceContext, double>);
#endif // not PADDLE_WITH_HIP
|
the_stack
|
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_X 8
#define BLOCK_Y 8
#define BLOCK_Z 4
using namespace cv;
// The algorithm itself performs well, but memory allocation is a problem.
// I will try to reduce it.
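// Overview of the pipeline implemented below (see BE_3D::PerformLabeling):
//   1) Init builds, on a half-resolution grid, one label and one neighbour
//      bitmask per 2x2x2 block of the input volume;
//   2) Scan and Analyze are repeated until Scan reports no change: Scan writes
//      the minimum neighbouring label into the entry referenced by the current
//      label, and Analyze flattens every label onto its root;
//   3) FinalLabeling copies each block label back to the foreground voxels of
//      the full-resolution output.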
namespace {
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) {
bitmap |= (1 << pos);
}
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Init phase.
// Labels start at value 1.
__global__ void Init(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i block_conn, cuda::PtrStepSz3i block_labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned img_index = 2*z * (img.stepz / img.elem_size) + 2*y * (img.stepy / img.elem_size) + 2*x;
unsigned conn_index = z * (block_conn.stepz / block_conn.elem_size) + y * (block_conn.stepy / block_conn.elem_size) + x;
unsigned labels_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x;
if (x < block_conn.x && y < block_conn.y && z < block_conn.z) {
#define P0 0x77707770777UL
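// P is a 64-bit mask over the 4x4x4 window surrounding the 2x2x2 block
// (4 bits per row, 16 bits per plane). Each foreground voxel of the block
// ORs in a shifted copy of P0, i.e. the 3x3x3 neighbourhood of that voxel,
// so a set bit in P marks a window position that may hold a connected voxel.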
unsigned long long P = 0UL;
if (img[img_index]) {
P |= P0;
}
if (2 * x + 1 < img.x) {
if (img[img_index + 1]) {
P |= (P0 << 1);
}
if (2 * y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) {
P |= (P0 << 5);
}
}
if (2 * y + 1 < img.y) {
if (img[img_index + img.stepy / img.elem_size]) {
P |= (P0 << 4);
}
}
if (2 * z + 1 < img.z) {
if (img[img_index + img.stepz / img.elem_size]) {
P |= P0 << 16;
}
if (2 * x + 1 < img.x) {
if (img[img_index + img.stepz / img.elem_size + 1]) {
P |= (P0 << 17);
}
if (2 * y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) {
P |= (P0 << 21);
}
}
if (2 * y + 1 < img.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) {
P |= (P0 << 20);
}
}
}
#undef P0
// checks on borders
if (x == 0) {
P &= 0xEEEEEEEEEEEEEEEE;
}
if (2 * x + 1 >= img.x) {
P &= 0x3333333333333333;
}
else if (2 * x + 2 >= img.x) {
P &= 0x7777777777777777;
}
if (y == 0) {
P &= 0xFFF0FFF0FFF0FFF0;
}
if (2 * y + 1 >= img.y) {
P &= 0x00FF00FF00FF00FF;
}
else if (2 * y + 2 >= img.y) {
P &= 0x0FFF0FFF0FFF0FFF;
}
if (z == 0) {
P &= 0xFFFFFFFFFFFF0000;
}
if (2 * z + 1 >= img.z) {
P &= 0x00000000FFFFFFFF;
}
else if (2 * z + 2 >= img.z) {
P &= 0x0000FFFFFFFFFFFF;
}
// P is now ready to be used to find neighbour blocks (or it should be);
// the border masking above keeps the neighbour lookups in range
unsigned int conn_bitmask = 0;
if (P > 0) {
block_labels[labels_index] = labels_index + 1;
// Lower plane
unsigned char * plane_data = img.data + img_index - (img.stepz / img.elem_size);
if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) {
SetBit(conn_bitmask, 0);
}
if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) {
SetBit(conn_bitmask, 1);
}
if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) {
SetBit(conn_bitmask, 2);
}
if ((HasBit(P, 4) && plane_data[- 1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) {
SetBit(conn_bitmask, 3);
}
if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) {
SetBit(conn_bitmask, 4);
}
if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) {
SetBit(conn_bitmask, 5);
}
if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) {
SetBit(conn_bitmask, 6);
}
if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) {
SetBit(conn_bitmask, 7);
}
if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) {
SetBit(conn_bitmask, 8);
}
// Current planes
plane_data += img.stepz / img.elem_size;
if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) {
SetBit(conn_bitmask, 9);
}
if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) {
SetBit(conn_bitmask, 10);
}
if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) {
SetBit(conn_bitmask, 11);
}
if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) {
SetBit(conn_bitmask, 12);
}
if ((HasBit(P, 23) && plane_data[2]) || (HasBit(P, 27) && plane_data[img.stepy + 2]) || (HasBit(P, 39) && plane_data[img.stepz + 2]) || (HasBit(P, 43) && plane_data[img.stepz + img.stepy + 2])) {
SetBit(conn_bitmask, 14);
}
if ((HasBit(P, 28) && plane_data[2 * img.stepy - 1]) || (HasBit(P, 44) && plane_data[img.stepz + 2 * img.stepy - 1])) {
SetBit(conn_bitmask, 15);
}
if ((HasBit(P, 29) && plane_data[2 * img.stepy]) || (HasBit(P, 30) && plane_data[2 * img.stepy + 1]) || (HasBit(P, 45) && plane_data[img.stepz + 2 * img.stepy]) || (HasBit(P, 46) && plane_data[img.stepz + 2 * img.stepy + 1])) {
SetBit(conn_bitmask, 16);
}
if ((HasBit(P, 31) && plane_data[2 * img.stepy + 2]) || (HasBit(P, 47) && plane_data[img.stepz + 2 * img.stepy + 2])) {
SetBit(conn_bitmask, 17);
}
// Upper plane
plane_data += 2 * (img.stepz / img.elem_size);
if (HasBit(P, 48) && plane_data[0 - img.stepy - 1]) {
SetBit(conn_bitmask, 18);
}
if ((HasBit(P, 49) && plane_data[0 - img.stepy]) || (HasBit(P, 50) && plane_data[0 - img.stepy + 1])) {
SetBit(conn_bitmask, 19);
}
if (HasBit(P, 51) && plane_data[0 - img.stepy + 2]) {
SetBit(conn_bitmask, 20);
}
if ((HasBit(P, 52) && plane_data[-1]) || (HasBit(P, 56) && plane_data[img.stepy - 1])) {
SetBit(conn_bitmask, 21);
}
if ((HasBit(P, 53) && plane_data[0]) || (HasBit(P, 54) && plane_data[1]) || (HasBit(P, 57) && plane_data[img.stepy]) || (HasBit(P, 58) && plane_data[img.stepy + 1])) {
SetBit(conn_bitmask, 22);
}
if ((HasBit(P, 55) && plane_data[2]) || (HasBit(P, 59) && plane_data[img.stepy + 2])) {
SetBit(conn_bitmask, 23);
}
if (HasBit(P, 60) && plane_data[2 * img.stepy - 1]) {
SetBit(conn_bitmask, 24);
}
if ((HasBit(P, 61) && plane_data[2 * img.stepy]) || (HasBit(P, 62) && plane_data[2 * img.stepy + 1])) {
SetBit(conn_bitmask, 25);
}
if (HasBit(P, 63) && plane_data[2 * img.stepy + 2]) {
SetBit(conn_bitmask, 26);
}
}
else {
block_labels[labels_index] = 0;
}
block_conn[conn_index] = conn_bitmask;
}
}
//__global__ void ExpandConnections(const cuda::PtrStepSzb connections, cuda::PtrStepSzb expansion) {
// unsigned row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
// unsigned col = blockIdx.x * BLOCK_COLS + threadIdx.x;
// unsigned conn_index = row * (connections.step / connections.elem_size) + col;
// unsigned exp_index = 3 * row * (expansion.step / expansion.elem_size) + 3 * col;
// if (row < connections.rows && col < connections.cols) {
// expansion[exp_index + (expansion.step / expansion.elem_size) + 1] = 2;
// unsigned char neighbours = connections[conn_index];
// if (HasBit(neighbours, 0)) {
// expansion[exp_index] = 1;
// }
// else {
// expansion[exp_index] = 0;
// }
// if (HasBit(neighbours, 1)) {
// expansion[exp_index + 1] = 1;
// }
// else {
// expansion[exp_index + 1] = 0;
// }
// if (HasBit(neighbours, 2)) {
// expansion[exp_index + 2] = 1;
// }
// else {
// expansion[exp_index + 2] = 0;
// }
// if (HasBit(neighbours, 3)) {
// expansion[exp_index + (expansion.step / expansion.elem_size)] = 1;
// }
// else {
// expansion[exp_index + (expansion.step / expansion.elem_size)] = 0;
// }
// if (HasBit(neighbours, 4)) {
// expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 1;
// }
// else {
// expansion[exp_index + (expansion.step / expansion.elem_size) + 2] = 0;
// }
// if (HasBit(neighbours, 5)) {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 1;
// }
// else {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size)] = 0;
// }
// if (HasBit(neighbours, 6)) {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 1;
// }
// else {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 1] = 0;
// }
// if (HasBit(neighbours, 7)) {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 1;
// }
// else {
// expansion[exp_index + 2 * (expansion.step / expansion.elem_size) + 2] = 0;
// }
// }
//}
__device__ unsigned int MinLabel(unsigned l1, unsigned l2) {
if (l1 && l2)
return min(l1, l2);
else
return l1;
}
__device__ unsigned int FindMinLabel(cuda::PtrStepSz3i labels, unsigned int neighbours, unsigned label, unsigned labels_index) {
unsigned int min = label;
for (char plane = -1; plane <= 1; plane++) {
int * plane_data = labels.data + labels_index + plane * (labels.stepz / labels.elem_size);
if (HasBit(neighbours, 0)) {
min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) - 1]);
}
if (HasBit(neighbours, 1)) {
min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size)]);
}
if (HasBit(neighbours, 2)) {
min = MinLabel(min, plane_data[0 - (labels.stepy / labels.elem_size) + 1]);
}
if (HasBit(neighbours, 3)) {
min = MinLabel(min, plane_data[-1]);
}
if (plane && HasBit(neighbours, 4)) {
min = MinLabel(min, plane_data[0]);
}
if (HasBit(neighbours, 5)) {
min = MinLabel(min, plane_data[1]);
}
if (HasBit(neighbours, 6)) {
min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) - 1]);
}
if (HasBit(neighbours, 7)) {
min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size)]);
}
if (HasBit(neighbours, 8)) {
min = MinLabel(min, plane_data[(labels.stepy / labels.elem_size) + 1]);
}
neighbours >>= 9;
}
return min;
}
// Scan phase.
// The pixel associated with current thread is given the minimum label of the neighbours.
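// If a smaller label is found among the neighbours, Scan lowers the entry
// referenced by the current label (labels[label - 1]). The write is not
// atomic; correctness relies on the host repeating Scan/Analyze until no
// thread sets *changes.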
__global__ void Scan(cuda::PtrStepSz3i labels, cuda::PtrStepSz3i connections, char *changes) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned conn_index = z * (connections.stepz / connections.elem_size) + y * (connections.stepy / connections.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int neighbours = connections[conn_index];
unsigned label = labels[labels_index];
if (label) {
unsigned min_label = FindMinLabel(labels, neighbours, label, labels_index);
if (min_label < label) {
labels[label - 1] = min(static_cast<unsigned int>(labels[label - 1]), min_label);
*changes = 1;
}
}
}
}
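// Analyze phase.
// Flattens the union-find tree: every labelled block is pointed directly at
// the root returned by Find (stored as root index + 1).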
__global__ void Analyze(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
// Final Labeling phase
// Assigns every pixel of 2x2x2 blocks the block label
__global__ void FinalLabeling(cuda::PtrStepSz3i block_labels, cuda::PtrStepSz3i labels, const cuda::PtrStepSz3b img) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned blocks_index = z * (block_labels.stepz / block_labels.elem_size) + y * (block_labels.stepy / block_labels.elem_size) + x;
unsigned labels_index = 2 * z * (labels.stepz / labels.elem_size) + 2 * y * (labels.stepy / labels.elem_size) + 2 * x;
unsigned img_index = 2 * z * (img.stepz / img.elem_size) + 2 * y * (img.stepy / img.elem_size) + 2 * x;
if (x < block_labels.x && y < block_labels.y && z < block_labels.z) {
unsigned int label = block_labels[blocks_index];
// Current plane
if (img[img_index]) {
labels[labels_index] = label;
}
else {
labels[labels_index] = 0;
}
if (2 * x + 1 < labels.x) {
if (img[img_index + 1])
labels[labels_index + 1] = label;
else {
labels[labels_index + 1] = 0;
}
if (2 * y + 1 < labels.y) {
if (img[img_index + img.stepy + 1])
labels[labels_index + (labels.stepy / labels.elem_size) + 1] = label;
else {
labels[labels_index + (labels.stepy / labels.elem_size) + 1] = 0;
}
}
}
if (2 * y + 1 < labels.y) {
if (img[img_index + img.stepy])
labels[labels_index + (labels.stepy / labels.elem_size)] = label;
else {
labels[labels_index + (labels.stepy / labels.elem_size)] = 0;
}
}
// Upper plane
if (2 * z + 1 < labels.z) {
if (img[img_index + img.stepz / img.elem_size])
labels[labels_index + labels.stepz / labels.elem_size] = label;
else {
labels[labels_index + labels.stepz / labels.elem_size] = 0;
}
if (2 * x + 1 < labels.x) {
if (img[img_index + img.stepz / img.elem_size + 1])
labels[labels_index + labels.stepz / labels.elem_size + 1] = label;
else {
labels[labels_index + labels.stepz / labels.elem_size + 1] = 0;
}
if (2 * y + 1 < labels.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1])
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = label;
else {
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size) + 1] = 0;
}
}
}
if (2 * y + 1 < labels.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size])
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = label;
else {
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = 0;
}
}
}
}
}
}
class BE_3D : public GpuLabeling3D<Connectivity3D::CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
char changes;
char *d_changes;
cuda::GpuMat3 d_connections_;
cuda::GpuMat3 d_block_labels_;
public:
BE_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
// Extra structures that I would gladly do without
d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1);
d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1);
grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
cudaMalloc(&d_changes, sizeof(char));
Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_);
//Mat init_labels;
//d_block_labels_.download(init_labels);
//::NormalizeLabels(init_labels);
//Mat img_out;
//ColorLabels(init_labels, img_out);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\init_labels", img_out);
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_block_labels_);
}
//Mat block_labels;
//d_block_labels_.download(block_labels);
//::NormalizeLabels(block_labels);
//ColorLabels(block_labels, img_out);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\block_labels", img_out);
FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_);
//d_img_labels_.download(img_labels_);
cudaFree(d_changes);
d_connections_.release();
d_block_labels_.release();
cudaDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\BE_errors", errors);
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
d_connections_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1);
d_block_labels_.create((d_img_.x + 1) / 2, (d_img_.y + 1) / 2, (d_img_.z + 1) / 2, CV_32SC1);
cudaMalloc(&d_changes, sizeof(char));
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
cudaFree(d_changes);
d_connections_.release();
d_block_labels_.release();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3((d_block_labels_.x + BLOCK_X - 1) / BLOCK_X, (d_block_labels_.y + BLOCK_Y - 1) / BLOCK_Y, (d_block_labels_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Init << <grid_size_, block_size_ >> > (d_img_, d_connections_, d_block_labels_);
// Init blows up
// Check what connections contains
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//assert(cudaDeviceSynchronize() == cudaSuccess);
// Debug image of the initialization
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
while (true) {
changes = 0;
cudaMemcpy(d_changes, &changes, sizeof(char), cudaMemcpyHostToDevice);
Scan << <grid_size_, block_size_ >> > (d_block_labels_, d_connections_, d_changes);
cudaMemcpy(&changes, d_changes, sizeof(char), cudaMemcpyDeviceToHost);
if (!changes)
break;
Analyze << <grid_size_, block_size_ >> > (d_block_labels_);
}
// Debug image of the block labels
//Mat1i block_labels;
//d_block_labels_.download(block_labels);
FinalLabeling << <grid_size_, block_size_ >> > (d_block_labels_, d_img_labels_, d_img_);
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BE_3D);
|
the_stack
|
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
// Utilities and system includes
#include <helper_string.h> // helper for string parsing
#include <helper_image.h> // helper for image and data comparison
#include <helper_cuda.h> // helper for cuda error checking functions
const char *sSDKsample = "Transpose";
// Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of
// BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
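// With TILE_DIM = 16 and BLOCK_ROWS = 16 each thread handles
// TILE_DIM / BLOCK_ROWS = 1 element, so the i-loops below run exactly once.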
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
int MATRIX_SIZE_X = 1024;
int MATRIX_SIZE_Y = 1024;
int MUL_FACTOR = TILE_DIM;
#define FLOOR(a, b) (a - (a % b))
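// FLOOR(a, b) rounds a down to the nearest multiple of b.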
// Compute the tile size necessary to illustrate performance cases for SM20+
// hardware
int MAX_TILES = (FLOOR(MATRIX_SIZE_X, 512) * FLOOR(MATRIX_SIZE_Y, 512)) /
(TILE_DIM * TILE_DIM);
// Number of repetitions used for timing. Two sets of repetitions are
// performed: 1) over kernel launches and 2) inside the kernel over just the
// loads and stores
#define NUM_REPS 100
// -------------------------------------------------------
// Copies
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void copy(float *odata, float *idata, int width, int height) {
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width * yIndex;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index + i * width] = idata[index + i * width];
}
}
__global__ void copySharedMem(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + width * yIndex;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
if (xIndex < width && yIndex < height) {
tile[threadIdx.y][threadIdx.x] = idata[index];
}
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
if (xIndex < height && yIndex < width) {
odata[index] = tile[threadIdx.y][threadIdx.x];
}
}
}
// -------------------------------------------------------
// Transposes
// width and height must be integral multiples of TILE_DIM
// -------------------------------------------------------
__global__ void transposeNaive(float *odata, float *idata, int width,
int height) {
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + width * yIndex;
int index_out = yIndex + height * xIndex;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i] = idata[index_in + i * width];
}
}
// coalesced transpose (with bank conflicts)
__global__ void transposeCoalesced(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Coalesced transpose with no bank conflicts
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// Transpose that effectively reorders execution of thread blocks along
// diagonals of the matrix (also coalesced and has no bank conflicts)
//
// Here blockIdx.x is interpreted as the distance along a diagonal and
// blockIdx.y as corresponding to different diagonals
//
// blockIdx_x and blockIdx_y expressions map the diagonal coordinates to the
// more commonly used cartesian coordinates so that the only changes to the code
// from the coalesced version are the calculation of the blockIdx_x and
// blockIdx_y and replacement of blockIdx.x and blockIdx.y with the subscripted
// versions in the remaining code
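// Example (square matrix, gridDim.x == 4): the block launched with
// blockIdx = (1, 2) gets blockIdx_y = 1 and blockIdx_x = (1 + 2) % 4 = 3,
// so it processes the tile at cartesian coordinates (3, 1).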
__global__ void transposeDiagonal(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int blockIdx_x, blockIdx_y;
// do diagonal reordering
if (width == height) {
blockIdx_y = blockIdx.x;
blockIdx_x = (blockIdx.x + blockIdx.y) % gridDim.x;
} else {
int bid = blockIdx.x + gridDim.x * blockIdx.y;
blockIdx_y = bid % gridDim.y;
blockIdx_x = ((bid / gridDim.y) + blockIdx_y) % gridDim.x;
}
// from here on the code is same as previous kernel except blockIdx_x replaces
// blockIdx.x and similarly for y
int xIndex = blockIdx_x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx_y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx_y * TILE_DIM + threadIdx.x;
yIndex = blockIdx_x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
tile[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = tile[threadIdx.x][threadIdx.y + i];
}
}
// --------------------------------------------------------------------
// Partial transposes
// NB: the coarse- and fine-grained routines only perform part of a
// transpose and will fail the test against the reference solution
//
// They are used to assess performance characteristics of different
// components of a full transpose
// --------------------------------------------------------------------
__global__ void transposeFineGrained(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float block[TILE_DIM][TILE_DIM + 1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index = xIndex + (yIndex)*width;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y + i][threadIdx.x] = idata[index + i * width];
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index + i * height] = block[threadIdx.x][threadIdx.y + i];
}
}
__global__ void transposeCoarseGrained(float *odata, float *idata, int width,
int height) {
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ float block[TILE_DIM][TILE_DIM + 1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int index_in = xIndex + (yIndex)*width;
xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
int index_out = xIndex + (yIndex)*height;
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
block[threadIdx.y + i][threadIdx.x] = idata[index_in + i * width];
}
cg::sync(cta);
for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
odata[index_out + i * height] = block[threadIdx.y + i][threadIdx.x];
}
}
// ---------------------
// host utility routines
// ---------------------
void computeTransposeGold(float *gold, float *idata, const int size_x,
const int size_y) {
for (int y = 0; y < size_y; ++y) {
for (int x = 0; x < size_x; ++x) {
gold[(x * size_y) + y] = idata[(y * size_x) + x];
}
}
}
void getParams(int argc, char **argv, cudaDeviceProp &deviceProp, int &size_x,
int &size_y, int max_tile_dim) {
// set matrix size (if the (x,y) dims of the matrix are not square, then this
// will have to be modified)
if (checkCmdLineFlag(argc, (const char **)argv, "dimX")) {
size_x = getCmdLineArgumentInt(argc, (const char **)argv, "dimX");
if (size_x > max_tile_dim) {
printf("> MatrixSize X = %d is greater than the recommended size = %d\n",
size_x, max_tile_dim);
} else {
printf("> MatrixSize X = %d\n", size_x);
}
} else {
size_x = max_tile_dim;
size_x = FLOOR(size_x, 512);
}
if (checkCmdLineFlag(argc, (const char **)argv, "dimY")) {
size_y = getCmdLineArgumentInt(argc, (const char **)argv, "dimY");
if (size_y > max_tile_dim) {
printf("> MatrixSize Y = %d is greater than the recommended size = %d\n",
size_y, max_tile_dim);
} else {
printf("> MatrixSize Y = %d\n", size_y);
}
} else {
size_y = max_tile_dim;
size_y = FLOOR(size_y, 512);
}
}
void showHelp() {
printf("\n%s : Command line options\n", sSDKsample);
printf("\t-device=n (where n=0,1,2.... for the GPU device)\n\n");
printf("> The default matrix size can be overridden with these parameters\n");
printf("\t-dimX=row_dim_size (matrix row dimensions)\n");
printf("\t-dimY=col_dim_size (matrix column dimensions)\n");
}
// ----
// main
// ----
int main(int argc, char **argv) {
// Start logs
printf("%s Starting...\n\n", sSDKsample);
if (checkCmdLineFlag(argc, (const char **)argv, "help")) {
showHelp();
return 0;
}
int devID = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp deviceProp;
// get number of SMs on this GPU
checkCudaErrors(cudaGetDevice(&devID));
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
// compute the scaling factor (for GPUs with fewer MPs)
float scale_factor, total_tiles;
scale_factor =
max((192.0f / (_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) *
(float)deviceProp.multiProcessorCount)),
1.0f);
printf("> Device %d: \"%s\"\n", devID, deviceProp.name);
printf("> SM Capability %d.%d detected:\n", deviceProp.major,
deviceProp.minor);
// Calculate number of tiles we will run for the Matrix Transpose performance
// tests
int size_x, size_y, max_matrix_dim, matrix_size_test;
matrix_size_test = 512; // we round down max_matrix_dim for this perf test
total_tiles = (float)MAX_TILES / scale_factor;
max_matrix_dim =
FLOOR((int)(floor(sqrt(total_tiles)) * TILE_DIM), matrix_size_test);
// This is the minimum size allowed
if (max_matrix_dim == 0) {
max_matrix_dim = matrix_size_test;
}
printf("> [%s] has %d MP(s) x %d (Cores/MP) = %d (Cores)\n", deviceProp.name,
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) *
deviceProp.multiProcessorCount);
printf("> Compute performance scaling factor = %4.2f\n", scale_factor);
// Extract parameters if there are any, command line -dimx and -dimy can
// override any of these settings
getParams(argc, argv, deviceProp, size_x, size_y, max_matrix_dim);
if (size_x != size_y) {
printf(
"\n[%s] does not support non-square matrices (row_dim_size(%d) != "
"col_dim_size(%d))\nExiting...\n\n",
sSDKsample, size_x, size_y);
exit(EXIT_FAILURE);
}
if (size_x % TILE_DIM != 0 || size_y % TILE_DIM != 0) {
printf(
"[%s] Matrix size must be integral multiple of tile "
"size\nExiting...\n\n",
sSDKsample);
exit(EXIT_FAILURE);
}
// kernel pointer and descriptor
void (*kernel)(float *, float *, int, int);
const char *kernelName;
// execution configuration parameters
dim3 grid(size_x / TILE_DIM, size_y / TILE_DIM),
threads(TILE_DIM, BLOCK_ROWS);
if (grid.x < 1 || grid.y < 1) {
printf("[%s] grid size computation incorrect in test \nExiting...\n\n",
sSDKsample);
exit(EXIT_FAILURE);
}
// CUDA events
cudaEvent_t start, stop;
// size of memory required to store the matrix
size_t mem_size = static_cast<size_t>(sizeof(float) * size_x * size_y);
if (2 * mem_size > deviceProp.totalGlobalMem) {
printf("Input matrix size is larger than the available device memory!\n");
printf("Please choose a smaller size matrix\n");
exit(EXIT_FAILURE);
}
// allocate host memory
float *h_idata = (float *)malloc(mem_size);
float *h_odata = (float *)malloc(mem_size);
float *transposeGold = (float *)malloc(mem_size);
float *gold;
// allocate device memory
float *d_idata, *d_odata;
checkCudaErrors(cudaMalloc((void **)&d_idata, mem_size));
checkCudaErrors(cudaMalloc((void **)&d_odata, mem_size));
// initialize host data
for (int i = 0; i < (size_x * size_y); ++i) {
h_idata[i] = (float)i;
}
// copy host data to device
checkCudaErrors(
cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));
// Compute reference transpose solution
computeTransposeGold(transposeGold, h_idata, size_x, size_y);
// print out common data for all kernels
printf(
"\nMatrix size: %dx%d (%dx%d tiles), tile size: %dx%d, block size: "
"%dx%d\n\n",
size_x, size_y, size_x / TILE_DIM, size_y / TILE_DIM, TILE_DIM, TILE_DIM,
TILE_DIM, BLOCK_ROWS);
// initialize events
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//
// loop over different kernels
//
bool success = true;
for (int k = 0; k < 8; k++) {
// set kernel pointer
switch (k) {
case 0:
kernel = &copy;
kernelName = "simple copy ";
break;
case 1:
kernel = &copySharedMem;
kernelName = "shared memory copy";
break;
case 2:
kernel = &transposeNaive;
kernelName = "naive ";
break;
case 3:
kernel = &transposeCoalesced;
kernelName = "coalesced ";
break;
case 4:
kernel = &transposeNoBankConflicts;
kernelName = "optimized ";
break;
case 5:
kernel = &transposeCoarseGrained;
kernelName = "coarse-grained ";
break;
case 6:
kernel = &transposeFineGrained;
kernelName = "fine-grained ";
break;
case 7:
kernel = &transposeDiagonal;
kernelName = "diagonal ";
break;
}
// set reference solution
if (kernel == &copy || kernel == &copySharedMem) {
gold = h_idata;
} else if (kernel == &transposeCoarseGrained ||
kernel == &transposeFineGrained) {
gold = h_odata; // fine- and coarse-grained kernels are not full
// transposes, so bypass check
} else {
gold = transposeGold;
}
// Clear error status
checkCudaErrors(cudaGetLastError());
// warmup to avoid timing startup
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y);
// take measurements for loop over kernel launches
checkCudaErrors(cudaEventRecord(start, 0));
for (int i = 0; i < NUM_REPS; i++) {
kernel<<<grid, threads>>>(d_odata, d_idata, size_x, size_y);
// Ensure no launch failure
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaEventRecord(stop, 0));
checkCudaErrors(cudaEventSynchronize(stop));
float kernelTime;
checkCudaErrors(cudaEventElapsedTime(&kernelTime, start, stop));
checkCudaErrors(
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost));
bool res = compareData(gold, h_odata, size_x * size_y, 0.01f, 0.0f);
if (res == false) {
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// take measurements for loop inside kernel
checkCudaErrors(
cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost));
res = compareData(gold, h_odata, size_x * size_y, 0.01f, 0.0f);
if (res == false) {
printf("*** %s kernel FAILED ***\n", kernelName);
success = false;
}
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size / (1024 * 1024 * 1024) /
(kernelTime / NUM_REPS);
printf(
"transpose %s, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 "
"elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelName, kernelBandwidth, kernelTime / NUM_REPS, (size_x * size_y),
1, TILE_DIM * BLOCK_ROWS);
}
// cleanup
free(h_idata);
free(h_odata);
free(transposeGold);
cudaFree(d_idata);
cudaFree(d_odata);
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
if (!success) {
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
the_stack
|
#define THRD_PER_BLOCK 1024 // Number of threads per block (should always %32==0)
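// The CUDA warp size is 32; keeping the block size a multiple of 32 avoids
// launching partially filled warps.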
using isce3::cuda::signal::gpuLooks;
/**
input:
hi_res
output:
lo_res
*/
template <typename T>
void gpuLooks<T>::multilook(std::valarray<T> &hi_res,
std::valarray<T> &lo_res)
{
// allocate lo res output on device
T *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(T);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
T *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(T);
// allocate input
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
// copy hi_res
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_g<<<grid, block>>>(d_lo_res,
d_hi_res,
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked,
T(_rowsLooks*_colsLooks));
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
}
/**
input:
hi_res
output:
lo_res
*/
template <typename T>
void gpuLooks<T>::multilook(std::valarray<std::complex<T>> &hi_res,
std::valarray<std::complex<T>> &lo_res)
{
// allocate lo res output on device
thrust::complex<T> *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(thrust::complex<T>);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
thrust::complex<T> *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(thrust::complex<T>);
// allocate input
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
// copy hi_res
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_g<<<grid, block>>>(d_lo_res,
d_hi_res,
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked,
T(_rowsLooks*_colsLooks));
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
}
template <typename T>
void gpuLooks<T>::multilook(std::valarray<T> &hi_res,
std::valarray<T> &lo_res,
T noDataValue)
{
// allocate lo res output on device
T *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(T);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
T *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(T);
// allocate input
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
// copy hi_res
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_no_data_g<<<grid, block>>>(d_lo_res,
d_hi_res,
noDataValue,
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked,
T(_rowsLooks*_colsLooks));
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
}
template <typename T>
void gpuLooks<T>::multilook(std::valarray<std::complex<T>> &hi_res,
std::valarray<std::complex<T>> &lo_res,
std::complex<T> noDataValue)
{
// allocate lo res output on device
thrust::complex<T> *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(thrust::complex<T>);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
thrust::complex<T> *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(thrust::complex<T>);
// allocate input
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
// copy hi_res
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_no_data_g<<<grid, block>>>(d_lo_res,
d_hi_res,
thrust::complex<T>(noDataValue),
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked,
T(_rowsLooks*_colsLooks));
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
}
template <typename T>
void gpuLooks<T>::multilook(std::valarray<T> &hi_res,
std::valarray<T> &lo_res,
std::valarray<T> &weights)
{
// allocate lo res output on device
T *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(T);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
T *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(T);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// allocate and copy to device weights
T *d_weights;
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_weights), hi_res_size));
checkCudaErrors(cudaMemcpy(d_weights, &weights[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_weighted_g<<<grid, block>>>(d_lo_res,
d_hi_res,
d_weights,
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked);
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
checkCudaErrors(cudaFree(d_weights));
}
template <typename T>
void gpuLooks<T>::multilook(std::valarray<std::complex<T>> &hi_res,
std::valarray<T> &lo_res,
int p)
{
// allocate lo res output on device
T *d_lo_res;
size_t n_lo_res_size = _nrowsLooked*_ncolsLooked;
size_t lo_res_size = n_lo_res_size*sizeof(T);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_lo_res), lo_res_size));
// allocate and copy to device hi res input
thrust::complex<T> *d_hi_res;
size_t hi_res_size = _nrows*_ncols*sizeof(thrust::complex<T>);
checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&d_hi_res), hi_res_size));
checkCudaErrors(cudaMemcpy(d_hi_res, &hi_res[0], hi_res_size, cudaMemcpyHostToDevice));
// determine block layout
dim3 block(THRD_PER_BLOCK);
dim3 grid((n_lo_res_size+(THRD_PER_BLOCK-1))/THRD_PER_BLOCK);
// run kernels
multilooks_power_g<<<grid, block>>>(d_lo_res,
d_hi_res,
p,
_ncols,
_ncolsLooked,
_rowsLooks,
_colsLooks,
_nrowsLooked*_ncolsLooked,
T(_rowsLooks*_colsLooks));
// copy from device lo res output
checkCudaErrors(cudaMemcpy(&lo_res[0], d_lo_res, lo_res_size, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_lo_res));
checkCudaErrors(cudaFree(d_hi_res));
}
/*
accumulate then average from hi res to lo res
output:
lo_res
input:
hi_res to be reduced to lo_res
n_cols_hi columns in hi res
n_cols_lo columns in lo res
row_resize scale factor of hi to lo in rows (looks in rows)
col_resize scale factor of hi to lo in cols (looks in cols)
sz_lo number of elements in lo res
blk_sz number of hi res elements averaged into one lo res element
*/
template <typename T>
__global__ void multilooks_g(T *lo_res,
const T* __restrict__ hi_res,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
T blk_sz)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
// init mlook accumulation to 0
T accumulation = 0.0;
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col indices into a flat hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate the hi_res value into the lo_res accumulator
accumulation += hi_res[i_hi];
}
}
lo_res[i] = accumulation / blk_sz;
}
}
template <typename T>
__global__ void multilooks_g(thrust::complex<T> *lo_res,
const thrust::complex<T>* __restrict__ hi_res,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
T blk_sz)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
// init mlook accumulation to 0
thrust::complex<T> accumulation(0.0, 0.0);
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col indices into a flat hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate the hi_res value into the lo_res accumulator
accumulation += hi_res[i_hi];
}
}
lo_res[i] = accumulation / blk_sz;
}
}
/*
accumulate then average from hi res to lo res
output:
lo_res
input:
hi_res to be reduced to lo_res
no_data_value hi_res value that is excluded from the accumulation
n_cols_hi columns in hi res
n_cols_lo columns in lo res
row_resize scale factor of hi to lo in rows (looks in rows)
col_resize scale factor of hi to lo in cols (looks in cols)
sz_lo number of elements in lo res
blk_sz number of hi res elements per lo res element before exclusions
*/
template <typename T>
__global__ void multilooks_no_data_g(T *lo_res,
const T* __restrict__ hi_res,
T no_data_value,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
T blk_sz)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
T accumulation = 0;
int n_no_val = 0;
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col indices into a flat hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate the hi_res value unless it matches no_data_value
T hi_res_pixel_value = hi_res[i_hi];
if (hi_res_pixel_value != no_data_value)
accumulation += hi_res_pixel_value;
else
++n_no_val;
}
}
lo_res[i] = accumulation / (blk_sz - n_no_val);
}
}
template <class T>
__global__ void multilooks_no_data_g(thrust::complex<T> *lo_res,
const thrust::complex<T>* __restrict__ hi_res,
thrust::complex<T> no_data_value,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
T blk_sz)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
thrust::complex<T> accumulation(0.0, 0.0);
int n_no_val = 0;
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col indices into a flat hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate the hi_res value unless it matches no_data_value
thrust::complex<T> hi_res_pixel_value = hi_res[i_hi];
if (hi_res_pixel_value != no_data_value)
accumulation += hi_res_pixel_value;
else
++n_no_val;
}
}
lo_res[i] = accumulation / (blk_sz - n_no_val);
}
}
/*
accumulate, apply weight, then average from hi res to lo res
output:
lo_res
input:
hi_res to be reduced to lo_res
weights per-element weights of hi_res used to normalize the accumulation
n_cols_hi columns in hi res
n_cols_lo columns in lo res
row_resize scale factor of hi to lo in rows (looks in rows)
col_resize scale factor of hi to lo in cols (looks in cols)
sz_lo number of elements in lo res
*/
template <typename T>
__global__ void multilooks_weighted_g(T *lo_res,
const T* __restrict__ hi_res,
const T* __restrict__ weights,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
T accumulation = 0;
T sum_weight = 0;
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col index into a linear hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate hi_res data and the corresponding weights
accumulation += hi_res[i_hi];
sum_weight += weights[i_hi];
}
}
if (sum_weight > 0) {
lo_res[i] = accumulation / sum_weight;
} else {
lo_res[i] = 0.0;
}
}
}
/*
accumulate |hi_res|^power then average from hi res to lo res
output:
lo_res
input:
hi_res complex hi res data to be reduced to lo_res
power exponent applied to the magnitude of each hi_res sample
n_cols_hi number of columns in hi res
n_cols_lo number of columns in lo res
row_resize scale factor of hi to lo in rows
col_resize scale factor of hi to lo in cols
sz_lo number of elements in lo res
blk_sz number of hi res samples per lo res sample
*/
template <typename T>
__global__ void multilooks_power_g(T *lo_res,
const thrust::complex<T>* __restrict__ hi_res,
int power,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
T blk_sz)
{
const auto i = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
if (i < sz_lo) {
auto i_lo_row = i / n_cols_lo;
auto i_lo_col = i % n_cols_lo;
// init mlook accumulation to 0
T accumulation = 0.0;
// loop over contributing hi_res rows
for (int i_blk_row = 0; i_blk_row < row_resize; ++i_blk_row) {
// get hi_res row index
auto i_hi_row = i_blk_row + i_lo_row * row_resize;
// loop over contributing hi_res columns
for (int i_blk_col = 0; i_blk_col < col_resize; ++i_blk_col) {
// get hi_res col index
auto i_hi_col = i_blk_col + i_lo_col * col_resize;
// combine hi_res row and col index into a linear hi_res index
auto i_hi = i_hi_row * n_cols_hi + i_hi_col;
// accumulate the magnitude of hi_res raised to the given power
accumulation += pow(abs(hi_res[i_hi]), power);
}
}
lo_res[i] = accumulation / blk_sz;
}
}
/*
explicit template instantiations
*/
template class gpuLooks<float>;
template __global__ void
multilooks_g<float>(float *lo_res,
const float* __restrict__ hi_res,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
float blk_sz);
template __global__ void
multilooks_g<float>(thrust::complex<float> *lo_res,
const thrust::complex<float>* __restrict__ hi_res,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
float blk_sz);
template __global__ void
multilooks_no_data_g<float>(float *lo_res,
const float* __restrict__ hi_res,
float no_data_value,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
float blk_sz);
template __global__ void
multilooks_no_data_g<float>(thrust::complex<float> *lo_res,
const thrust::complex<float>* __restrict__ hi_res,
thrust::complex<float> no_data_value,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
float blk_sz);
template __global__ void
multilooks_power_g<float>(float *lo_res,
const thrust::complex<float>* __restrict__ hi_res,
int power,
size_t n_cols_hi,
size_t n_cols_lo,
int row_resize,
int col_resize,
size_t sz_lo,
float blk_sz);
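/*
Hedged usage sketch (not part of the original file): a minimal host-side launch
of the no-data multilook kernel above. The helper name, the 256-thread block
size, and the assumption that the device buffers are already allocated and
filled are illustrative only.
*/
static void multilooksNoDataExample(float* d_lo, const float* d_hi,
size_t n_rows_lo, size_t n_cols_lo,
int row_resize, int col_resize,
float no_data_value)
{
const size_t sz_lo = n_rows_lo * n_cols_lo;
const size_t n_cols_hi = n_cols_lo * static_cast<size_t>(col_resize);
// number of hi res samples contributing to each lo res sample
const float blk_sz = static_cast<float>(row_resize) * static_cast<float>(col_resize);
const int threads = 256;
const int blocks = static_cast<int>((sz_lo + threads - 1) / threads);
multilooks_no_data_g<<<blocks, threads>>>(d_lo, d_hi, no_data_value,
n_cols_hi, n_cols_lo,
row_resize, col_resize,
sz_lo, blk_sz);
}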
// ==== the_stack: next source file ====
#pragma once
//#define SNN_DEBUG 1
#ifdef SNN_DEBUG
#define debug(a...) fprintf(stderr, a)
#else
#define debug(a...)
#endif
//#define SNN_DEBUG2
#ifdef SNN_DEBUG2
#define debug2(a...) fprintf(stderr, a)
#else
#define debug2(a...)
#endif
#include <iostream>
#include <unordered_set>
#include <set>
#include <vector>
#include <algorithm>
#include <cassert>
#include <gunrock/app/knn/knn_test.cuh>
#include <gunrock/app/snn/snn_helpers.cuh>
namespace gunrock {
namespace app {
namespace snn {
/******************************************************************************
* SNN Testing Routines
*****************************************************************************/
/**
* @brief Simple CPU-based reference SNN clustering implementation
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the values
* @param[in] graph Input graph
...
* @param[in] quiet Whether to print out anything to stdout
*/
template <typename GraphT,
typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double CPU_Reference(
const GraphT &graph,
SizeT num_points, // number of points
SizeT k, // number of nearest neighbor
SizeT eps, // min number of SNN to increase snn-density
SizeT min_pts, // minimum snn-density to be a core point
SizeT *knns, // k nearest neighbor array
SizeT *cluster, // cluster id
SizeT *core_point_counter, // counter of core points
SizeT *noise_point_counter,// counter of noise points
SizeT *cluster_counter, // counter of clusters
bool quiet) {
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::CsrT CsrT;
//util::PrintMsg("#threads: " + std::to_string(omp_get_num_threads()));
util::CpuTimer cpu_timer;
cpu_timer.Start();
#pragma omp parallel for
for (auto x = 0; x < num_points; ++x) {
cluster[x] = util::PreDefinedValues<SizeT>::InvalidValue;
}
debug2("step 0\n");
// For each point make a binary search tree of its k nearest neighbors
std::vector<std::set<SizeT>> knns_set; knns_set.resize(num_points);
for (SizeT x = 0; x < num_points; ++x){
knns_set[x] = std::set<SizeT>(knns + (x*k), knns + ((x+1)*k));
}
debug2("step 1\n");
#ifdef SNN_DEBUG
for (auto x = 0; x < num_points; ++x){
debug("knns[%d]: ", x);
for (auto y : knns_set[x]){
debug("%d ", y);
}
debug("\nsupposed to be:\n");
for (int i = 0; i < k; ++i){
debug("%d ", knns[x * k + i]);
}
debug("\n");
}
#endif
// Table of sets of shared nn for each point
std::vector<std::set<SizeT>> snns; snns.resize(num_points);
// Set of core points
std::set<SizeT> core_points;
// Visited points
std::vector<bool> visited; visited.resize(num_points, false);
// Intersection of two knns sets
std::vector<SizeT> common_knns; common_knns.resize(k);
debug("Looking for snns\n");
for (SizeT x = 0; x < num_points; ++x){
//for each q in kNN(x)
debug("Snn of %d\n", x);
for (SizeT i = 0; i < k; ++i){
SizeT q = knns[x * k + i];
debug("%d - knn[%d]\t", q, x);
//if x is in kNN(q)
/*if (snns[q].find(x) != snns[q].end()){
debug("\n");
continue;
}*/
if (knns_set[q].find(x) != knns_set[q].end()){
debug("%d - knn[%d]\t", x, q);
// checking size of set the common
auto it = std::set_intersection(knns_set[x].begin(),
knns_set[x].end(), knns_set[q].begin(),
knns_set[q].end(), common_knns.begin());
common_knns.resize(it-common_knns.begin());
debug("they shared %d neighbors\t", common_knns.size());
if (common_knns.size() > eps){
snns[x].insert(q);
snns[q].insert(x);
visited[x] = true;
visited[q] = true;
debug("%d %d - snn\n", x, q);
}else{
debug("\n");
}
}else{
debug("\n");
}
}
}
debug2("step 2\n");
// Find core points:
for (SizeT x = 0; x < num_points; ++x){
if (visited[x] && snns[x].size() > 0){
debug("density[%d] = %d\t", x, snns[x].size());
debug("snns: ");
for (auto ss :snns[x]){
debug("%d ", ss);
}
debug("\n");
}
if (visited[x] && snns[x].size() >= min_pts){
core_points.insert(x);
debug("%d is core_point\n", x);
cluster[x] = x;
}else
debug("%d is not core_point\n", x);
}
// Set core points counter
*core_point_counter = core_points.size();
debug2("step 3\n");
#if SNN_DEBUG
debug("core points (%d): ", core_points.size());
for (auto cpb = core_points.begin(); cpb != core_points.end(); ++cpb) {
debug("%d ", *cpb);
}
debug("\n");
#endif
// Create empty clusters:
DisjointSet<SizeT> clusters(num_points);
int iter = 0;
// Find clusters. Union core points
for (auto c1 : core_points){
for (auto c2 : core_points){
if (snns[c1].find(c2) != snns[c1].end()){
if ((iter++) % 100000 == 0)
debug2("union %d %d\n", c1, c2);
clusters.Union(c1, c2);
cluster[c1] = clusters.Find(c1);
cluster[c2] = clusters.Find(c1);
}
}
}
debug2("step 4\n");
#if SNN_DEBUG
debug("clusters after union core points:\n");
for (int i = 0; i < num_points; ++i)
debug("cluster[%d] = %d\n", i, cluster[i]);
#endif
noise_point_counter[0] = 0;
debug2("cpu noise points: ");
debug("assign non core points\n");
// Assign non core points
for (SizeT x = 0; x < num_points; ++x){
if (core_points.find(x) == core_points.end()){
// x is non core point
debug("%d - non core point\n", x);
SizeT nearest_core_point = util::PreDefinedValues<SizeT>::InvalidValue;
SizeT similarity_to_nearest_core_point = 0;
for (auto q : knns_set[x]){
debug("%d, knn of %d\n", q);
if (core_points.find(q) != core_points.end()){
debug("\t%d is core point\n", q);
if (knns_set[q].find(x) != knns_set[q].end()){
// q is core point
auto it = std::set_intersection(knns_set[x].begin(), knns_set[x].end(),
knns_set[q].begin(), knns_set[q].end(), common_knns.begin());
common_knns.resize(it-common_knns.begin());
if (!util::isValid(nearest_core_point) ||
common_knns.size() > similarity_to_nearest_core_point){
similarity_to_nearest_core_point = common_knns.size();
nearest_core_point = q;
}
}
}
}
if (util::isValid(nearest_core_point) &&
similarity_to_nearest_core_point > eps){
// x is not a noise point
clusters.Union(x, nearest_core_point);
cluster[x] = clusters.Find(nearest_core_point);
cluster[nearest_core_point] = clusters.Find(nearest_core_point);
}else{
cluster[x] = util::PreDefinedValues<SizeT>::InvalidValue;
noise_point_counter[0]++;
debug2("%d ", x);
}
}
}
debug2("\n");
debug2("step 5\n");
#if SNN_DEBUG
debug("clusters after assigne non core points\n");
for (int i = 0; i < num_points; ++i)
debug("cluster[%d] = %d\n", i, cluster[i]);
#endif
std::unordered_set<SizeT> cluster_set;
for (SizeT x = 0; x < num_points; ++x){
if (util::isValid(cluster[x])){
cluster_set.insert(clusters.Find(x)); // have to be clusters.Find(x) because array stores not updated cluster numbers
}
}
debug2("cpu clusters ids:\n");
for (auto x: cluster_set){
debug2("%d ", x);
}
debug2("\n");
debug2("step 6\n");
#if SNN_DEBUG
debug("cpu clusters: ");
for (SizeT x = 0; x < num_points; ++x){
if (!util::isValid(cluster[x])){
debug("%x does not have cluster, it is noise point", x);
}else{
debug("cluster of %d is %d\n", x, clusters.Find(x));
}
}
#endif
// Set cluster counter
*cluster_counter = cluster_set.size();
debug2("step 7\n");
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
return elapsed;
}
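/*
Illustrative sketch (not part of the original Gunrock code): the shared
nearest neighbour count used by CPU_Reference above, pulled out into a tiny
helper. The helper name SharedNeighborCount is hypothetical.
*/
template <typename SizeT>
SizeT SharedNeighborCount(const std::set<SizeT> &knn_a,
const std::set<SizeT> &knn_b) {
std::vector<SizeT> common(std::min(knn_a.size(), knn_b.size()));
auto it = std::set_intersection(knn_a.begin(), knn_a.end(),
knn_b.begin(), knn_b.end(),
common.begin());
return static_cast<SizeT>(it - common.begin());
}
// Two points x and q are SNN-similar when q is in kNN(x), x is in kNN(q), and
// SharedNeighborCount(kNN(x), kNN(q)) > eps, exactly the test used above.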
/**
* @brief Validation of snn results
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the values
* @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] verbose Whether to output detailed comparisons
* \return GraphT::SizeT Number of errors
*/
template <typename GraphT, typename SizeT = typename GraphT::SizeT>
typename GraphT::SizeT Validate_Results(util::Parameters &parameters,
GraphT &graph,
SizeT *h_cluster,
SizeT *h_core_point_counter,
SizeT *h_noise_point_counter,
SizeT *h_cluster_counter,
SizeT *ref_cluster,
SizeT *ref_core_point_counter,
SizeT *ref_noise_point_counter,
SizeT *ref_cluster_counter,
bool verbose = true) {
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::CsrT CsrT;
SizeT num_errors = 0;
bool quiet = parameters.Get<bool>("quiet");
bool quick = parameters.Get<bool>("quick");
SizeT num_points = parameters.Get<SizeT>("n");
SizeT k = parameters.Get<int>("k");
SizeT eps = parameters.Get<int>("eps");
SizeT min_pts = parameters.Get<int>("min-pts");
if (quick){
printf("number of points: %d\n", num_points);
printf("gpu core point counter %d\n", *h_core_point_counter);
printf("gpu noise point counter %d\n", *h_noise_point_counter);
printf("gpu cluster counter %d\n", *h_cluster_counter);
return num_errors;
}
printf("Validate results start, num_errors so far %d\n", num_errors);
printf("number of points: %d\n", num_points);
printf("cpu core point counter %d, gpu core point counter %d\n",
*ref_core_point_counter, *h_core_point_counter);
if (*ref_core_point_counter != *h_core_point_counter){
++num_errors;
printf("error\n");
}
printf("cpu noise point counter %d, gpu noise point counter %d\n",
*ref_noise_point_counter, *h_noise_point_counter);
if (*ref_noise_point_counter != *h_noise_point_counter){
++num_errors;
printf("error\n");
}
printf("cpu cluster counter %d, gpu cluster counter %d\n",
*ref_cluster_counter, *h_cluster_counter);
if (*ref_cluster_counter != *h_cluster_counter){
++num_errors;
printf("error\n");
}
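// Cluster IDs from the CPU and GPU runs need not match numerically; the loop
// below checks that both labelings induce the same partition: any two points
// that share a CPU cluster must also share a GPU cluster.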
std::vector<bool> unvisited_cluster_of; unvisited_cluster_of.resize(num_points, true);
for (SizeT i = 0; i < num_points; ++i) {
if (unvisited_cluster_of[i]){
unvisited_cluster_of[i] = false;
auto h_cluster_of_i = h_cluster[i];
auto ref_cluster_of_i = ref_cluster[i];
for (SizeT j = 0; j < num_points; ++j){
if (not unvisited_cluster_of[j]) continue;
if (ref_cluster[j] == ref_cluster_of_i){
if (h_cluster[j] != h_cluster_of_i){
printf("error: gpu %d and %d supposed to be in one cluster but are: %d != %d, on CPU %d and %d are in one cluster %d\n", i, j, h_cluster[i], h_cluster[j], i, j, ref_cluster_of_i);
++num_errors;
}
unvisited_cluster_of[j] = false;
}
}
}
}
if (num_errors > 0) {
util::PrintMsg(std::to_string(num_errors) + " errors occurred in SNN.",
!quiet);
} else {
util::PrintMsg("PASSED SNN", !quiet);
}
return num_errors;
//+ knn_errors;
}
} // namespace snn
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
// ==== the_stack: next source file ====
#include <cuda.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <vector>
#include <memory>
#include <algorithm>
#include <immintrin.h>
#include "cudautils.h"
#define RUNCPU 1
#define CHECK 1
#define NPTS (2048*8)
#define NDIM 128
#define M1W 128
#define M2W 16
#define M2H 16
#define M5W 16
#define M5H 16
#define M5R 4
#define M7W 32
#define M7H 32
#define M7R 4
/*
Data size: 16 MB
Allocate: 1.01194 ms
Upload: 3.69939 ms 4.32503 MB/ms
MatchCPU1: 34649.6 ms 1.89139 Gflops
MatchCPU2: 3064.36 ms 21.3866 Gflops
MatchCPU3: 184.762 ms 354.706 Gflops
MatchGPU1: 641.828 ms 102.108 Gflops
MatchGPU2: 148.020 ms 442.752 Gflops
MatchGPU3: 31.9609 ms 2050.50 Gflops
MatchGPU4: 29.7891 ms 2200.00 Gflops
MatchGPU5: 17.1484 ms 3821.69 Gflops
MatchGPU6: 16.3516 ms 4007.94 Gflops
MatchGPU7: 14.7995 ms 4428.27 Gflops
MatchGPU8: 10.5291 ms 6224.28 Gflops
Download: 0.16016 ms 0.780488 MB/ms
*/
void MatchC1(float *h_pts1, float *h_pts2, float *h_score, int *h_index)
{
std::memset(h_score, 0, sizeof(float)*NPTS);
for (int p1=0;p1<NPTS;p1++) {
for (int p2=0;p2<NPTS;p2++) {
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += h_pts1[p1*NDIM + d]*h_pts2[p2*NDIM + d];
if (score>h_score[p1]) {
h_score[p1] = score;
h_index[p1] = p2;
}
}
}
}
void MatchC2(float *h_pts1, float *h_pts2, float *h_score, int *h_index)
{
#define BSIZ 256
std::memset(h_score, 0, sizeof(float)*NPTS);
for (int b1=0;b1<NPTS;b1+=BSIZ) {
for (int b2=0;b2<NPTS;b2+=BSIZ) {
for (int p1=b1;p1<b1+BSIZ;p1++) {
float *pt1 = &h_pts1[p1*NDIM];
for (int p2=b2;p2<b2+BSIZ;p2++) {
float *pt2 = &h_pts2[p2*NDIM];
__m256 score8 = _mm256_setzero_ps();
for (int d=0;d<NDIM;d+=8) {
__m256 v1 = _mm256_load_ps(pt1 + d);
__m256 v2 = _mm256_load_ps(pt2 + d);
score8 = _mm256_fmadd_ps(v1, v2, score8);
}
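// horizontal reduction of the 8 partial sums in score8: fold the upper 128-bit
// lane onto the lower one, then two hadd steps leave the full dot product in
// the lowest element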
score8 = _mm256_add_ps(score8, _mm256_permute2f128_ps(score8, score8, 1));
score8 = _mm256_hadd_ps(score8, score8);
float score = _mm256_cvtss_f32(_mm256_hadd_ps(score8, score8));
if (score>h_score[p1]) {
h_score[p1] = score;
h_index[p1] = p2;
}
}
}
}
}
}
void MatchC3(float *h_pts1, float *h_pts2, float *h_score, int *h_index)
{
#define BSIZ 256
std::memset(h_score, 0, sizeof(float)*NPTS);
#pragma omp parallel for
for (int b1=0;b1<NPTS;b1+=BSIZ) {
for (int b2=0;b2<NPTS;b2+=BSIZ) {
for (int p1=b1;p1<b1+BSIZ;p1++) {
float *pt1 = &h_pts1[p1*NDIM];
for (int p2=b2;p2<b2+BSIZ;p2++) {
float *pt2 = &h_pts2[p2*NDIM];
__m256 score8 = _mm256_setzero_ps();
for (int d=0;d<NDIM;d+=8) {
__m256 v1 = _mm256_load_ps(pt1 + d);
__m256 v2 = _mm256_load_ps(pt2 + d);
score8 = _mm256_fmadd_ps(v1, v2, score8);
}
score8 = _mm256_add_ps(score8, _mm256_permute2f128_ps(score8, score8, 1));
score8 = _mm256_hadd_ps(score8, score8);
float score = _mm256_cvtss_f32(_mm256_hadd_ps(score8, score8));
if (score>h_score[p1]) {
h_score[p1] = score;
h_index[p1] = p2;
}
}
}
}
}
}
void CheckMatches(int *h_index, int *h_index2, float *h_score, float *h_score2)
{
int ndiff = 0;
for (int i=0;i<NPTS;i++) {
ndiff += (h_index[i] != h_index2[i]);
if (h_index[i] != h_index2[i])
std::cout << " " << i << " " << h_index[i] << " " << h_index2[i] << " " << h_score[i] << " " << h_score2[i] << std::endl;
}
std::cout << "Number of incorrect matches: " << ndiff << std::endl;
}
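// Match1: naive baseline - one thread per pts1 descriptor, streaming every
// pts2 descriptor straight from global memory and keeping the best dot product.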
__global__ void Match1(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
int p1 = threadIdx.x + M1W*blockIdx.x;
float max_score = 0.0f;
int index = -1;
for (int p2=0;p2<NPTS;p2++) {
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += d_pts1[p1*NDIM + d]*d_pts2[p2*NDIM + d];
if (score>max_score) {
max_score = score;
index = p2;
}
}
d_score[p1] = max_score;
d_index[p1] = index;
}
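// Match2: tiles M2W pts1 descriptors and M2H pts2 descriptors through shared
// memory; each (tx,ty) thread computes one dot product and the ty==0 row
// reduces the per-column maxima.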
__global__ void Match2(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float buffer1[M2W*NDIM]; //%%%%
__shared__ float buffer2[M2H*NDIM]; //%%%%
__shared__ float scores[M2W*M2H]; //%%%%
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*NDIM + d] = d_pts1[(bp1 + j)*NDIM + d]; //%%%%
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d]; //%%%%
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*NDIM + d]*buffer2[ty*NDIM + d]; //%%%%
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
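// Match3: same as Match2, but buffer1 rows are padded by one float (stride
// NDIM + 1) so threads with different tx read from different shared-memory banks.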
__global__ void Match3(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float buffer1[M2W*(NDIM + 1)]; //%%%%
__shared__ float buffer2[M2H*NDIM];
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM + 1) + d] = d_pts1[(bp1 + j)*NDIM + d]; //%%%%
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM;d+=M2W)
buffer2[ty*NDIM + d] = d_pts2[(bp2 + ty)*NDIM + d];
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM;d++)
score += buffer1[tx*(NDIM + 1) + d]*buffer2[ty*NDIM + d]; //%%%%
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
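// Match4: switches the shared-memory tiles and the inner product to float4,
// quartering the number of loads per dot product.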
__global__ void Match4(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M2W*(NDIM/4 + 1)]; //%%%%
__shared__ float4 buffer2[M2H*NDIM/4]; //%%%%
__shared__ float scores[M2W*M2H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int idx = tx + M2W*ty;
int bp1 = M2W*blockIdx.x;
if (ty<M2W)
for (int d=tx;d<NDIM/4;d+=M2W)
for (int j=ty;j<M2W;j+=M2H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d]; //%%%%
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M2H) {
for (int d=tx;d<NDIM/4;d+=M2W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d]; //%%%%
__syncthreads();
float score = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d]; //%%%%
float4 v2 = buffer2[ty*(NDIM/4) + d]; //%%%%
score += v1.x*v2.x; score += v1.y*v2.y;
score += v1.z*v2.z; score += v1.w*v2.w;
}
scores[idx] = score;
__syncthreads();
if (ty==0) {
for (int i=0;i<M2H;i++) {
if (scores[i*M2W + tx]>max_score) {
max_score = scores[i*M2W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
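// Match5: register tiling - only the first M5H/M5R thread rows compute, each
// accumulating M5R pts2 rows at once so every buffer1 value is reused from a register.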
__global__ void Match5(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M5H*NDIM/4];
__shared__ float scores[M5W*M5H];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M5W*blockIdx.x;
if (ty<M5W)
for (int d=tx;d<NDIM/4;d+=M5W)
for (int j=ty;j<M5W;j+=M5H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
__syncthreads();
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M5H) {
for (int d=tx;d<NDIM/4;d+=M5W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
if (ty<M5H/M5R) { //%%%%
float score[M5R]; //%%%%
for (int dy=0;dy<M5R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
for (int dy=0;dy<M5R;dy++) {
float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d]; //%%%%
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M5R;dy++)
scores[tx + M5W*(M5R*ty + dy)] = score[dy];
}
__syncthreads();
if (ty==0) {
for (int i=0;i<M5H;i++) {
if (scores[i*M5W + tx]>max_score) {
max_score = scores[i*M5W + tx];
index = bp2 + i;
}
}
}
__syncthreads();
}
if (ty==0) {
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
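// Match6: like Match5, but each computing thread keeps its own running
// max/index, and the final reduction reuses buffer1 as scratch for the
// per-thread results instead of storing all scores.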
__global__ void Match6(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M5W*(NDIM/4 + 1)];
__shared__ float4 buffer2[M5H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M5W*blockIdx.x;
if (ty<M5W)
for (int d=tx;d<NDIM/4;d+=M5W)
for (int j=ty;j<M5W;j+=M5H)
buffer1[j*(NDIM/4 + 1) + d] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M5H) {
for (int d=tx;d<NDIM/4;d+=M5W)
buffer2[ty*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + ty)*(NDIM/4) + d];
__syncthreads();
if (ty<M5H/M5R) {
float score[M5R];
for (int dy=0;dy<M5R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*(NDIM/4 + 1) + d];
for (int dy=0;dy<M5R;dy++) {
float4 v2 = buffer2[(M5R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M5R;dy++) {
if (score[dy]>max_score) { //%%%%
max_score = score[dy];
index = bp2 + M5R*ty + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M5W*M5H/M5R];
if (ty<M5H/M5R) {
scores[ty*M5W + tx] = max_score; //%%%%
indices[ty*M5W + tx] = index; //%%%%
}
__syncthreads();
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M5H/M5R;y++)
if (scores[y*M5W + tx]>max_score) {
max_score = scores[y*M5W + tx]; //%%%%
index = indices[y*M5W + tx]; //%%%%
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
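// Match7: 32x32 tiles with a 32x(M7H/M7R) thread block; buffer1 columns are
// stored skewed ((d + j) % (NDIM/4)) so consecutive threads hit different
// shared-memory banks, and each thread accumulates M7R pts2 rows.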
__global__ void Match7(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4]; //%%%%
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R) //%%%%
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score = 0.0f;
int index = -1;
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R) //%%%%
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R];
for (int dy=0;dy<M7R;dy++)
score[dy] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1 = buffer1[tx*NDIM/4 + (d + tx)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*ty + dy)*(NDIM/4) + d];
score[dy] += v1.x*v2.x; score[dy] += v1.y*v2.y;
score[dy] += v1.z*v2.z; score[dy] += v1.w*v2.w;
}
}
for (int dy=0;dy<M7R;dy++) {
if (score[dy]>max_score) {
max_score = score[dy];
index = bp2 + M7R*ty + dy;
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
scores[ty*M7W + tx] = max_score;
indices[ty*M7W + tx] = index;
__syncthreads();
if (ty==0) {
max_score = scores[tx];
index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
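// Match8: adds a second level of register tiling - each active thread produces
// NRX pts1 columns x M7R pts2 rows of partial scores, so only
// M7W*M7H/M7R/NRX threads of the block do the arithmetic.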
__global__ void Match8(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
#define NRX 2
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
if (idx<M7W*M7H/M7R/NRX) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
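// Match8small: the same scheme as Match8 but launched with half the thread
// rows (M7H/M7R/NRX), so every thread computes and no guard is needed around
// the inner loop.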
__global__ void Match8small(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NDIM/4];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R/NRX)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7H;j+=M7H/M7R/NRX)
buffer2[j*NDIM/4 + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int d=0;d<NDIM/4;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*(NDIM/4) + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
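// Match8blocked: streams buffer2 in NUM-column float4 chunks and prefetches
// the next chunk into a register (temp) while the current one is consumed -
// software double buffering of the pts2 tile.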
__global__ void Match8blocked(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
#define NUM (NRX*M7R) // 32*8 threads
__shared__ float4 buffer1[M7W*NDIM/4]; // 32*32
__shared__ float4 buffer2[M7H*NUM]; // 32*8
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
int d = (idx%NUM);
int j = (idx/NUM);
buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + d];
__syncthreads();
for (int dp=0;dp<NDIM/4;dp+=NUM) {
float4 temp;
if (dp<(NDIM/4-NUM))
temp = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d + NUM];
if (idx<M7W*M7H/M7R/NRX) {
for (int d=0;d<NUM;d++) {
float4 v1[NRX];
#pragma unroll
for (int i=0;i<NRX;i++)
v1[i] = buffer1[(((M7W/NRX)*i + ix)<<5) + ((dp + d + (M7W/NRX)*i + ix)&31)];
//v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)];
#pragma unroll
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + d];
#pragma unroll
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
if (dp<(NDIM/4-NUM)) {
buffer2[j*NUM + d] = temp;
__syncthreads();
}
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
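// Match8blocked2: simpler chunked variant of Match8blocked without the
// register prefetch; each NUM-column chunk is loaded, synced, and consumed in turn.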
__global__ void Match8blocked2(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
#define NUM (NRX*M7R) // 32*8 threads
__shared__ float4 buffer1[M7W*NDIM/4]; // 32*32
__shared__ float4 buffer2[M7H*NUM]; // 32*8
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
for (int dp=0;dp<NDIM/4;dp+=NUM) {
int d = (idx%NUM);
int j = (idx/NUM);
buffer2[j*NUM + d] = ((float4*)d_pts2)[(bp2 + j)*(NDIM/4) + dp + d];
__syncthreads();
if (idx<M7W*M7H/M7R/NRX) {
for (int d=0;d<NUM;d++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (dp + d + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + d];
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
}
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
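// Match9: streams buffer2 in NUM-column chunks loaded cooperatively by the
// first M7H*NUM threads; the accumulation otherwise follows the Match8
// register-tiling scheme.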
__global__ void Match9(float *d_pts1, float *d_pts2, float *d_score, int *d_index)
{
#define NRX 2
#undef NUM // NUM was defined as (NRX*M7R) for the kernels above
#define NUM 8
__shared__ float4 buffer1[M7W*NDIM/4];
__shared__ float4 buffer2[M7H*NUM];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bp1 = M7W*blockIdx.x;
for (int d=tx;d<NDIM/4;d+=M7W)
for (int j=ty;j<M7W;j+=M7H/M7R)
buffer1[j*NDIM/4 + (d + j)%(NDIM/4)] = ((float4*)d_pts1)[(bp1 + j)*(NDIM/4) + d];
float max_score[NRX];
int index[NRX];
for (int i=0;i<NRX;i++) {
max_score[i] = 0.0f;
index[i] = -1;
}
int idx = ty*M7W + tx;
int ix = idx%(M7W/NRX);
int iy = idx/(M7W/NRX);
for (int bp2=0;bp2<NPTS;bp2+=M7H) {
float score[M7R][NRX];
if (idx<M7W*M7H/M7R/NRX) { // 128
for (int dy=0;dy<M7R;dy++)
for (int i=0;i<NRX;i++)
score[dy][i] = 0.0f;
}
for (int d=0;d<NDIM/4;d+=NUM) {
if (idx<M7H*NUM) // 256
buffer2[idx] = ((float4*)d_pts2)[(bp2 + (idx/NUM))*(NDIM/4) + d + (idx%NUM)];
__syncthreads();
if (idx<M7W*M7H/M7R/NRX) { // 128
for (int j=0;j<NUM;j++) {
float4 v1[NRX];
for (int i=0;i<NRX;i++)
v1[i] = buffer1[((M7W/NRX)*i + ix)*NDIM/4 + (d + j + (M7W/NRX)*i + ix)%(NDIM/4)];
for (int dy=0;dy<M7R;dy++) {
float4 v2 = buffer2[(M7R*iy + dy)*NUM + j]; // use iy to match the row recorded in index[] below
for (int i=0;i<NRX;i++) {
score[dy][i] += v1[i].x*v2.x;
score[dy][i] += v1[i].y*v2.y;
score[dy][i] += v1[i].z*v2.z;
score[dy][i] += v1[i].w*v2.w;
}
}
}
}
__syncthreads();
}
if (idx<M7W*M7H/M7R/NRX) { // 128
for (int dy=0;dy<M7R;dy++) {
for (int i=0;i<NRX;i++) {
if (score[dy][i]>max_score[i]) {
max_score[i] = score[dy][i];
index[i] = bp2 + M7R*iy + dy;
}
}
}
}
__syncthreads();
}
float *scores = (float*)buffer1;
int *indices = (int*)&scores[M7W*M7H/M7R];
if (idx<M7W*M7H/M7R/NRX) {
for (int i=0;i<NRX;i++) {
scores[iy*M7W + (M7W/NRX)*i + ix] = max_score[i];
indices[iy*M7W + (M7W/NRX)*i + ix] = index[i];
}
}
__syncthreads();
if (ty==0) {
float max_score = scores[tx];
int index = indices[tx];
for (int y=0;y<M7H/M7R;y++)
if (scores[y*M7W + tx]>max_score) {
max_score = scores[y*M7W + tx];
index = indices[y*M7W + tx];
}
d_score[bp1 + tx] = max_score;
d_index[bp1 + tx] = index;
}
}
int main(int argc, char *argv[])
{
safeCall(cudaSetDevice(0));
size_t space = sizeof(float)*NPTS*NDIM*2 + 8;
std::vector<float> data(NPTS*NDIM*2 + 8);
void *ptr = (void*)&data[0];
float *h_pts1 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
ptr = (void*)&data[NPTS*NDIM];
float *h_pts2 = (float*)std::align(32, sizeof(float)*NPTS*NDIM, ptr, space);
std::vector<int> h_index(NPTS);
std::vector<float> h_score(NPTS);
std::vector<int> h_index2(NPTS);
std::vector<float> h_score2(NPTS);
float *d_pts1, *d_pts2, *d_score;
int *d_index;
std::cout << std::endl;
int psize = sizeof(float)*NPTS;
std::cout << "Data size: " << 2.0*psize*NDIM/1024/1024 << " MB" << std::endl;
TimerGPU time;
float ltime = time.read();
safeCall(cudaMalloc((void **)&d_pts1, psize*NDIM));
safeCall(cudaMalloc((void **)&d_pts2, psize*NDIM));
safeCall(cudaMalloc((void **)&d_index, psize));
safeCall(cudaMalloc((void **)&d_score, psize));
std::cout << "Allocate: " << time.read() - ltime << " ms" << std::endl;
for (int i=0;i<NPTS;i++) {
float sum1 = 0.0f, sum2 = 0.0f;
for (int d=0;d<NDIM;d++) {
sum1 += h_pts1[i*NDIM + d] = (float)rand()/RAND_MAX;
sum2 += h_pts2[i*NDIM + d] = (float)rand()/RAND_MAX;
}
sum1 = sqrt(NDIM)/sum1;
sum2 = sqrt(NDIM)/sum2;
for (int d=0;d<NDIM;d++) {
h_pts1[i*NDIM + d] *= sum1;
h_pts2[i*NDIM + d] *= sum2;
}
}
ltime = time.read();
safeCall(cudaMemcpy(d_pts1, h_pts1, psize*NDIM, cudaMemcpyHostToDevice));
safeCall(cudaMemcpy(d_pts2, h_pts2, psize*NDIM, cudaMemcpyHostToDevice));
float delay = time.read() - ltime;
std::cout << "Upload: " << delay << " ms " << 2*psize*NDIM/delay/1024/1024 << " MB/ms" << std::endl;
if (RUNCPU) {
#if 0
ltime = time.read();
MatchC1(h_pts1, h_pts2, h_score.data(), h_index.data());
delay = time.read() - ltime;
std::cout << "MatchCPU1: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
ltime = time.read();
MatchC2(h_pts1, h_pts2, h_score.data(), h_index.data());
delay = time.read() - ltime;
std::cout << "MatchCPU2: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
#endif
ltime = time.read();
MatchC3(h_pts1, h_pts2, h_score.data(), h_index.data());
delay = time.read() - ltime;
std::cout << "MatchCPU3: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
}
dim3 blocks, threads;
#if 0
blocks = dim3(NPTS/M1W);
threads = dim3(M1W);
ltime = time.read();
Match1<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match1 error");
std::cout << "MatchGPU1: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
ltime = time.read();
Match2<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match2 error");
std::cout << "MatchGPU2: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
#endif
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
ltime = time.read();
Match3<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match3 error");
std::cout << "MatchGPU3: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M2W);
threads = dim3(M2W, M2H);
ltime = time.read();
Match4<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match4 error");
std::cout << "MatchGPU4: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M5W);
threads = dim3(M5W, M5H);
ltime = time.read();
Match5<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match5 error");
std::cout << "MatchGPU5: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M5W);
threads = dim3(M5W, M5H);
ltime = time.read();
Match6<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match6 error");
std::cout << "MatchGPU6: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
ltime = time.read();
Match7<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match7 error");
std::cout << "MatchGPU7: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
ltime = time.read();
Match8<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match8 error");
std::cout << "MatchGPU8: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
#if 1
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R/2);
ltime = time.read();
Match8small<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match8small error");
std::cout << "Match8small: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
#endif
#if 1
blocks = dim3(NPTS/M7W);
threads = dim3(M7W, M7H/M7R);
ltime = time.read();
Match8blocked<<<blocks,threads>>>(d_pts1, d_pts2, d_score, d_index);
delay = time.read() - ltime;
checkMsg("Match8blocked error");
std::cout << "MatchGPU8blocked: " << delay << " ms " << 2.0*NPTS*NPTS*NDIM/delay/1024/1024 << " Gflops" << std::endl;
#endif
ltime = time.read();
safeCall(cudaMemcpy(h_index2.data(), d_index, psize, cudaMemcpyDeviceToHost));
safeCall(cudaMemcpy(h_score2.data(), d_score, psize, cudaMemcpyDeviceToHost));
delay = time.read() - ltime;
std::cout << "Download: " << delay << " ms " << 2*psize/delay/1024/1024 << " MB/ms" << std::endl;
ltime = time.read();
if (CHECK)
CheckMatches(h_index.data(), h_index2.data(), h_score.data(), h_score2.data());
std::cout << std::endl;
safeCall(cudaFree(d_pts1));
safeCall(cudaFree(d_pts2));
safeCall(cudaFree(d_index));
safeCall(cudaFree(d_score));
}
// ==== the_stack: next source file ====
#include "caffe/layers/base_data_layer.hpp"
#include "caffe/layers/suncg_data_layer.hpp"
#include "caffe/util/rng.hpp"
#include "suncg_util.hpp"
#include "suncg_fusion.hpp"
// #include "suncg_fusion.cu"
DEFINE_bool(shuran_chatter, false,
"If you are Shuran and want chatter, turn this on.");
using std::vector;
namespace caffe {
template<typename Dtype>
SuncgDataLayer<Dtype>::~SuncgDataLayer<Dtype>() {
this->StopInternalThread();
}
template<typename Dtype>
void SuncgDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype> *> &bottom,
const vector<Blob<Dtype> *> &top) {
LOG(INFO) << "Read SUNCG parameters";
const SuncgDataParameter& data_param =
this->layer_param_.suncg_data_param();
for (int i = 0; i < data_param.file_data_size(); ++i) {
file_data.push_back(data_param.file_data(i));
}
file_list = data_param.file_list();
vox_unit = data_param.vox_unit();
vox_margin = data_param.vox_margin();
add_height = data_param.with_height();
for (int i = 0; i < data_param.seg_class_map_size(); ++i) {
segmentation_class_map.push_back(data_param.seg_class_map(i));
}
num_segmentation_class = *std::max_element(segmentation_class_map.begin(), segmentation_class_map.end())+1;
LOG(INFO) << "num_segmentation_class"<< num_segmentation_class;
for (int i = 0; i < data_param.seg_class_weight_size(); ++i) {
segmentation_class_weight.push_back(data_param.seg_class_weight(i));
}
for (int i = 0; i < data_param.occ_class_weight_size(); ++i) {
occupancy_class_weight.push_back(data_param.occ_class_weight(i));
}
shuffle_data = data_param.shuffle();
occ_emptyonly = data_param.occ_empty_only();
data_num_channel = add_height ? 2 : 1;
surf_only = data_param.surf_only();
CHECK_EQ(data_param.vox_size_size(), 3);
CHECK_EQ(data_param.crop_size_size(), 3);
for (int i = 0; i < data_param.vox_size_size(); ++i) {
data_full_vox_size.push_back(data_param.vox_size(i));
}
for (int i = 0; i < data_param.crop_size_size(); ++i) {
data_crop_vox_size.push_back(data_param.crop_size(i));
}
for (int i = 0; i < data_param.label_size_size(); ++i) {
label_vox_size.push_back(data_param.label_size(i));
}
sample_neg_obj_ratio = data_param.neg_obj_sample_ratio();
batch_size = data_param.batch_size();
offset_value = 0;
epoch_prefetch = 0;
counter = 0;
// List all files in data folder and shuffle them if necessary
GetFiles(file_data, data_filenames, "camera_list_train.list", "0000.png");
if (shuffle_data) {
const unsigned int rng_seed = caffe_rng_rand();
rng_.reset(new Caffe::RNG(rng_seed));
Shuffle();
}
LOG(INFO) << "Read camera information";
// Copy camera information to GPU
cam_info[0] = Dtype(frame_width);
cam_info[1] = Dtype(frame_height);
for (int i = 0; i < 9; ++i)
cam_info[i + 2] = cam_K[i];
for (int i = 0; i < 16; ++i)
cam_info[i + 11] = 0.0f;
CUDA_CHECK(cudaMalloc(&cam_info_GPU, 27 * sizeof(Dtype)));
LOG(INFO) << "Set voxel volume parameters and copy them to GPU";
vox_info[0] = vox_unit;
vox_info[1] = vox_margin;
for (int i = 0; i < 3; ++i)
vox_info[i + 2] = Dtype(data_crop_vox_size[i]);
CUDA_CHECK(cudaMalloc(&vox_info_GPU, 8 * sizeof(Dtype)));
LOG(INFO) << "Allocating data";
LOG(INFO) << "data_num_channel: "<< data_num_channel;
// GPU malloc depth data
CUDA_CHECK(cudaMalloc(&depth_data_GPU,
frame_height * frame_width * sizeof(Dtype)));
// GPU malloc voxel volume weights
CUDA_CHECK(cudaMalloc(&vox_weight_GPU,
data_crop_vox_size[0] * data_crop_vox_size[1] *
data_crop_vox_size[2] * sizeof(Dtype)));
size_t memoryBytes = 0;
// std::cout << (train_me ? "* " : " ");
// std::cout << name << std::endl;
// Determine if data should be cropped
is_cropping_data = data_crop_vox_size[0] < data_full_vox_size[0] ||
data_crop_vox_size[1] < data_full_vox_size[1] ||
data_crop_vox_size[2] < data_full_vox_size[2];
int num_crop_voxels =
data_crop_vox_size[0] * data_crop_vox_size[1] * data_crop_vox_size[2];
int num_full_voxels =
data_full_vox_size[0] * data_full_vox_size[1] * data_full_vox_size[2];
if (is_cropping_data) {
CUDA_CHECK(cudaMalloc(&vox_data_GPU,
batch_size * data_num_channel * num_crop_voxels *
sizeof(Dtype)));
memoryBytes +=
batch_size * data_num_channel * num_crop_voxels * sizeof(Dtype);
} else {
CUDA_CHECK(cudaMalloc(&vox_data_GPU,
batch_size * data_num_channel * num_full_voxels *
sizeof(Dtype)));
memoryBytes +=
batch_size * data_num_channel * num_full_voxels * sizeof(Dtype);
}
int num_label_voxels =
label_vox_size[0] * label_vox_size[1] * label_vox_size[2];
CUDA_CHECK(cudaMalloc(&occupancy_label_GPU,
batch_size * num_label_voxels * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&occupancy_weight_GPU,
batch_size * 2 * num_label_voxels * sizeof(Dtype)));
memoryBytes += batch_size * 3 * num_label_voxels * sizeof(Dtype);
CUDA_CHECK(cudaMalloc(&segmentation_label_GPU,
batch_size * num_label_voxels * sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&segmentation_weight_GPU,
batch_size * num_segmentation_class * num_label_voxels *
sizeof(Dtype)));
CUDA_CHECK(cudaMalloc(&segmentation_surf_weight_GPU,
batch_size * num_segmentation_class * num_label_voxels *
sizeof(Dtype)));
memoryBytes += batch_size * (num_segmentation_class + 1) * num_label_voxels *
sizeof(Dtype);
LOG(INFO) << "Resize tops";
// out[0]->need_diff = false;
std::vector<int> data_dim;
data_dim.resize(5);
data_dim[0] = batch_size;
data_dim[1] = data_num_channel;
if (is_cropping_data) {
data_dim[2] = data_crop_vox_size[0];
data_dim[3] = data_crop_vox_size[1];
data_dim[4] = data_crop_vox_size[2];
} else {
data_dim[2] = data_full_vox_size[0];
data_dim[3] = data_full_vox_size[1];
data_dim[4] = data_full_vox_size[2];
}
top[0]->Reshape(data_dim);
for (int i = 0; i < this->PREFETCH_COUNT; ++i) {
// this->prefetch_[i].Resize(5);
this->prefetch_[i].mutable_blob(0)->Reshape(data_dim);
}
data_dim[1] = 1;
data_dim[2] = label_vox_size[0];
data_dim[3] = label_vox_size[1];
data_dim[4] = label_vox_size[2];
for (int i = 1; i < top.size(); ++i) {
top[i]->Reshape(data_dim);
for (int j = 0; j < this->PREFETCH_COUNT; ++j) {
this->prefetch_[j].mutable_blob(i)->Reshape(data_dim);
}
}
// if (top.size() > 5) {
// data_dim[2] = 480;
// data_dim[3] = 640;
// data_dim[4] = 1;
// top[5]->Reshape(data_dim);
// for (int j = 0; j < this->PREFETCH_COUNT; ++j) {
// this->prefetch_[j].mutable_blob(5)->Reshape(data_dim);
// }
// }
//
// out[0]->receptive_field.resize(data_dim.size() - 2); fill_n(out[0]->receptive_field.begin(), data_dim.size() - 2, 1);
// out[0]->receptive_gap.resize(data_dim.size() - 2); fill_n(out[0]->receptive_gap.begin(), data_dim.size() - 2, 1);
// out[0]->receptive_offset.resize(data_dim.size() - 2); fill_n(out[0]->receptive_offset.begin(), data_dim.size() - 2, 0);
// memoryBytes += out[0]->Malloc(data_dim);
//
// // Occupancy label
// out[1]->need_diff = false;
// data_dim[1] = 1;
// data_dim[2] = label_vox_size[0];
// data_dim[3] = label_vox_size[1];
// data_dim[4] = label_vox_size[2];
// memoryBytes += out[1]->Malloc(data_dim);
//
// // Occupancy weight
// out[2]->need_diff = false;
// data_dim[1] = 2;
// memoryBytes += out[2]->Malloc(data_dim);
//
// // Segmentation label
// out[3]->need_diff = false;
// data_dim[1] = 1;
// memoryBytes += out[3]->Malloc(data_dim);
//
// // Segmentation weight
// out[4]->need_diff = false;
// data_dim[1] = num_segmentation_class;
// memoryBytes += out[4]->Malloc(data_dim);
//
// // Segmentation surface weight
// out[5]->need_diff = false;
// memoryBytes += out[5]->Malloc(data_dim);
//
// // prefetch();
// lock = std::async(std::launch::async, &SUNCGDataLayer::prefetch, this);
// return memoryBytes;
}
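// load_batch: for each sample, pick the next valid depth frame, fuse it into a
// (possibly cropped) TSDF volume on the GPU - or complete the TSDF directly
// from the occupancy labels - then downsample the occupancy/segmentation
// labels to the label grid and build the per-class loss weights, sampling a
// subset of background voxels as negatives.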
template<typename Dtype>
void SuncgDataLayer<Dtype>::load_batch(Batch<Dtype> *batch) {
// LOG(INFO) << "Loading " << batch;
const SuncgDataParameter& data_param =
this->layer_param_.suncg_data_param();
Blob<Dtype> *tsdf = nullptr, *occ_label = nullptr, *occ_weight = nullptr;
if (batch->size() > 3) {
occ_label = batch->mutable_blob(3);
occ_weight = batch->mutable_blob(4);
}
// if (data_param.data_type() == SuncgDataParameter_DATA_TSDF) {
// tsdf = batch->mutable_blob(0);
// } else if(data_param.data_type() == SuncgDataParameter_DATA_OCCUPANCY) {
// occ_label = batch->mutable_blob(0);
// }
tsdf = batch->mutable_blob(0);
Blob<Dtype> *seg_label = batch->mutable_blob(1);
Blob<Dtype> *seg_weight = batch->mutable_blob(2);
for (size_t batch_idx = 0; batch_idx < batch_size; ++batch_idx) {
// Get random image
bool is_valid_image = false;
std::string depth_path = "";
while (!is_valid_image) {
depth_path = data_filenames[counter];
is_valid_image =
FileExists(depth_path.substr(0, depth_path.length() - 3) + "bin") &&
FileExists(depth_path);
// increase counter
counter++;
if (counter >= data_filenames.size()) {
counter = 0;
++epoch_prefetch;
if (shuffle_data) Shuffle();
}
}
// Get depth image and copy to GPU
Dtype *depth_data = new Dtype[frame_height * frame_width];
ReadDepthImage(depth_path, depth_data, frame_width, frame_height);
// LOG(INFO) << "Depth data: " << depth_data[frame_width * frame_height / 2];
CUDA_CHECK(cudaMemcpy(depth_data_GPU, depth_data,
frame_height * frame_width * sizeof(Dtype),
cudaMemcpyHostToDevice));
// Get voxel origin (in world coordinates), camera pose, and voxel labels
Dtype vox_origin[3];
Dtype cam_pose[16];
int num_crop_voxels =
data_crop_vox_size[0] * data_crop_vox_size[1] * data_crop_vox_size[2];
int num_full_voxels =
data_full_vox_size[0] * data_full_vox_size[1] * data_full_vox_size[2];
Dtype *occupancy_label_full = new Dtype[num_full_voxels];
Dtype *segmentation_label_full = new Dtype[num_full_voxels];
ReadVoxLabel(depth_path.substr(0, depth_path.length() - 3) + "bin",
vox_origin, cam_pose, occupancy_label_full,
segmentation_class_map, segmentation_label_full);
// Find cropping origin
int crop_origin[3] = {0, 0, 0};
if (is_cropping_data) {
bool crop_vox_found = false;
int max_iter = 100;
int sample_iter = 0;
// Crop a random box out of the full volume
while (!crop_vox_found && sample_iter < max_iter) {
// Compute random cropping origin
crop_origin[0] = 0.0f;
crop_origin[1] = 0.0f;
crop_origin[2] = 0.0f;
if (data_full_vox_size[0] - data_crop_vox_size[0] > 0)
crop_origin[0] = (int) std::floor(GenRandFloat(0.0f, (float) (
data_full_vox_size[0] - data_crop_vox_size[0])));
if (data_full_vox_size[1] - data_crop_vox_size[1] > 0)
crop_origin[1] = (int) std::floor(GenRandFloat(0.0f, (float) (
data_full_vox_size[1] - data_crop_vox_size[1])));
if (data_full_vox_size[2] - data_crop_vox_size[2] > 0)
crop_origin[2] = (int) std::floor(GenRandFloat(0.0f, (float) (
data_full_vox_size[2] - data_crop_vox_size[2])));
sample_iter++;
// Check cropped box is non-empty and contains object classes other than only floor, wall, ceiling
int num_non_empty_voxels = 0;
int num_object_voxels = 0;
for (int x = crop_origin[0];
x < crop_origin[0] + data_crop_vox_size[0]; ++x)
for (int y = crop_origin[1];
y < crop_origin[1] + data_crop_vox_size[1]; ++y)
for (int z = crop_origin[2];
z < crop_origin[2] + data_crop_vox_size[2]; ++z) {
int full_voxel_idx =
z * data_full_vox_size[0] * data_full_vox_size[1] +
y * data_full_vox_size[0] + x;
if (segmentation_label_full[full_voxel_idx] > 0 && segmentation_label_full[full_voxel_idx] < 255)
num_non_empty_voxels++;
if (segmentation_label_full[full_voxel_idx] > 3)
num_object_voxels++;
}
if (num_non_empty_voxels <
data_crop_vox_size[0] * data_crop_vox_size[0] ||
num_object_voxels < data_crop_vox_size[0])
continue;
crop_vox_found = true;
}
}
if (FLAGS_shuran_chatter) {
LOG(INFO) << depth_path << " " << crop_origin[0] << " " << crop_origin[1]
<< " " << crop_origin[2];
}
// Update voxel parameters with new voxel origin (+ cropping origin) in world coordinates
vox_info[5] = vox_origin[0] + (float) (crop_origin[2]) * vox_unit;
vox_info[6] = vox_origin[1] + (float) (crop_origin[0]) * vox_unit;
vox_info[7] = vox_origin[2] + (float) (crop_origin[1]) * vox_unit;
// Update camera information with new camera pose
for (int i = 0; i < 16; ++i)
cam_info[i + 11] = cam_pose[i];
// Update camera information and voxel parameters in GPU
CUDA_CHECK(cudaMemcpy(cam_info_GPU, cam_info, 27 * sizeof(Dtype),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(vox_info_GPU, vox_info, 8 * sizeof(Dtype),
cudaMemcpyHostToDevice));
// Get voxel volume
Dtype *tmp_tsdf_data_GPU =
vox_data_GPU + batch_idx * num_crop_voxels;
GPU_set_value(num_crop_voxels, tmp_tsdf_data_GPU, Dtype(1.0));
Dtype *tmp_vox_height_GPU = NULL;
if (add_height) {
tmp_vox_height_GPU = vox_data_GPU +
batch_idx * data_num_channel * num_crop_voxels +
num_crop_voxels;
GPU_set_zeros(num_crop_voxels, tmp_vox_height_GPU);
}
// Reset voxel weights in GPU
Dtype *vox_weight_CPU = new Dtype[num_crop_voxels];
memset(vox_weight_CPU, 0, num_crop_voxels * sizeof(Dtype));
CUDA_CHECK(cudaMemcpy(vox_weight_GPU, vox_weight_CPU,
num_crop_voxels * sizeof(Dtype),
cudaMemcpyHostToDevice));
// Retrieve cropped labels
Dtype *occupancy_label_crop = new Dtype[num_crop_voxels];
Dtype *segmentation_label_crop = new Dtype[num_crop_voxels];
for (int x = 0; x < data_crop_vox_size[0]; ++x)
for (int y = 0; y < data_crop_vox_size[1]; ++y)
for (int z = 0; z < data_crop_vox_size[2]; ++z) {
int crop_voxel_idx =
z * data_crop_vox_size[0] * data_crop_vox_size[1] +
y * data_crop_vox_size[0] + x;
int full_voxel_idx = (z + crop_origin[2]) * data_full_vox_size[0] *
data_full_vox_size[1] +
(y + crop_origin[1]) * data_full_vox_size[0] +
(x + crop_origin[0]);
occupancy_label_crop[crop_voxel_idx] = occupancy_label_full[full_voxel_idx];
segmentation_label_crop[crop_voxel_idx] = segmentation_label_full[full_voxel_idx];
}
// Fuse frame into voxel volume
if (data_param.data_type() == SuncgDataParameter_DATA_OCCUPANCY ){
Dtype * occupancy_label_crop_GPU;
cudaMalloc(&occupancy_label_crop_GPU, num_crop_voxels* sizeof(Dtype));
cudaMemcpy(occupancy_label_crop_GPU, occupancy_label_crop, num_crop_voxels * sizeof(Dtype), cudaMemcpyHostToDevice);
int THREADS_NUM = 1024;
int BLOCK_NUM = int((num_crop_voxels + size_t(THREADS_NUM) - 1) / THREADS_NUM);
CompleteTSDF<<< BLOCK_NUM, THREADS_NUM >>>(vox_info_GPU, occupancy_label_crop_GPU, tmp_tsdf_data_GPU);
cudaFree(occupancy_label_crop_GPU);
CUDA_CHECK(cudaGetLastError());
}else{
if(data_param.with_projection_tsdf()){
int num_blocks = data_crop_vox_size[2];
int num_threads = data_crop_vox_size[1];
GPU_set_zeros(num_crop_voxels, vox_weight_GPU);
Integrate <<< num_blocks, num_threads >>> (cam_info_GPU, vox_info_GPU, depth_data_GPU, tmp_tsdf_data_GPU, vox_weight_GPU, tmp_vox_height_GPU);
CUDA_CHECK(cudaGetLastError());
}else{
ComputeTSDF(cam_info, vox_info, cam_info_GPU, vox_info_GPU, depth_data_GPU, tmp_tsdf_data_GPU, tmp_vox_height_GPU);
CUDA_CHECK(cudaGetLastError());
}
}
// Copy voxel volume back to CPU
Dtype *vox_tsdf = new Dtype[num_crop_voxels];
CUDA_CHECK(cudaMemcpy(vox_tsdf, tmp_tsdf_data_GPU,
num_crop_voxels * sizeof(Dtype),
cudaMemcpyDeviceToHost));
if (tsdf != nullptr) {
memcpy(tsdf->mutable_cpu_data() + num_crop_voxels * batch_idx * data_num_channel,
vox_tsdf, num_crop_voxels * sizeof(Dtype));
}
//adding height to floor
if (add_height){
cudaMemcpy(tsdf->mutable_cpu_data() + num_crop_voxels * batch_idx * data_num_channel + num_crop_voxels,
tmp_vox_height_GPU, num_crop_voxels * sizeof(Dtype), cudaMemcpyDeviceToHost);
}
// Downsample label with scale
int num_label_voxels =
label_vox_size[0] * label_vox_size[1] * label_vox_size[2];
int label_downscale = (data_crop_vox_size[0] / label_vox_size[0]);
Dtype *occupancy_label_downscale = new Dtype[num_label_voxels];
Dtype *segmentation_label_downscale = new Dtype[num_label_voxels];
Dtype *tsdf_data_downscale = new Dtype[num_label_voxels];
if (label_downscale > 1){
DownsampleLabel(data_crop_vox_size, label_vox_size, label_downscale,
occupancy_label_crop, occupancy_label_downscale,
segmentation_label_crop, segmentation_label_downscale,
vox_tsdf, tsdf_data_downscale);
}else{
if (FLAGS_shuran_chatter) {
LOG(INFO) << "label_downscale: " << label_downscale;
}
memcpy(occupancy_label_downscale , occupancy_label_crop, num_label_voxels * sizeof(Dtype));
memcpy(segmentation_label_downscale , segmentation_label_crop, num_label_voxels * sizeof(Dtype));
memcpy(tsdf_data_downscale , vox_tsdf, num_label_voxels * sizeof(Dtype));
}
// Copy labels to GPU
// CUDA_CHECK(
// cudaMemcpy(occupancy_label_GPU + batch_idx * num_label_voxels,
// occupancy_label_downscale,
// num_label_voxels * sizeof(Dtype),
// cudaMemcpyHostToDevice));
// CUDA_CHECK(
// cudaMemcpy(segmentation_label_GPU + batch_idx * num_label_voxels,
// segmentation_label_downscale,
// num_label_voxels * sizeof(Dtype),
// cudaMemcpyHostToDevice));
if (occ_label != nullptr) {
memcpy(occ_label->mutable_cpu_data() + batch_idx * num_label_voxels,
occupancy_label_downscale, num_label_voxels * sizeof(Dtype));
}
memcpy(seg_label->mutable_cpu_data() + batch_idx * num_label_voxels,
segmentation_label_downscale, num_label_voxels * sizeof(Dtype));
// Find number of occupied voxels
// Save voxel indices of background
// Set label weights of occupied voxels as 1
int num_occ_voxels = 0;
std::vector<int> bg_voxel_idx;
Dtype *occupancy_weight = new Dtype[num_label_voxels];
Dtype *segmentation_weight = new Dtype[num_label_voxels];
//Dtype *segmentation_surf_weight = new Dtype[num_label_voxels];
memset(occupancy_weight, 0, num_label_voxels * sizeof(Dtype));
memset(segmentation_weight, 0, num_label_voxels * sizeof(Dtype));
//memset(segmentation_surf_weight, 0, num_label_voxels * sizeof(Dtype));
for (int i = 0; i < num_label_voxels; ++i) {
if (Dtype(occupancy_label_downscale[i]) > 0) {
if (tsdf_data_downscale[i] < -0.5) {
// foreground voxels in unobserved region
num_occ_voxels++;
occupancy_weight[i] = Dtype(occupancy_class_weight[1]);
}
} else {
if (tsdf_data_downscale[i] < -0.5) {
bg_voxel_idx.push_back(i); // background voxels in unobserved region
}
}
if (Dtype(segmentation_label_downscale[i]) > 0 && Dtype(segmentation_label_downscale[i]) < 255) {
// foreground voxels within room
if (surf_only){
if(abs(tsdf_data_downscale[i]) < 0.5){
segmentation_weight[i] = Dtype(segmentation_class_weight[(int) segmentation_label_downscale[i]]);
}
}else{
segmentation_weight[i] = Dtype(segmentation_class_weight[(int) segmentation_label_downscale[i]]);
}
// if (abs(tsdf_data_downscale[i]) < 0.5) {
// segmentation_surf_weight[i] = Dtype(
// segmentation_class_weight[(int) (segmentation_label_downscale[i])]);
// }
}
}
// Raise the weight for a few indices of background voxels
std::random_device tmp_rand_rd;
std::mt19937 tmp_rand_mt(tmp_rand_rd());
int segnegcout = 0;
int segnegtotal = floor(sample_neg_obj_ratio * (float) num_occ_voxels);
if (bg_voxel_idx.size() > 0) {
std::uniform_real_distribution<double> tmp_rand_dist(
0, (float) (bg_voxel_idx.size()) - 0.0001);
for (int i = 0; i < num_occ_voxels; ++i) {
int rand_idx = (int) (std::floor(tmp_rand_dist(tmp_rand_mt)));
occupancy_weight[bg_voxel_idx[rand_idx]] = Dtype(
occupancy_class_weight[0]);
if (segnegcout < segnegtotal && Dtype(segmentation_label_downscale[bg_voxel_idx[rand_idx]]) < 255 ) {
// background voxels within room
segmentation_weight[bg_voxel_idx[rand_idx]] = Dtype(
segmentation_class_weight[0]);
segnegcout++;
}
}
}
if (occ_weight != nullptr) {
memcpy(occ_weight->mutable_cpu_data() + batch_idx * num_label_voxels,
occupancy_weight, num_label_voxels * sizeof(Dtype));
}
memcpy(seg_weight->mutable_cpu_data() + batch_idx * num_label_voxels,
segmentation_weight, num_label_voxels * sizeof(Dtype));
// // Visualize
//SaveVox2Ply("vis_tsdf_" + std::to_string(batch_idx) + ".ply", data_crop_vox_size, vox_tsdf); // "vis_tsdf_" + data_filenames[counter] + ".ply"
// if (add_height) {
// Dtype * vox_height = new Dtype[num_crop_voxels];
// CUDA_CHECK(cudaMemcpy(vox_height, tmp_vox_height_GPU, num_crop_voxels * sizeof(Dtype), cudaMemcpyDeviceToHost));
// SaveVoxHeight2Ply("vis_height_" + std::to_string(batch_idx) + ".ply", data_crop_vox_size, vox_height);
// delete [] vox_height;
// }
// SaveVox2Ply("vis_tsdf_" + std::to_string(batch_idx) + ".ply", label_vox_size, tsdf_data_downscale);
// SaveVoxLabel2Ply("vis_occ_label_" + std::to_string(batch_idx) + ".ply", label_vox_size, label_downscale, occupancy_label_downscale);
// SaveVoxLabel2Ply("vis_seg_label_" + std::to_string(batch_idx) + ".ply", label_vox_size, label_downscale, segmentation_label_downscale);
// SaveVoxWeight2Ply("vis_occ_weight_" + std::to_string(batch_idx) + ".ply", label_vox_size, label_downscale, occupancy_weight);
// SaveVoxWeight2Ply("vis_seg_weight_" + std::to_string(batch_idx) + ".ply", label_vox_size, label_downscale, segmentation_weight);
// SaveVoxWeight2Ply("vis_seg_surf_weight_" + std::to_string(batch_idx) + ".ply", label_vox_size, label_downscale, segmentation_surf_weight);
if (data_param.tsdf_type() > 0) {
// transform TSDF if necessary
int THREADS_NUM = 1024;
int BLOCK_NUM = int((num_crop_voxels + size_t(THREADS_NUM) - 1) / THREADS_NUM);
tsdfTransform <<< BLOCK_NUM, THREADS_NUM >>> (vox_info_GPU, tmp_tsdf_data_GPU, data_param.tsdf_type());
CUDA_CHECK(cudaMemcpy(tsdf->mutable_cpu_data() + num_crop_voxels * batch_idx, tmp_tsdf_data_GPU,
num_crop_voxels * sizeof(Dtype),
cudaMemcpyDeviceToHost));
}
// Free memory
delete[] depth_data;
delete[] vox_tsdf;
delete[] vox_weight_CPU;
delete[] tsdf_data_downscale;
delete[] occupancy_label_full;
delete[] occupancy_label_crop;
delete[] occupancy_label_downscale;
delete[] occupancy_weight;
delete[] segmentation_label_full;
delete[] segmentation_label_crop;
delete[] segmentation_label_downscale;
delete[] segmentation_weight;
//delete[] segmentation_surf_weight;
}
}
template<typename Dtype>
int SuncgDataLayer<Dtype>::numofitems() {
return data_filenames.size();
};
template<typename Dtype>
void SuncgDataLayer<Dtype>::Shuffle() {
//std::shuffle(sceneMetaList.begin(),sceneMetaList.end(), rng );
caffe::rng_t *rng = static_cast<caffe::rng_t *>(rng_->generator());
shuffle(data_filenames.begin(), data_filenames.end(), rng);
return;
};
INSTANTIATE_CLASS(SuncgDataLayer);
REGISTER_LAYER_CLASS(SuncgData);
} // caffe
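// Illustrative sketch only (not part of the original layer): the launch-size
// arithmetic used above for CompleteTSDF and tsdfTransform, factored into a
// small helper. The name `CeilDiv` is hypothetical.
static inline int CeilDiv(size_t n, int threads_per_block) {
  return static_cast<int>((n + static_cast<size_t>(threads_per_block) - 1) /
                          static_cast<size_t>(threads_per_block));
}
// Usage, assuming the buffers and sizes defined in the layer above:
//   const int threads = 1024;
//   tsdfTransform<<<CeilDiv(num_crop_voxels, threads), threads>>>(
//       vox_info_GPU, tmp_tsdf_data_GPU, data_param.tsdf_type());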
// ---------------------------------------------------------------------------
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
struct compute_sphere_vertices_functor {
compute_sphere_vertices_functor(int resolution, float radius)
: resolution_(resolution), radius_(radius) {
step_ = M_PI / (float)resolution;
};
const int resolution_;
const float radius_;
float step_;
__device__ Eigen::Vector3f operator()(size_t idx) const {
int i = idx / (2 * resolution_) + 1;
int j = idx % (2 * resolution_);
float alpha = step_ * i;
float theta = step_ * j;
const float sa = sinf(alpha);
return Eigen::Vector3f(sa * cosf(theta), sa * sinf(theta),
cosf(alpha)) *
radius_;
}
};
struct compute_sphere_triangles_functor1 {
compute_sphere_triangles_functor1(Eigen::Vector3i *triangle, int resolution)
: triangles_(triangle), resolution_(resolution){};
Eigen::Vector3i *triangles_;
const int resolution_;
__device__ void operator()(size_t idx) {
int j1 = (idx + 1) % (2 * resolution_);
int base = 2;
triangles_[2 * idx] = Eigen::Vector3i(0, base + idx, base + j1);
base = 2 + 2 * resolution_ * (resolution_ - 2);
triangles_[2 * idx + 1] = Eigen::Vector3i(1, base + j1, base + idx);
}
};
struct compute_sphere_triangles_functor2 {
compute_sphere_triangles_functor2(Eigen::Vector3i *triangle,
int resolution,
int initial_base = 2)
: triangles_(triangle),
resolution_(resolution),
initial_base_(initial_base){};
Eigen::Vector3i *triangles_;
const int resolution_;
const int initial_base_;
__device__ void operator()(size_t idx) {
int i = idx / (2 * resolution_) + 1;
int j = idx % (2 * resolution_);
int base1 = initial_base_ + 2 * resolution_ * (i - 1);
int base2 = base1 + 2 * resolution_;
int j1 = (j + 1) % (2 * resolution_);
triangles_[2 * idx] = Eigen::Vector3i(base2 + j, base1 + j1, base1 + j);
triangles_[2 * idx + 1] =
Eigen::Vector3i(base2 + j, base2 + j1, base1 + j1);
}
};
struct compute_half_sphere_triangles_functor1 {
compute_half_sphere_triangles_functor1(Eigen::Vector3i *triangle,
int resolution)
: triangles_(triangle), resolution_(resolution){};
Eigen::Vector3i *triangles_;
const int resolution_;
__device__ void operator()(size_t idx) {
int j1 = (idx + 1) % (2 * resolution_);
int base = 1;
triangles_[idx] = Eigen::Vector3i(0, base + idx, base + j1);
}
};
struct compute_cylinder_vertices_functor {
compute_cylinder_vertices_functor(int resolution,
float radius,
float height,
float step,
float h_step)
: resolution_(resolution),
radius_(radius),
height_(height),
step_(step),
h_step_(h_step){};
const int resolution_;
const float radius_;
const float height_;
const float step_;
const float h_step_;
__device__ Eigen::Vector3f operator()(size_t idx) const {
int i = idx / resolution_;
int j = idx % resolution_;
float theta = step_ * j;
return Eigen::Vector3f(cosf(theta) * radius_, sinf(theta) * radius_,
height_ * 0.5 - h_step_ * i);
}
};
struct compute_cylinder_triangles_functor1 {
compute_cylinder_triangles_functor1(Eigen::Vector3i *triangle,
int resolution,
int split)
: triangles_(triangle), resolution_(resolution), split_(split){};
Eigen::Vector3i *triangles_;
const int resolution_;
const int split_;
__device__ void operator()(size_t idx) {
int j1 = (idx + 1) % resolution_;
int base = 2;
triangles_[2 * idx] = Eigen::Vector3i(0, base + idx, base + j1);
base = 2 + resolution_ * split_;
triangles_[2 * idx + 1] = Eigen::Vector3i(1, base + j1, base + idx);
}
};
struct compute_cylinder_triangles_functor2 {
compute_cylinder_triangles_functor2(Eigen::Vector3i *triangle,
int resolution,
int initial_base = 2)
: triangles_(triangle),
resolution_(resolution),
initial_base_(initial_base){};
Eigen::Vector3i *triangles_;
const int resolution_;
const int initial_base_;
__device__ void operator()(size_t idx) {
int i = idx / resolution_;
int j = idx % resolution_;
int base1 = initial_base_ + resolution_ * i;
int base2 = base1 + resolution_;
int j1 = (j + 1) % resolution_;
triangles_[2 * idx] = Eigen::Vector3i(base2 + j, base1 + j1, base1 + j);
triangles_[2 * idx + 1] =
Eigen::Vector3i(base2 + j, base2 + j1, base1 + j1);
}
};
struct compute_cone_vertices_functor {
compute_cone_vertices_functor(
int resolution, int split, float step, float r_step, float h_step)
: resolution_(resolution),
split_(split),
step_(step),
r_step_(r_step),
h_step_(h_step){};
const int resolution_;
const int split_;
const float step_;
const float r_step_;
const float h_step_;
__device__ Eigen::Vector3f operator()(size_t idx) const {
int i = idx / resolution_;
int j = idx % resolution_;
float r = r_step_ * (split_ - i);
float theta = step_ * j;
return Eigen::Vector3f(cosf(theta) * r, sinf(theta) * r, h_step_ * i);
}
};
struct compute_cone_triangles_functor1 {
compute_cone_triangles_functor1(Eigen::Vector3i *triangle,
int resolution,
int split)
: triangles_(triangle), resolution_(resolution), split_(split){};
Eigen::Vector3i *triangles_;
const int resolution_;
const int split_;
__device__ void operator()(size_t idx) {
int j1 = (idx + 1) % resolution_;
int base = 2;
triangles_[2 * idx] = Eigen::Vector3i(0, base + j1, base + idx);
base = 2 + resolution_ * (split_ - 1);
triangles_[2 * idx + 1] = Eigen::Vector3i(1, base + idx, base + j1);
}
};
struct compute_cone_triangles_functor2 {
compute_cone_triangles_functor2(Eigen::Vector3i *triangle, int resolution)
: triangles_(triangle), resolution_(resolution){};
Eigen::Vector3i *triangles_;
const int resolution_;
__device__ void operator()(size_t idx) {
int i = idx / resolution_;
int j = idx % resolution_;
int base1 = 2 + resolution_ * i;
int base2 = base1 + resolution_;
int j1 = (j + 1) % resolution_;
triangles_[2 * idx] =
Eigen::Vector3i(base2 + j1, base1 + j, base1 + j1);
triangles_[2 * idx + 1] =
Eigen::Vector3i(base2 + j1, base2 + j, base1 + j);
}
};
struct compute_torus_mesh_functor {
compute_torus_mesh_functor(Eigen::Vector3f *vertices,
Eigen::Vector3i *triangles,
float torus_radius,
float tube_radius,
int radial_resolution,
int tubular_resolution)
: vertices_(vertices),
triangles_(triangles),
torus_radius_(torus_radius),
tube_radius_(tube_radius),
radial_resolution_(radial_resolution),
tubular_resolution_(tubular_resolution),
u_step_(2 * M_PI / float(radial_resolution_)),
v_step_(2 * M_PI / float(tubular_resolution_)){};
Eigen::Vector3f *vertices_;
Eigen::Vector3i *triangles_;
const float torus_radius_;
const float tube_radius_;
const int radial_resolution_;
const int tubular_resolution_;
const float u_step_;
const float v_step_;
__device__ int vert_idx(int uidx, int vidx) const {
return uidx * tubular_resolution_ + vidx;
};
__device__ void operator()(size_t idx) {
int uidx = idx / tubular_resolution_;
int vidx = idx % tubular_resolution_;
float u = uidx * u_step_;
Eigen::Vector3f w(cos(u), sin(u), 0);
float v = vidx * v_step_;
vertices_[vert_idx(uidx, vidx)] =
torus_radius_ * w + tube_radius_ * cos(v) * w +
Eigen::Vector3f(0, 0, tube_radius_ * sin(v));
int tri_idx = (uidx * tubular_resolution_ + vidx) * 2;
triangles_[tri_idx + 0] =
Eigen::Vector3i(vert_idx((uidx + 1) % radial_resolution_, vidx),
vert_idx((uidx + 1) % radial_resolution_,
(vidx + 1) % tubular_resolution_),
vert_idx(uidx, vidx));
triangles_[tri_idx + 1] = Eigen::Vector3i(
vert_idx(uidx, vidx),
vert_idx((uidx + 1) % radial_resolution_,
(vidx + 1) % tubular_resolution_),
vert_idx(uidx, (vidx + 1) % tubular_resolution_));
}
};
struct compute_moebius_vertices_functor {
compute_moebius_vertices_functor(int length_split,
int width_split,
int twists,
float radius,
float flatness,
float width,
float scale)
: width_split_(width_split),
twists_(twists),
radius_(radius),
flatness_(flatness),
width_(width),
scale_(scale),
u_step_(2 * M_PI / length_split),
v_step_(width / (width_split - 1)){};
const int width_split_;
const int twists_;
const float radius_;
const float flatness_;
const float width_;
const float scale_;
const float u_step_;
const float v_step_;
__device__ Eigen::Vector3f operator()(size_t idx) {
int uidx = idx / width_split_;
int vidx = idx % width_split_;
float u = uidx * u_step_;
float cos_u = cos(u);
float sin_u = sin(u);
float v = -width_ / 2.0 + vidx * v_step_;
float alpha = twists_ * 0.5 * u;
float cos_alpha = cos(alpha);
float sin_alpha = sin(alpha);
return Eigen::Vector3f(
scale_ * ((cos_alpha * cos_u * v) + radius_ * cos_u),
scale_ * ((cos_alpha * sin_u * v) + radius_ * sin_u),
scale_ * sin_alpha * v * flatness_);
}
};
struct compute_moebius_triangles_functor {
compute_moebius_triangles_functor(Eigen::Vector3i *triangles,
int length_split,
int width_split,
int twists)
: triangles_(triangles),
length_split_(length_split),
width_split_(width_split),
twists_(twists){};
Eigen::Vector3i *triangles_;
const int length_split_;
const int width_split_;
const int twists_;
__device__ void operator()(size_t idx) {
int uidx = idx / (width_split_ - 1);
int vidx = idx % (width_split_ - 1);
if (uidx == length_split_ - 1) {
if (twists_ % 2 == 1) {
if ((uidx + vidx) % 2 == 0) {
triangles_[idx * 2] =
Eigen::Vector3i((width_split_ - 1) - (vidx + 1),
uidx * width_split_ + vidx,
uidx * width_split_ + vidx + 1);
triangles_[idx * 2 + 1] =
Eigen::Vector3i((width_split_ - 1) - vidx,
uidx * width_split_ + vidx,
(width_split_ - 1) - (vidx + 1));
} else {
triangles_[idx * 2] =
Eigen::Vector3i(uidx * width_split_ + vidx,
uidx * width_split_ + vidx + 1,
(width_split_ - 1) - vidx);
triangles_[idx * 2 + 1] =
Eigen::Vector3i((width_split_ - 1) - vidx,
uidx * width_split_ + vidx + 1,
(width_split_ - 1) - (vidx + 1));
}
} else {
if ((uidx + vidx) % 2 == 0) {
triangles_[idx * 2] = Eigen::Vector3i(
uidx * width_split_ + vidx, vidx + 1,
uidx * width_split_ + vidx + 1);
triangles_[idx * 2 + 1] = Eigen::Vector3i(
uidx * width_split_ + vidx, vidx, vidx + 1);
} else {
triangles_[idx * 2] =
Eigen::Vector3i(uidx * width_split_ + vidx, vidx,
uidx * width_split_ + vidx + 1);
triangles_[idx * 2 + 1] = Eigen::Vector3i(
uidx * width_split_ + vidx + 1, vidx, vidx + 1);
}
}
} else {
if ((uidx + vidx) % 2 == 0) {
triangles_[idx * 2] =
Eigen::Vector3i(uidx * width_split_ + vidx,
(uidx + 1) * width_split_ + vidx + 1,
uidx * width_split_ + vidx + 1);
triangles_[idx * 2 + 1] =
Eigen::Vector3i(uidx * width_split_ + vidx,
(uidx + 1) * width_split_ + vidx,
(uidx + 1) * width_split_ + vidx + 1);
} else {
triangles_[idx * 2] =
Eigen::Vector3i(uidx * width_split_ + vidx + 1,
uidx * width_split_ + vidx,
(uidx + 1) * width_split_ + vidx);
triangles_[idx * 2 + 1] =
Eigen::Vector3i(uidx * width_split_ + vidx + 1,
(uidx + 1) * width_split_ + vidx,
(uidx + 1) * width_split_ + vidx + 1);
}
}
}
};
} // namespace
std::shared_ptr<TriangleMesh> TriangleMesh::CreateTetrahedron(
float radius /* = 1.0*/) {
auto mesh = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateTetrahedron] radius <= 0");
}
mesh->vertices_.push_back(radius *
Eigen::Vector3f(std::sqrt(8. / 9.), 0, -1. / 3.));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-std::sqrt(2. / 9.),
std::sqrt(2. / 3.),
-1. / 3.));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-std::sqrt(2. / 9.),
-std::sqrt(2. / 3.),
-1. / 3.));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0., 0., 1.));
mesh->triangles_.push_back(Eigen::Vector3i(0, 2, 1));
mesh->triangles_.push_back(Eigen::Vector3i(0, 3, 2));
mesh->triangles_.push_back(Eigen::Vector3i(0, 1, 3));
mesh->triangles_.push_back(Eigen::Vector3i(1, 2, 3));
return mesh;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateOctahedron(
float radius /* = 1.0*/) {
auto mesh = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateOctahedron] radius <= 0");
}
mesh->vertices_.push_back(radius * Eigen::Vector3f(1, 0, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, 1, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, 0, 1));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-1, 0, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, -1, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, 0, -1));
mesh->triangles_.push_back(Eigen::Vector3i(0, 1, 2));
mesh->triangles_.push_back(Eigen::Vector3i(1, 3, 2));
mesh->triangles_.push_back(Eigen::Vector3i(3, 4, 2));
mesh->triangles_.push_back(Eigen::Vector3i(4, 0, 2));
mesh->triangles_.push_back(Eigen::Vector3i(0, 5, 1));
mesh->triangles_.push_back(Eigen::Vector3i(1, 5, 3));
mesh->triangles_.push_back(Eigen::Vector3i(3, 5, 4));
mesh->triangles_.push_back(Eigen::Vector3i(4, 5, 0));
return mesh;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateIcosahedron(
float radius /* = 1.0*/) {
auto mesh = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateIcosahedron] radius <= 0");
}
const float p = (1. + std::sqrt(5.)) / 2.;
mesh->vertices_.push_back(radius * Eigen::Vector3f(-1, 0, p));
mesh->vertices_.push_back(radius * Eigen::Vector3f(1, 0, p));
mesh->vertices_.push_back(radius * Eigen::Vector3f(1, 0, -p));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-1, 0, -p));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, -p, 1));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, p, 1));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, p, -1));
mesh->vertices_.push_back(radius * Eigen::Vector3f(0, -p, -1));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-p, -1, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(p, -1, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(p, 1, 0));
mesh->vertices_.push_back(radius * Eigen::Vector3f(-p, 1, 0));
mesh->triangles_.push_back(Eigen::Vector3i(0, 4, 1));
mesh->triangles_.push_back(Eigen::Vector3i(0, 1, 5));
mesh->triangles_.push_back(Eigen::Vector3i(1, 4, 9));
mesh->triangles_.push_back(Eigen::Vector3i(1, 9, 10));
mesh->triangles_.push_back(Eigen::Vector3i(1, 10, 5));
mesh->triangles_.push_back(Eigen::Vector3i(0, 8, 4));
mesh->triangles_.push_back(Eigen::Vector3i(0, 11, 8));
mesh->triangles_.push_back(Eigen::Vector3i(0, 5, 11));
mesh->triangles_.push_back(Eigen::Vector3i(5, 6, 11));
mesh->triangles_.push_back(Eigen::Vector3i(5, 10, 6));
mesh->triangles_.push_back(Eigen::Vector3i(4, 8, 7));
mesh->triangles_.push_back(Eigen::Vector3i(4, 7, 9));
mesh->triangles_.push_back(Eigen::Vector3i(3, 6, 2));
mesh->triangles_.push_back(Eigen::Vector3i(3, 2, 7));
mesh->triangles_.push_back(Eigen::Vector3i(2, 6, 10));
mesh->triangles_.push_back(Eigen::Vector3i(2, 10, 9));
mesh->triangles_.push_back(Eigen::Vector3i(2, 9, 7));
mesh->triangles_.push_back(Eigen::Vector3i(3, 11, 6));
mesh->triangles_.push_back(Eigen::Vector3i(3, 8, 11));
mesh->triangles_.push_back(Eigen::Vector3i(3, 7, 8));
return mesh;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateBox(float width /* = 1.0*/,
float height /* = 1.0*/,
float depth /* = 1.0*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (width <= 0) {
utility::LogError("[CreateBox] width <= 0");
}
if (height <= 0) {
utility::LogError("[CreateBox] height <= 0");
}
if (depth <= 0) {
utility::LogError("[CreateBox] depth <= 0");
}
mesh_ptr->vertices_.resize(8);
mesh_ptr->vertices_[0] = Eigen::Vector3f(0.0, 0.0, 0.0);
mesh_ptr->vertices_[1] = Eigen::Vector3f(width, 0.0, 0.0);
mesh_ptr->vertices_[2] = Eigen::Vector3f(0.0, 0.0, depth);
mesh_ptr->vertices_[3] = Eigen::Vector3f(width, 0.0, depth);
mesh_ptr->vertices_[4] = Eigen::Vector3f(0.0, height, 0.0);
mesh_ptr->vertices_[5] = Eigen::Vector3f(width, height, 0.0);
mesh_ptr->vertices_[6] = Eigen::Vector3f(0.0, height, depth);
mesh_ptr->vertices_[7] = Eigen::Vector3f(width, height, depth);
mesh_ptr->triangles_.push_back(Eigen::Vector3i(4, 7, 5));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(4, 6, 7));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(0, 2, 4));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(2, 6, 4));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(0, 1, 2));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(1, 3, 2));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(1, 5, 7));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(1, 7, 3));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(2, 3, 7));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(2, 7, 6));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(0, 4, 1));
mesh_ptr->triangles_.push_back(Eigen::Vector3i(1, 4, 5));
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateSphere(
float radius /* = 1.0*/, int resolution /* = 20*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateSphere] radius <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateSphere] resolution <= 0");
}
size_t n_vertices = 2 * resolution * (resolution - 1) + 2;
mesh_ptr->vertices_.resize(n_vertices);
mesh_ptr->vertices_[0] = Eigen::Vector3f(0.0, 0.0, radius);
mesh_ptr->vertices_[1] = Eigen::Vector3f(0.0, 0.0, -radius);
compute_sphere_vertices_functor func_vt(resolution, radius);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_vertices - 2),
mesh_ptr->vertices_.begin() + 2, func_vt);
mesh_ptr->triangles_.resize(4 * resolution +
4 * (resolution - 2) * resolution);
compute_sphere_triangles_functor1 func_tr1(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()), resolution);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(2 * resolution),
func_tr1);
compute_sphere_triangles_functor2 func_tr2(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()) +
4 * resolution,
resolution);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(
2 * (resolution - 1) * resolution),
func_tr2);
return mesh_ptr;
}
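// Hedged usage sketch (illustrative only; `demo_sphere` is not part of cupoch):
// for resolution r, CreateSphere above allocates 2*r*(r-1)+2 vertices and
// 4*r + 4*(r-2)*r triangles, so r = 20 yields 762 vertices and 1520 triangles.
inline std::shared_ptr<TriangleMesh> demo_sphere() {
    return TriangleMesh::CreateSphere(/*radius=*/1.0f, /*resolution=*/20);
}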
std::shared_ptr<TriangleMesh> TriangleMesh::CreateHalfSphere(
float radius /* = 1.0*/, int resolution /* = 20*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateHalfSphere] radius <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateHalfSphere] resolution <= 0");
}
size_t n_vertices = resolution * resolution + 1;
mesh_ptr->vertices_.resize(n_vertices);
mesh_ptr->vertices_[0] = Eigen::Vector3f(0.0, 0.0, radius);
compute_sphere_vertices_functor func_vt(resolution, radius);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_vertices - 1),
mesh_ptr->vertices_.begin() + 1, func_vt);
mesh_ptr->triangles_.resize(2 * resolution +
4 * (resolution / 2 - 1) * resolution);
compute_half_sphere_triangles_functor1 func_tr1(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()), resolution);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(2 * resolution),
func_tr1);
compute_sphere_triangles_functor2 func_tr2(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()) +
2 * resolution,
resolution, 1);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(
2 * (resolution / 2 - 1) * resolution),
func_tr2);
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateCylinder(
float radius /* = 1.0*/,
float height /* = 2.0*/,
int resolution /* = 20*/,
int split /* = 4*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateCylinder] radius <= 0");
}
if (height <= 0) {
utility::LogError("[CreateCylinder] height <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateCylinder] resolution <= 0");
}
if (split <= 0) {
utility::LogError("[CreateCylinder] split <= 0");
}
size_t n_vertices = resolution * (split + 1) + 2;
mesh_ptr->vertices_.resize(n_vertices);
mesh_ptr->vertices_[0] = Eigen::Vector3f(0.0, 0.0, height * 0.5);
mesh_ptr->vertices_[1] = Eigen::Vector3f(0.0, 0.0, -height * 0.5);
float step = M_PI * 2.0 / (float)resolution;
float h_step = height / (float)split;
compute_cylinder_vertices_functor func_vt(resolution, radius, height, step,
h_step);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(n_vertices - 2),
mesh_ptr->vertices_.begin() + 2, func_vt);
mesh_ptr->triangles_.resize(2 * resolution + 2 * split * resolution);
compute_cylinder_triangles_functor1 func_tr1(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()), resolution,
split);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(resolution), func_tr1);
compute_cylinder_triangles_functor2 func_tr2(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()) +
2 * resolution,
resolution);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(resolution * split),
func_tr2);
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateTube(float radius /* = 1.0*/,
float height /* = 2.0*/,
int resolution /* = 20*/,
int split /* = 4*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateTube] radius <= 0");
}
if (height <= 0) {
utility::LogError("[CreateTube] height <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateTube] resolution <= 0");
}
if (split <= 0) {
utility::LogError("[CreateTube] split <= 0");
}
size_t n_vertices = resolution * (split + 1);
mesh_ptr->vertices_.resize(n_vertices);
float step = M_PI * 2.0 / (float)resolution;
float h_step = height / (float)split;
compute_cylinder_vertices_functor func_vt(resolution, radius, height, step,
h_step);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(n_vertices),
mesh_ptr->vertices_.begin(), func_vt);
mesh_ptr->triangles_.resize(2 * split * resolution);
compute_cylinder_triangles_functor2 func_tr2(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()), resolution,
0);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(resolution * split),
func_tr2);
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateCapsule(
float radius /* = 1.0*/,
float height /* = 2.0*/,
int resolution /* = 20*/,
int split /* = 4*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateCapsule] radius <= 0");
}
if (height <= 0) {
utility::LogError("[CreateCapsule] height <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateCapsule] resolution <= 0");
}
if (split <= 0) {
utility::LogError("[CreateCapsule] split <= 0");
}
Eigen::Matrix4f transform;
auto mesh_top = CreateHalfSphere(radius, resolution);
transform << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, height / 2.0, 0, 0, 0, 1;
mesh_top->Transform(transform);
auto mesh_bottom = CreateHalfSphere(radius, resolution);
transform << 1, 0, 0, 0, 0, -1, 0, 0, 0, 0, -1, -height / 2.0, 0, 0, 0, 1;
mesh_bottom->Transform(transform);
mesh_ptr = CreateTube(radius, height, resolution, split);
*mesh_ptr += *mesh_top;
*mesh_ptr += *mesh_bottom;
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateCone(float radius /* = 1.0*/,
float height /* = 2.0*/,
int resolution /* = 20*/,
int split /* = 4*/) {
auto mesh_ptr = std::make_shared<TriangleMesh>();
if (radius <= 0) {
utility::LogError("[CreateCone] radius <= 0");
}
if (height <= 0) {
utility::LogError("[CreateCone] height <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateCone] resolution <= 0");
}
if (split <= 0) {
utility::LogError("[CreateCone] split <= 0");
}
mesh_ptr->vertices_.resize(resolution * split + 2);
mesh_ptr->vertices_[0] = Eigen::Vector3f(0.0, 0.0, 0.0);
mesh_ptr->vertices_[1] = Eigen::Vector3f(0.0, 0.0, height);
float step = M_PI * 2.0 / (float)resolution;
float h_step = height / (float)split;
float r_step = radius / (float)split;
compute_cone_vertices_functor func_vt(resolution, split, step, r_step,
h_step);
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(resolution * split),
mesh_ptr->vertices_.begin() + 2, func_vt);
mesh_ptr->triangles_.resize(2 * resolution + 2 * (split - 1) * resolution);
compute_cone_triangles_functor1 func_tr1(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()), resolution,
split);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(resolution),
func_tr1);
compute_cone_triangles_functor2 func_tr2(
thrust::raw_pointer_cast(mesh_ptr->triangles_.data()) +
2 * resolution,
resolution);
thrust::for_each(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>((split - 1) * resolution),
func_tr2);
return mesh_ptr;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateTorus(
float torus_radius /* = 1.0 */,
float tube_radius /* = 0.5 */,
int radial_resolution /* = 20 */,
int tubular_resolution /* = 20 */) {
auto mesh = std::make_shared<TriangleMesh>();
if (torus_radius <= 0) {
utility::LogError("[CreateTorus] torus_radius <= 0");
}
if (tube_radius <= 0) {
utility::LogError("[CreateTorus] tube_radius <= 0");
}
if (radial_resolution <= 0) {
utility::LogError("[CreateTorus] radial_resolution <= 0");
}
if (tubular_resolution <= 0) {
utility::LogError("[CreateTorus] tubular_resolution <= 0");
}
mesh->vertices_.resize(radial_resolution * tubular_resolution);
mesh->triangles_.resize(2 * radial_resolution * tubular_resolution);
compute_torus_mesh_functor func(
thrust::raw_pointer_cast(mesh->vertices_.data()),
thrust::raw_pointer_cast(mesh->triangles_.data()), torus_radius,
tube_radius, radial_resolution, tubular_resolution);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(radial_resolution *
tubular_resolution),
func);
return mesh;
}
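// Hedged host-side reference of the parametrization used by
// compute_torus_mesh_functor above (illustrative only; assumes <cmath> is
// available via the existing Eigen includes): vertex (u, v) lies at
// (R + r*cos(v)) * (cos(u), sin(u), 0) + (0, 0, r*sin(v)).
inline Eigen::Vector3f torus_vertex_reference(float torus_radius,
                                              float tube_radius,
                                              float u,
                                              float v) {
    const Eigen::Vector3f w(std::cos(u), std::sin(u), 0.0f);
    return torus_radius * w + tube_radius * std::cos(v) * w +
           Eigen::Vector3f(0.0f, 0.0f, tube_radius * std::sin(v));
}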
std::shared_ptr<TriangleMesh> TriangleMesh::CreateArrow(
float cylinder_radius /* = 1.0*/,
float cone_radius /* = 1.5*/,
float cylinder_height /* = 5.0*/,
float cone_height /* = 4.0*/,
int resolution /* = 20*/,
int cylinder_split /* = 4*/,
int cone_split /* = 1*/) {
if (cylinder_radius <= 0) {
utility::LogError("[CreateArrow] cylinder_radius <= 0");
}
if (cone_radius <= 0) {
utility::LogError("[CreateArrow] cone_radius <= 0");
}
if (cylinder_height <= 0) {
utility::LogError("[CreateArrow] cylinder_height <= 0");
}
if (cone_height <= 0) {
utility::LogError("[CreateArrow] cone_height <= 0");
}
if (resolution <= 0) {
utility::LogError("[CreateArrow] resolution <= 0");
}
if (cylinder_split <= 0) {
utility::LogError("[CreateArrow] cylinder_split <= 0");
}
if (cone_split <= 0) {
utility::LogError("[CreateArrow] cone_split <= 0");
}
Eigen::Matrix4f transformation = Eigen::Matrix4f::Identity();
auto mesh_cylinder = CreateCylinder(cylinder_radius, cylinder_height,
resolution, cylinder_split);
transformation(2, 3) = cylinder_height * 0.5;
mesh_cylinder->Transform(transformation);
auto mesh_cone =
CreateCone(cone_radius, cone_height, resolution, cone_split);
transformation(2, 3) = cylinder_height;
mesh_cone->Transform(transformation);
auto mesh_arrow = mesh_cylinder;
*mesh_arrow += *mesh_cone;
return mesh_arrow;
}
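// Hedged usage sketch (illustrative only; `demo_unit_arrow` is not part of
// cupoch): the composed arrow points along +Z with total length
// cylinder_height + cone_height, here 0.8 + 0.2 = 1.
inline std::shared_ptr<TriangleMesh> demo_unit_arrow() {
    return TriangleMesh::CreateArrow(/*cylinder_radius=*/0.05f,
                                     /*cone_radius=*/0.1f,
                                     /*cylinder_height=*/0.8f,
                                     /*cone_height=*/0.2f);
}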
std::shared_ptr<TriangleMesh> TriangleMesh::CreateCoordinateFrame(
float size /* = 1.0*/,
const Eigen::Vector3f &origin /* = Eigen::Vector3f(0.0, 0.0, 0.0)*/) {
if (size <= 0) {
utility::LogError("[CreateCoordinateFrame] size <= 0");
}
auto mesh_frame = CreateSphere(0.06 * size);
mesh_frame->ComputeVertexNormals();
mesh_frame->PaintUniformColor(Eigen::Vector3f(0.5, 0.5, 0.5));
std::shared_ptr<TriangleMesh> mesh_arrow;
Eigen::Matrix4f transformation;
mesh_arrow = CreateArrow(0.035 * size, 0.06 * size, 0.8 * size, 0.2 * size);
mesh_arrow->ComputeVertexNormals();
mesh_arrow->PaintUniformColor(Eigen::Vector3f(1.0, 0.0, 0.0));
transformation << 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1;
mesh_arrow->Transform(transformation);
*mesh_frame += *mesh_arrow;
mesh_arrow = CreateArrow(0.035 * size, 0.06 * size, 0.8 * size, 0.2 * size);
mesh_arrow->ComputeVertexNormals();
mesh_arrow->PaintUniformColor(Eigen::Vector3f(0.0, 1.0, 0.0));
transformation << 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1;
mesh_arrow->Transform(transformation);
*mesh_frame += *mesh_arrow;
mesh_arrow = CreateArrow(0.035 * size, 0.06 * size, 0.8 * size, 0.2 * size);
mesh_arrow->ComputeVertexNormals();
mesh_arrow->PaintUniformColor(Eigen::Vector3f(0.0, 0.0, 1.0));
transformation << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;
mesh_arrow->Transform(transformation);
*mesh_frame += *mesh_arrow;
transformation = Eigen::Matrix4f::Identity();
transformation.block<3, 1>(0, 3) = origin;
mesh_frame->Transform(transformation);
return mesh_frame;
}
std::shared_ptr<TriangleMesh> TriangleMesh::CreateMoebius(
int length_split /* = 70 */,
int width_split /* = 15 */,
int twists /* = 1 */,
float radius /* = 1 */,
float flatness /* = 1 */,
float width /* = 1 */,
float scale /* = 1 */) {
auto mesh = std::make_shared<TriangleMesh>();
if (length_split <= 0) {
utility::LogError("[CreateMoebius] length_split <= 0");
}
if (width_split <= 0) {
utility::LogError("[CreateMoebius] width_split <= 0");
}
if (twists < 0) {
utility::LogError("[CreateMoebius] twists < 0");
}
if (radius <= 0) {
utility::LogError("[CreateMoebius] radius <= 0");
}
if (flatness == 0) {
utility::LogError("[CreateMoebius] flatness == 0");
}
if (width <= 0) {
utility::LogError("[CreateMoebius] width <= 0");
}
if (scale <= 0) {
utility::LogError("[CreateMoebius] scale <= 0");
}
mesh->vertices_.resize(length_split * width_split);
compute_moebius_vertices_functor func1(length_split, width_split, twists,
radius, flatness, width, scale);
thrust::transform(
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(length_split * width_split),
mesh->vertices_.begin(), func1);
mesh->triangles_.resize(2 * length_split * (width_split - 1));
compute_moebius_triangles_functor func2(
thrust::raw_pointer_cast(mesh->triangles_.data()), length_split,
width_split, twists);
thrust::for_each(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(length_split *
(width_split - 1)),
func2);
return mesh;
}
// ---------------------------------------------------------------------------
#pragma once
#include "backend/common/imageOps.hpp"
#include <stdint.h>
namespace VideoStitch {
namespace Image {
inline __device__ uint32_t YRGBDiffToRGBA(unsigned char y, const int3& rgbDiff) {
const int32_t ya = (1164 * (y - 16)) / 1000;
return RGBA::pack(clamp8(ya + rgbDiff.x), clamp8(ya + rgbDiff.y), clamp8(ya + rgbDiff.z), 0xff);
}
#define nv12_surface_write surface_write_i
#include "../gpuKernelDef.h"
#include "backend/common/image/unpack.gpu"
// ---------------------------- Output -----------------------------
__global__ void unpackKernelGrayscale(unsigned char* dst, unsigned pitch, const cudaSurfaceObject_t src, unsigned width,
unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uint32_t val;
surf2Dread(&val, src, x * sizeof(uint32_t), y);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
dst[y * pitch + x] = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
}
}
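// Hedged host-side launch sketch (illustrative only; the 16x16 block size and
// the helper name `launchUnpackGrayscale` are assumptions, not taken from the
// original host code): one thread per output pixel, grid rounded up.
inline void launchUnpackGrayscale(unsigned char* dst, unsigned pitch, cudaSurfaceObject_t src,
                                  unsigned width, unsigned height, cudaStream_t stream) {
  const dim3 block(16, 16);
  const dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
  unpackKernelGrayscale<<<grid, block, 0, stream>>>(dst, pitch, src, width, height);
}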
__global__ void unpackSourceKernelRGBA(uint32_t* dst, unsigned pitch, const cudaSurfaceObject_t src, unsigned width,
unsigned height) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
// yeah, we could use a memcpy
uint32_t val;
surf2Dread(&val, src, x * sizeof(uint32_t), y);
dst[y * pitch + x] = val;
}
}
__global__ void unpackKernelRGB(unsigned char* __restrict__ dst, unsigned pitch, const uint32_t* __restrict__ src,
unsigned width, unsigned height) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const uint32_t val = src[y * width + x];
dst[y * pitch + 3 * x] = RGBA::r(val);
dst[y * pitch + 3 * x + 1] = RGBA::g(val);
dst[y * pitch + 3 * x + 2] = RGBA::b(val);
}
}
__global__ void unpackSourceKernelRGB(unsigned char* dst, unsigned pitch, const cudaSurfaceObject_t src, unsigned width,
unsigned height) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uint32_t val;
surf2Dread(&val, src, x * sizeof(uint32_t), y);
dst[y * pitch + 3 * x] = RGBA::r(val);
dst[y * pitch + 3 * x + 1] = RGBA::g(val);
dst[y * pitch + 3 * x + 2] = RGBA::b(val);
}
}
__global__ void unpackSourceKernelF32C1(float* dst, unsigned pitch, const cudaSurfaceObject_t src, unsigned width,
unsigned height) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
// yeah, we could use a memcpy
float val;
surf2Dread(&val, src, x * sizeof(float), y);
dst[y * pitch + x] = val;
}
}
__global__ void unpackSourceKernelGrayscale16(uint16_t* dst, unsigned pitch, const cudaSurfaceObject_t src,
unsigned width, unsigned height) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
float val;
surf2Dread(&val, src, x * sizeof(float), y);
const float inMilliMeters = val * 1000.f;
const uint16_t u16 = (uint16_t)max(0.f, min((float)USHRT_MAX, round(inMilliMeters)));
dst[y * pitch + x] = u16;
}
}
__global__ void unpackKernelDepth(unsigned char* __restrict__ yDst, unsigned yPitch, unsigned char* __restrict__ uDst,
unsigned uPitch, unsigned char* __restrict__ vDst, unsigned vPitch,
const float* __restrict__ src, unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned x = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned y = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (x < width && y < height) {
int32_t u = 0;
int32_t v = 0;
#pragma unroll
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
const float depth = src[(y + j) * width + x + i];
// convert to millimeters and truncate
unsigned int val = min(__float2uint_rn(depth * 1000.f), 65279);
// encode
yDst[(y + j) * yPitch + x + i] = (unsigned char)(val / 256);
int cu = val % 512;
int cv = (val + 384) % 512;
if (cu >= 256) {
u += (unsigned char)(511 - cu);
} else {
u += (unsigned char)cu;
}
if (cv >= 256) {
v += (unsigned char)(511 - cv);
} else {
v += (unsigned char)cv;
}
}
}
uDst[(y * uPitch + x) / 2] = (u + 2) / 4;
vDst[(y * vPitch + x) / 2] = (v + 2) / 4;
}
}
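// Hedged reference of the chroma folding used above for depth encoding
// (illustrative only): depth in millimeters is split into Y = val / 256 and a
// triangle wave with period 512 for the chroma planes (U uses val directly,
// V uses val + 384).
__host__ __device__ inline unsigned char depthTriangleWave512(unsigned int val) {
  const unsigned int m = val % 512;
  return (unsigned char)(m >= 256 ? 511 - m : m);
}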
__global__ void unpackSourceKernelDepth(unsigned char* __restrict__ yDst, unsigned yPitch,
unsigned char* __restrict__ uDst, unsigned uPitch,
unsigned char* __restrict__ vDst, unsigned vPitch,
const cudaSurfaceObject_t src, unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned x = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned y = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (x < width && y < height) {
int32_t u = 0;
int32_t v = 0;
#pragma unroll
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
float depth;
surf2Dread(&depth, src, (x + i) * sizeof(float), y + j);
// convert to millimeters and truncate
unsigned int val = min(__float2uint_rn(depth * 1000.f), 65279);
// encode
yDst[(y + j) * yPitch + x + i] = (unsigned char)(val / 256);
int cu = val % 512;
int cv = (val + 384) % 512;
if (cu >= 256) {
u += (unsigned char)(511 - cu);
} else {
u += (unsigned char)cu;
}
if (cv >= 256) {
v += (unsigned char)(511 - cv);
} else {
v += (unsigned char)cv;
}
}
}
uDst[(y * uPitch + x) / 2] = (u + 2) / 4;
vDst[(y * vPitch + x) / 2] = (v + 2) / 4;
}
}
/**
* This kernel converts the buffer from RGBA to planar 12 bits 4:2:0 (YV12) out-of-place.
* The conversion is undefined for pixels with 0 alpha.
*
* Y0 Y1 Y2 Y3
* ...
* U0 U1
* ...
* V0 V1
* ...
*/
__global__ void unpackKernelYV12(unsigned char* __restrict__ yDst, unsigned yPitch, unsigned char* __restrict__ uDst,
unsigned uPitch, unsigned char* __restrict__ vDst, unsigned vPitch,
const uint32_t* __restrict__ src, unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned sy = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (sx < width && sy < height) {
int32_t u = 0;
int32_t v = 0;
{
uint32_t val = src[sy * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
if (sx + 1 < width && sy + 1 < height) {
// general case
{
uint32_t val = src[sy * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val = src[(sy + 1) * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val = src[(sy + 1) * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
uDst[(sy * uPitch + sx) / 2] = u / 4;
vDst[(sy * vPitch + sx) / 2] = v / 4;
} else {
// border case with odd width / height
if (sx + 1 < width) {
uint32_t val = src[sy * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uDst[(sy * uPitch + sx) / 2] = u / 2;
vDst[(sy * vPitch + sx) / 2] = v / 2;
}
__syncthreads();
if (sy + 1 < height) {
uint32_t val = src[(sy + 1) * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uDst[(sy * uPitch + sx) / 2] = u / 2;
vDst[(sy * vPitch + sx) / 2] = v / 2;
}
}
}
}
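// Hedged launch sketch for the 2x2-per-thread kernels such as unpackKernelYV12
// above (illustrative only; block size and helper name are assumptions): each
// thread covers a 2x2 pixel group, so the grid is sized on the half
// resolution, rounded up.
inline void launchUnpackYV12(unsigned char* yDst, unsigned yPitch, unsigned char* uDst, unsigned uPitch,
                             unsigned char* vDst, unsigned vPitch, const uint32_t* src,
                             unsigned width, unsigned height, cudaStream_t stream) {
  const dim3 block(16, 16);
  const dim3 grid(((width + 1) / 2 + block.x - 1) / block.x,
                  ((height + 1) / 2 + block.y - 1) / block.y);
  unpackKernelYV12<<<grid, block, 0, stream>>>(yDst, yPitch, uDst, uPitch, vDst, vPitch, src, width, height);
}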
__global__ void unpackSourceKernelYV12(unsigned char* __restrict__ yDst, unsigned yPitch,
unsigned char* __restrict__ uDst, unsigned uPitch,
unsigned char* __restrict__ vDst, unsigned vPitch, const cudaSurfaceObject_t src,
unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned sy = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (sx < width && sy < height) {
int32_t u = 0;
int32_t v = 0;
{
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
if (sx + 1 < width && sy + 1 < height) {
// general case
{
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy + 1);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy + 1);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
uDst[(sy * uPitch + sx) / 2] = u / 4;
vDst[(sy * vPitch + sx) / 2] = v / 4;
} else {
// border case with odd width / height
if (sx + 1 < width) {
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uDst[(sy * uPitch + sx) / 2] = u / 2;
vDst[(sy * vPitch + sx) / 2] = v / 2;
}
__syncthreads();
if (sy + 1 < height) {
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy + 1);
int32_t r = clamp8(RGBA::r(val));
int32_t g = clamp8(RGBA::g(val));
int32_t b = clamp8(RGBA::b(val));
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uDst[(sy * uPitch + sx) / 2] = u / 2;
vDst[(sy * vPitch + sx) / 2] = v / 2;
}
}
}
}
/**
* This kernel converts the buffer from RGBA to interleaved YUV420 (NV12) out-of-place.
* The conversion is undefined for pixels with 0 alpha.
*
* Y0 Y1 Y2 Y3
* ...
* U0 V0 U1 V1
* ...
*/
__global__ void unpackKernelNV12(unsigned char* __restrict__ yDst, unsigned yPitch, unsigned char* __restrict__ uvDst,
unsigned uvPitch, const uint32_t* __restrict__ src, unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned sy = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (sx < width && sy < height) {
int32_t u = 0;
int32_t v = 0;
{
uint32_t val = src[sy * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
if (sx + 1 < width && sy + 1 < height) {
// general case
{
uint32_t val = src[sy * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val = src[(sy + 1) * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val = src[(sy + 1) * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
uvDst[(sy * uvPitch) / 2 + sx] = u / 4;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 4;
} else {
// border case with odd width / height
if (sx + 1 < width) {
uint32_t val = src[sy * width + sx + 1];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uvDst[(sy * uvPitch) / 2 + sx] = u / 2;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 2;
} else if (sy + 1 < height) {
uint32_t val = src[(sy + 1) * width + sx];
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uvDst[(sy * uvPitch) / 2 + sx] = u / 2;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 2;
}
}
}
}
/**
* This kernel converts the buffer from RGBA to interleaved YUV420 (NV12) out-of-place.
* The conversion is undefined for pixels with 0 alpha.
*
* Y0 Y1 Y2 Y3
* ...
* U0 V0 U1 V1
* ...
*/
__global__ void unpackSourceKernelNV12(unsigned char* __restrict__ yDst, unsigned yPitch,
unsigned char* __restrict__ uvDst, unsigned uvPitch,
const cudaSurfaceObject_t src, unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
unsigned sy = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
if (sx < width && sy < height) {
int32_t u = 0;
int32_t v = 0;
{
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
if (sx + 1 < width && sy + 1 < height) {
// general case
{
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy + 1);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
{
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy + 1);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
}
uvDst[(sy * uvPitch) / 2 + sx] = u / 4;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 4;
} else {
// border case with odd width / height
if (sx + 1 < width) {
uint32_t val;
surf2Dread(&val, src, (sx + 1) * sizeof(uint32_t), sy);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[sy * yPitch + sx + 1] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uvDst[(sy * uvPitch) / 2 + sx] = u / 2;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 2;
} else if (sy + 1 < height) {
uint32_t val;
surf2Dread(&val, src, sx * sizeof(uint32_t), sy + 1);
int32_t r = RGBA::r(val);
int32_t g = RGBA::g(val);
int32_t b = RGBA::b(val);
int32_t y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
yDst[(sy + 1) * yPitch + sx] = y;
u += ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
v += ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
uvDst[(sy * uvPitch) / 2 + sx] = u / 2;
uvDst[(sy * uvPitch) / 2 + sx + 1] = v / 2;
}
}
}
}
/**
* This kernel converts the buffer from RGBA to YUY2 out-of-place.
* Pixels are all given full solidness (max alpha).
*/
__global__ void unpackYUY2Kernel(unsigned char* __restrict__ dst, unsigned pitch, const uint32_t* __restrict__ src,
unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width / 2 && y < height) {
uint32_t val0 = src[y * width + 2 * x];
int32_t r0 = RGBA::r(val0);
int32_t g0 = RGBA::g(val0);
int32_t b0 = RGBA::b(val0);
uint32_t val1 = src[y * width + 2 * x + 1];
int32_t r1 = RGBA::r(val1);
int32_t g1 = RGBA::g(val1);
int32_t b1 = RGBA::b(val1);
unsigned char y0 = ((66 * r0 + 129 * g0 + 25 * b0 + 128) >> 8) + 16;
unsigned char y1 = ((66 * r1 + 129 * g1 + 25 * b1 + 128) >> 8) + 16;
unsigned char u = ((-38 * r0 - 74 * g0 + 112 * b0 + 128) >> 8) + 128;
unsigned char v = ((112 * r0 - 94 * g0 - 18 * b0 + 128) >> 8) + 128;
dst[y * pitch + 4 * x] = y0;
dst[y * pitch + 4 * x + 1] = u;
dst[y * pitch + 4 * x + 2] = y1;
dst[y * pitch + 4 * x + 3] = v;
}
}
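// Hedged reference of the fixed-point BT.601 luma conversion repeated in the
// kernels above (illustrative only): the integer coefficients approximate
// Y = 16 + 0.257*R + 0.504*G + 0.098*B, scaled by 256 with rounding.
__host__ __device__ inline unsigned char rgbToY601(int32_t r, int32_t g, int32_t b) {
  return (unsigned char)(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
}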
/**
* This kernel converts the buffer from RGBA to UYVY out-of-place.
* Pixels are all given full solidness (max alpha).
*/
__global__ void unpackUYVYKernel(unsigned char* __restrict__ dst, unsigned pitch, const uint32_t* __restrict__ src,
unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width / 2 && y < height) {
uint32_t val0 = src[y * width + 2 * x];
int32_t r0 = RGBA::r(val0);
int32_t g0 = RGBA::g(val0);
int32_t b0 = RGBA::b(val0);
uint32_t val1 = src[y * width + 2 * x + 1];
int32_t r1 = RGBA::r(val1);
int32_t g1 = RGBA::g(val1);
int32_t b1 = RGBA::b(val1);
unsigned char y0 = ((66 * r0 + 129 * g0 + 25 * b0 + 128) >> 8) + 16;
unsigned char y1 = ((66 * r1 + 129 * g1 + 25 * b1 + 128) >> 8) + 16;
unsigned char u = ((-38 * r0 - 74 * g0 + 112 * b0 + 128) >> 8) + 128;
unsigned char v = ((112 * r0 - 94 * g0 - 18 * b0 + 128) >> 8) + 128;
dst[y * pitch + 4 * x] = u;
dst[y * pitch + 4 * x + 1] = y0;
dst[y * pitch + 4 * x + 2] = v;
dst[y * pitch + 4 * x + 3] = y1;
}
}
/**
* This kernel converts the buffer from RGBA to 10 bits planar YUV422 out-of-place.
* Pixels are all given full solidness (max alpha).
* 10 bits values are padded to 16 bits.
*/
__global__ void unpackYUV422P10Kernel(uint16_t* __restrict__ yDst, unsigned yPitch, uint16_t* __restrict__ uDst,
unsigned uPitch, uint16_t* __restrict__ vDst, unsigned vPitch,
const uint32_t* __restrict__ src, unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width / 2 && y < height) {
uint32_t val0 = src[y * width + 2 * x];
int32_t r0 = RGBA::r(val0);
int32_t g0 = RGBA::g(val0);
int32_t b0 = RGBA::b(val0);
uint32_t val1 = src[y * width + 2 * x + 1];
int32_t r1 = RGBA::r(val1);
int32_t g1 = RGBA::g(val1);
int32_t b1 = RGBA::b(val1);
uint32_t u = 0, v = 0;
int32_t y0 = (((66 * r0 + 129 * g0 + 25 * b0 + 128) >> 8) + 16) << 2;
int32_t y1 = (((66 * r1 + 129 * g1 + 25 * b1 + 128) >> 8) + 16) << 2;
u += (((-38 * r0 - 74 * g0 + 112 * b0 + 128) >> 8) + 128) << 2;
u += (((-38 * r1 - 74 * g1 + 112 * b1 + 128) >> 8) + 128) << 2;
v += (((112 * r0 - 94 * g0 - 18 * b0 + 128) >> 8) + 128) << 2;
v += (((112 * r1 - 94 * g1 - 18 * b1 + 128) >> 8) + 128) << 2;
yDst[y * yPitch + 2 * x] = y0;
yDst[y * yPitch + 2 * x + 1] = y1;
uDst[y * uPitch + x] = u / 2;
vDst[y * vPitch + x] = v / 2;
}
}
__global__ void unpackMonoKernelYUV420P(unsigned char* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sy = blockIdx.y * blockDim.y + threadIdx.y;
if (sx < width / 2 && sy < height / 2) {
{
const unsigned i = (2 * sy) * width + 2 * sx;
dst[i] = src[i];
}
{
const unsigned i = (2 * sy) * width + 2 * sx + 1;
dst[i] = src[i];
}
{
const unsigned i = (2 * sy + 1) * width + 2 * sx;
dst[i] = src[i];
}
{
const unsigned i = (2 * sy + 1) * width + 2 * sx + 1;
dst[i] = src[i];
}
}
}
// ---------------------------- Input -----------------------------
/**
* This kernel converts the buffer from BGRU8888 (where 'U' stands for 'unused') to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha)
*/
__global__ void convertBGRUToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
unsigned i = y * width + x;
dst[i] = RGBA::pack(src[4 * i + 2], src[4 * i + 1], src[4 * i], 0xff);
}
}
/**
* This kernel converts the buffer from RGB to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha)
*/
__global__ void convertRGBToRGBAKernel(cudaSurfaceObject_t dst, const unsigned char* src, unsigned width,
unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
unsigned i = y * width + x;
surf2Dwrite(RGBA::pack(src[3 * i], src[3 * i + 1], src[3 * i + 2], 0xff), dst, x * sizeof(uint32_t), y);
}
}
__global__ void convertRGB210ToRGBAKernel(cudaSurfaceObject_t dst, const uint32_t* src, unsigned width,
unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
uint32_t v = src[y * width + x];
surf2Dwrite(RGBA::pack(clamp8(RGB210::r(v)), clamp8(RGB210::g(v)), clamp8(RGB210::b(v)), RGB210::a(v)), dst,
x * sizeof(uint32_t), y);
}
}
__device__ unsigned loadBayerPattern(const unsigned char* __restrict__ src, unsigned width, unsigned height,
unsigned char* sharedSrc, unsigned srcX, unsigned srcY) {
// The shared memory uses the same pattern as src.
// There are (2 * blockDim.x + 2) * (2 * blockDim.y + 2) bytes to load
// (we need an extra layer outside the current zone for interpolation).
const unsigned sharedBlockWidth = blockDim.x + 1; // +1: one half block left and one half-block right.
const unsigned sharedWidth = 2 * sharedBlockWidth;
// Start with interior blocks.
if (srcX < width && srcY < height) {
// The access pattern is the same as during interpolation, meaning that each thread issues two (coalesced) reads: RG
// then GB.
// TODO: try out a different loading pattern: each thread loads 4 consecutive bytes in memory.
// This would reduce the number of coalesced reads to 1 instead of 2.
// Note that this would not be the same pattern as during interpolation.
const int srcBase = width * srcY + srcX;
const int sharedBase = sharedWidth + 1 + 2 * (sharedWidth * threadIdx.y + threadIdx.x);
    // The compiler should be able to optimize this into only 2 coalesced single-word reads.
    // If it can't, accesses are still coalesced, but there are 4 accesses instead of 2.
sharedSrc[sharedBase] = src[srcBase];
sharedSrc[sharedBase + 1] = src[srcBase + 1];
sharedSrc[sharedBase + sharedWidth] = src[srcBase + width];
sharedSrc[sharedBase + sharedWidth + 1] = src[srcBase + width + 1];
}
// Now load the boundary
if (threadIdx.y == 0 && srcX < width) {
// Top
{
const int sharedBase = 1 + 2 * threadIdx.x;
const int srcBase = srcY > 0 ?
// Normal case.
width * (srcY - 1) + srcX
:
// The previous row is outside the image, constant boundary condition.
width + srcX;
sharedSrc[sharedBase] = src[srcBase];
sharedSrc[sharedBase + 1] = src[srcBase + 1];
}
// Bottom
{
int srcBoundaryRow;
int sharedBoundaryRow;
if (srcY + 2 * blockDim.y < height) {
// Normal case, extra row is within image.
srcBoundaryRow = srcY + 2 * blockDim.y;
sharedBoundaryRow = 2 * blockDim.y;
} else {
// The next row is outside the image, constant boundary condition.
srcBoundaryRow = height - 2;
sharedBoundaryRow = height - srcY;
}
const int srcBase = width * srcBoundaryRow + srcX;
const int sharedBase = sharedWidth + 1 + sharedWidth * sharedBoundaryRow + 2 * threadIdx.x;
sharedSrc[sharedBase] = src[srcBase];
sharedSrc[sharedBase + 1] = src[srcBase + 1];
}
}
if (threadIdx.x == 0 && srcY < height) {
// Left
{
const int sharedBase = sharedWidth + 2 * sharedWidth * threadIdx.y;
const int srcBase = srcX > 0 ?
// Normal case.
width * srcY + srcX - 1
:
// The previous col is outside the image, constant boundary condition.
width * srcY + 1;
sharedSrc[sharedBase] = src[srcBase];
sharedSrc[sharedBase + sharedWidth] = src[srcBase + width];
}
// Right
{
int srcBoundaryCol;
int sharedBoundaryCol;
if (srcX + 2 * blockDim.x < width) {
// Normal case, extra col is within image.
srcBoundaryCol = srcX + 2 * blockDim.x;
sharedBoundaryCol = 2 * blockDim.x;
} else {
// The next col is outside the image, constant boundary condition.
srcBoundaryCol = width - 2;
sharedBoundaryCol = width - srcX;
}
const int srcBase = width * srcY + srcBoundaryCol;
const int sharedBase = sharedWidth + 1 + 2 * sharedWidth * threadIdx.y + sharedBoundaryCol;
sharedSrc[sharedBase] = src[srcBase];
sharedSrc[sharedBase + sharedWidth] = src[srcBase + width];
}
}
// And the corners.
if (threadIdx.x == 0 && threadIdx.y == 0) {
    // Due to the asymmetry, only the top-left and bottom-right corners are ever used (see the test for an example).
// Top left
{
const int srcBoundaryCol = srcX > 0 ? srcX - 1 : 1;
const int srcBoundaryRow = srcY > 0 ? srcY - 1 : 1;
const int srcBase = width * srcBoundaryRow + srcBoundaryCol;
sharedSrc[0] = src[srcBase];
}
// Bottom right.
{
int srcBoundaryCol;
int sharedBoundaryCol;
if (srcX + 2 * blockDim.x < width) {
// Normal case, extra col is within image.
srcBoundaryCol = srcX + 2 * blockDim.x;
sharedBoundaryCol = 2 * blockDim.x;
} else {
// The next col is outside the image, constant boundary condition.
srcBoundaryCol = width - 2;
sharedBoundaryCol = width - srcX;
}
int srcBoundaryRow;
int sharedBoundaryRow;
if (srcY + 2 * blockDim.y < height) {
// Normal case, extra row is within image.
srcBoundaryRow = srcY + 2 * blockDim.y;
sharedBoundaryRow = 2 * blockDim.y;
} else {
// The next row is outside the image, constant boundary condition.
srcBoundaryRow = height - 2;
sharedBoundaryRow = height - srcY;
}
const int srcBase = width * srcBoundaryRow + srcBoundaryCol;
const int sharedBase = sharedWidth + 1 + sharedWidth * sharedBoundaryRow + sharedBoundaryCol;
sharedSrc[sharedBase] = src[srcBase];
}
}
return sharedWidth;
}
/**
* This kernel converts the buffer from Bayer-filtered RGGB to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha).
* Uses bilinear interpolation within color planes.
*
* Each thread handles a 2*2 RGGB pixel block. The interpolation support is the 4*4 pixel block centered around the 2*2
* block. Globally each thread block needs an extra pixel around itself.
*
*/
__global__ void convertBayerRGGBToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
// x and y are the 2*2 block ids.
const unsigned srcX = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned srcY = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
// Load the data to shared memory.
extern __shared__ unsigned char sharedSrc[];
const unsigned sharedWidth = loadBayerPattern(src, width, height, sharedSrc, srcX, srcY);
__syncthreads();
if (srcX < width && srcY < height) {
const int sharedBase = sharedWidth + 1 + 2 * (sharedWidth * threadIdx.y + threadIdx.x);
// Top-left component;
dst[srcY * width + srcX] = RGBA::pack(
sharedSrc[sharedBase], // Red is given
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1] +
(int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + sharedWidth]) /
4, // Green is 4-tap straight (+)
((int32_t)sharedSrc[sharedBase - 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 - sharedWidth] +
(int32_t)sharedSrc[sharedBase - 1 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
4, // Blue is 4-tap 45° rotated (x)
255);
// Top-right component;
dst[srcY * width + srcX + 1] = RGBA::pack(
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2]) / 2, // Red is 2-tap horizontal
sharedSrc[sharedBase + 1], // Green is given
((int32_t)sharedSrc[sharedBase + 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
2, // Blue is 2-tap vertical
255);
// Bottom-left component;
dst[(srcY + 1) * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2 * sharedWidth]) /
2, // Red is 2-tap vertical
sharedSrc[sharedBase + sharedWidth], // Green is given
((int32_t)sharedSrc[sharedBase + sharedWidth - 1] + (int32_t)sharedSrc[sharedBase + sharedWidth + 1]) /
2, // Blue is 2-tap horizontal
255);
// Bottom-right component
dst[(srcY + 1) * width + srcX + 1] = RGBA::pack(
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2] +
(int32_t)sharedSrc[sharedBase + 2 * sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + 2 * sharedWidth]) /
            4, // Red is 4-tap 45° rotated (x)
((int32_t)sharedSrc[sharedBase + 1] + (int32_t)sharedSrc[sharedBase + sharedWidth] +
(int32_t)sharedSrc[sharedBase + 2 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
4, // Green is 4-tap straight (+)
sharedSrc[sharedBase + sharedWidth + 1], // Blue is given
255);
}
}
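/*
 * Minimal host-side launch sketch (an illustrative assumption, not part of the original
 * VideoStitch source): each thread demosaics one 2x2 Bayer cell, so the grid spans
 * width/2 by height/2 threads, and the dynamic shared buffer must hold
 * (2 * blockDim.x + 2) * (2 * blockDim.y + 2) bytes -- the tile plus the one-pixel
 * apron loaded by loadBayerPattern above. The block shape and stream choice are arbitrary.
 */
static void launchConvertBayerRGGBToRGBA(uint32_t* dst, const unsigned char* src, unsigned width,
                                         unsigned height, cudaStream_t stream) {
  const dim3 block(16, 16);
  const dim3 grid((width / 2 + block.x - 1) / block.x, (height / 2 + block.y - 1) / block.y);
  const size_t sharedBytes = (2 * block.x + 2) * (2 * block.y + 2) * sizeof(unsigned char);
  convertBayerRGGBToRGBAKernel<<<grid, block, sharedBytes, stream>>>(dst, src, width, height);
}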
/**
* This kernel converts the buffer from Bayer-filtered BGGR to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha).
* Uses bilinear interpolation within color planes.
*
* Each thread handles a 2*2 BGGR pixel block. The interpolation support is the 4*4 pixel block centered around the 2*2
* block. Globally each thread block needs an extra pixel around itself.
*
*/
__global__ void convertBayerBGGRToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
// x and y are the 2*2 block ids.
const unsigned srcX = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned srcY = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
// Load the data to shared memory.
extern __shared__ unsigned char sharedSrc[];
const unsigned sharedWidth = loadBayerPattern(src, width, height, sharedSrc, srcX, srcY);
__syncthreads();
if (srcX < width && srcY < height) {
const int sharedBase = sharedWidth + 1 + 2 * (sharedWidth * threadIdx.y + threadIdx.x);
// Top-left component;
dst[srcY * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase - 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 - sharedWidth] +
(int32_t)sharedSrc[sharedBase - 1 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
4, // Red is 4-tap 45° rotated (x)
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1] +
(int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + sharedWidth]) /
4, // Green is 4-tap straight (+)
sharedSrc[sharedBase], // Blue is given
255);
// Top-right component;
dst[srcY * width + srcX + 1] = RGBA::pack(
((int32_t)sharedSrc[sharedBase + 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
2, // Red is 2-tap vertical
sharedSrc[sharedBase + 1], // Green is given
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2]) / 2, // Blue is 2-tap horizontal
255);
// Bottom-left component;
dst[(srcY + 1) * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase + sharedWidth - 1] + (int32_t)sharedSrc[sharedBase + sharedWidth + 1]) /
2, // Red is 2-tap horizontal
sharedSrc[sharedBase + sharedWidth], // Green is given
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2 * sharedWidth]) /
2, // Blue is 2-tap vertical
255);
// Bottom-right component
dst[(srcY + 1) * width + srcX + 1] = RGBA::pack(
sharedSrc[sharedBase + sharedWidth + 1], // Red is given
((int32_t)sharedSrc[sharedBase + 1] + (int32_t)sharedSrc[sharedBase + sharedWidth] +
(int32_t)sharedSrc[sharedBase + 2 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
4, // Green is 4-tap straight (+)
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2] +
(int32_t)sharedSrc[sharedBase + 2 * sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + 2 * sharedWidth]) /
            4, // Blue is 4-tap 45° rotated (x)
255);
}
}
/**
* This kernel converts the buffer from Bayer-filtered GRBG to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha).
* Uses bilinear interpolation within color planes.
*
* Each thread handles a 2*2 GRBG pixel block. The interpolation support is the 4*4 pixel block centered around the 2*2
* block. Globally each thread block needs an extra pixel around itself.
*
*/
__global__ void convertBayerGRBGToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
// x and y are the 2*2 block ids.
const unsigned srcX = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned srcY = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
// Load the data to shared memory.
extern __shared__ unsigned char sharedSrc[];
const unsigned sharedWidth = loadBayerPattern(src, width, height, sharedSrc, srcX, srcY);
__syncthreads();
if (srcX < width && srcY < height) {
const int sharedBase = sharedWidth + 1 + 2 * (sharedWidth * threadIdx.y + threadIdx.x);
// Top-left component;
dst[srcY * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1]) / 2, // Red is 2-tap horizontal
sharedSrc[sharedBase], // Green is given
((int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + sharedWidth]) /
2, // Blue is 2-tap vertical
255);
// Top-right component;
dst[srcY * width + srcX + 1] = RGBA::pack(
sharedSrc[sharedBase + 1], // Red is given
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2] +
(int32_t)sharedSrc[sharedBase + 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
4, // Green is 4-tap straight (+)
((int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 - sharedWidth] +
(int32_t)sharedSrc[sharedBase + sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + sharedWidth]) /
4, // Blue is 4-tap 45° rotated (x)
255);
// Bottom-left component;
dst[(srcY + 1) * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1] +
(int32_t)sharedSrc[sharedBase - 1 + 2 * sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
4, // Red is 4-tap 45° rotated (x)
((int32_t)sharedSrc[sharedBase - 1 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth] +
(int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2 * sharedWidth]) /
4, // Green is 4-tap straight (+)
sharedSrc[sharedBase + sharedWidth], // Blue is given
255);
// Bottom-right component
dst[(srcY + 1) * width + srcX + 1] =
RGBA::pack(((int32_t)sharedSrc[sharedBase + 1] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
2, // Red is 2-tap vertical
sharedSrc[sharedBase + sharedWidth + 1], // Green is given
((int32_t)sharedSrc[sharedBase + sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + sharedWidth]) /
2, // Blue is 2-tap horizontal
255);
}
}
/**
* This kernel converts the buffer from Bayer-filtered GBRG to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha).
* Uses bilinear interpolation within color planes.
*
* Each thread handles a 2*2 GBRG pixel block. The interpolation support is the 4*4 pixel block centered around the 2*2
* block. Globally each thread block needs an extra pixel around itself.
*
*/
__global__ void convertBayerGBRGToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
// x and y are the 2*2 block ids.
const unsigned srcX = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned srcY = 2 * (blockIdx.y * blockDim.y + threadIdx.y);
// Load the data to shared memory.
extern __shared__ unsigned char sharedSrc[];
const unsigned sharedWidth = loadBayerPattern(src, width, height, sharedSrc, srcX, srcY);
__syncthreads();
if (srcX < width && srcY < height) {
const int sharedBase = sharedWidth + 1 + 2 * (sharedWidth * threadIdx.y + threadIdx.x);
// Top-left component;
dst[srcY * width + srcX] = RGBA::pack(
((int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + sharedWidth]) /
2, // Red is 2-tap vertical
sharedSrc[sharedBase], // Green is given
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1]) / 2, // Blue is 2-tap horizontal
255);
// Top-right component;
dst[srcY * width + srcX + 1] = RGBA::pack(
((int32_t)sharedSrc[sharedBase - sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 - sharedWidth] +
(int32_t)sharedSrc[sharedBase + sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + sharedWidth]) /
4, // Red is 4-tap 45° rotated (x)
((int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2] +
(int32_t)sharedSrc[sharedBase + 1 - sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth]) /
4, // Green is 4-tap straight (+)
sharedSrc[sharedBase + 1], // Blue is given
255);
// Bottom-left component;
dst[(srcY + 1) * width + srcX] = RGBA::pack(
sharedSrc[sharedBase + sharedWidth], // Red is given
((int32_t)sharedSrc[sharedBase - 1 + sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + sharedWidth] +
(int32_t)sharedSrc[sharedBase] + (int32_t)sharedSrc[sharedBase + 2 * sharedWidth]) /
4, // Green is 4-tap straight (+)
((int32_t)sharedSrc[sharedBase - 1] + (int32_t)sharedSrc[sharedBase + 1] +
(int32_t)sharedSrc[sharedBase - 1 + 2 * sharedWidth] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
4, // Blue is 4-tap 45° rotated (x)
255);
// Bottom-right component
dst[(srcY + 1) * width + srcX + 1] =
RGBA::pack(((int32_t)sharedSrc[sharedBase + sharedWidth] + (int32_t)sharedSrc[sharedBase + 2 + sharedWidth]) /
2, // Red is 2-tap horizontal
sharedSrc[sharedBase + sharedWidth + 1], // Green is given
((int32_t)sharedSrc[sharedBase + 1] + (int32_t)sharedSrc[sharedBase + 1 + 2 * sharedWidth]) /
2, // Blue is 2-tap vertical
255);
}
}
/**
* This kernel converts the buffer from BGR888 to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha)
*/
__global__ void convertBGRToRGBAKernel(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
unsigned i = y * width + x;
dst[i] = RGBA::pack(src[3 * i + 2], src[3 * i + 1], src[3 * i], 0xff);
}
}
/**
 * These kernels convert the buffer from various YUV422 representations to RGBA8888 out-of-place.
* Pixels are all given full solidness (max alpha).
*/
__global__ void convertUYVYToRGBAKernel(cudaSurfaceObject_t dst, const unsigned char* __restrict__ src, unsigned width,
unsigned height) {
// each thread is responsible for a 2x1 pixel group
  // Two bytes per pixel. U Y0 V Y1
  // Read 2x (u), 2x+1 (y0), 2x+2 (v), 2x+3 (y1)
// Write x, x+1
// Repeat for every line
const unsigned pitch = width * 2;
const unsigned x = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const unsigned char u = src[y * pitch + 2 * x]; // Two bytes per pixel. U Y0 V Y1
const unsigned char y0 = src[y * pitch + 2 * x + 1];
const unsigned char v = src[y * pitch + 2 * x + 2];
const unsigned char y1 = src[y * pitch + 2 * x + 3];
const RGBDiff rgbDiff(yuv444ToRGBDiff(u, v));
surf2Dwrite(YRGBDiffToRGBA(y0, rgbDiff), dst, x * 4, y);
surf2Dwrite(YRGBDiffToRGBA(y1, rgbDiff), dst, (x + 1) * 4, y);
}
}
__global__ void convertYUY2ToRGBAKernel(cudaSurfaceObject_t dst, const unsigned char* src, unsigned width,
unsigned height) {
// each thread is responsible for a 2x1 pixel group
// Two bytes per pixel. Y0 U Y1 V
// Read 2x (y0), 2x+1 (u), 2x+2 (y1) 2x+3 (v)
// Write x, x+1
// Repeat for every line
const unsigned pitch = width * 2;
const unsigned x = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
const unsigned char y0 = src[y * pitch + 2 * x]; // Two bytes per pixel. Y0 U Y1 V
const unsigned char u = src[y * pitch + 2 * x + 1];
const unsigned char y1 = src[y * pitch + 2 * x + 2];
const unsigned char v = src[y * pitch + 2 * x + 3];
const RGBDiff rgbDiff(yuv444ToRGBDiff(u, v));
surf2Dwrite(YRGBDiffToRGBA(y0, rgbDiff), dst, x * 4, y);
surf2Dwrite(YRGBDiffToRGBA(y1, rgbDiff), dst, (x + 1) * 4, y);
}
}
/**
* This kernel converts the buffer from 10 bits planar YUV422 to packed RGBA8888 out-of-place.
* Each thread manages 2 pixels.
 * 10-bit values are padded to 16 bits and truncated back to 8 bits (>> 2) during conversion.
* All pixels are solid.
*/
__global__ void convertYUV422P10ToRGBAKernel(cudaSurfaceObject_t dst, const uint16_t* src, unsigned width,
unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
const uint16_t* uSrc = src + width * height;
const uint16_t* vSrc = uSrc + width * height / 2;
if (x < width / 2 && y < height) {
uint32_t y0 = src[y * width + 2 * x] >> 2;
uint32_t y1 = src[y * width + 2 * x + 1] >> 2;
uint32_t u = uSrc[y * (width / 2) + x] >> 2;
uint32_t v = vSrc[y * (width / 2) + x] >> 2;
const RGBDiff rgbDiff = yuv444ToRGBDiff(u, v);
surf2Dwrite(YRGBDiffToRGBA(y0, rgbDiff), dst, (2 * x) * 4, y);
surf2Dwrite(YRGBDiffToRGBA(y1, rgbDiff), dst, (2 * x + 1) * 4, y);
}
}
/**
* This kernel converts the buffer from planar 12 bits 4:2:0 (YV12) to packed RGBA8888 out-of-place.
* All pixels are solid.
*/
__global__ void convertYV12ToRGBAKernel(cudaSurfaceObject_t dst, const unsigned char* src, unsigned width,
unsigned height) {
// each thread is responsible for a 2x2 pixel group
unsigned sx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned sy = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned char* uSrc = src + width * height;
const unsigned char* vSrc = uSrc + (width * height) / 4;
if (sx < width / 2 && sy < height / 2) {
const RGBDiff rgbDiff(yuv444ToRGBDiff(uSrc[sy * (width / 2) + sx], vSrc[sy * (width / 2) + sx]));
surf2Dwrite(YRGBDiffToRGBA(src[(2 * sy) * width + 2 * sx], rgbDiff), dst, (2 * sx) * 4, 2 * sy);
surf2Dwrite(YRGBDiffToRGBA(src[(2 * sy) * width + 2 * sx + 1], rgbDiff), dst, (2 * sx + 1) * 4, 2 * sy);
surf2Dwrite(YRGBDiffToRGBA(src[(2 * sy + 1) * width + 2 * sx], rgbDiff), dst, (2 * sx) * 4, 2 * sy + 1);
surf2Dwrite(YRGBDiffToRGBA(src[(2 * sy + 1) * width + 2 * sx + 1], rgbDiff), dst, (2 * sx + 1) * 4, 2 * sy + 1);
}
}
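/*
 * Plane-layout worked example for the pointer arithmetic above (illustrative): for a
 * 1920x1080 YV12 frame, the Y plane is 1920*1080 = 2073600 bytes and each chroma plane
 * is 1920*1080/4 = 518400 bytes, so uSrc = src + 2073600 and vSrc = uSrc + 518400.
 * Each thread then converts one 2x2 luma block that shares a single (u, v) sample.
 */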
__global__ void convertKernelGrayscale(uint32_t* __restrict__ dst, const unsigned char* __restrict__ src,
unsigned width, unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
unsigned i = y * width + x;
dst[i] = RGBA::pack((uint32_t)src[i], (uint32_t)src[i], (uint32_t)src[i], 0xff);
}
}
__global__ void convertGrayscaleKernel(cudaSurfaceObject_t dst, const unsigned char* __restrict__ src, unsigned width,
unsigned height) {
unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
unsigned i = y * width + x;
surf2Dwrite(RGBA::pack((uint32_t)src[i], (uint32_t)src[i], (uint32_t)src[i], 0xff), dst, x * 4, y);
}
}
} // namespace Image
} // namespace VideoStitch
namespace faiss { namespace gpu {
//
// IVF list length update
//
__global__ void
runUpdateListPointers(Tensor<int, 1, true> listIds,
Tensor<int, 1, true> newListLength,
Tensor<void*, 1, true> newCodePointers,
Tensor<void*, 1, true> newIndexPointers,
int* listLengths,
void** listCodes,
void** listIndices) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < listIds.getSize(0)) {
int listId = listIds[i];
listLengths[listId] = newListLength[i];
listCodes[listId] = newCodePointers[i];
listIndices[listId] = newIndexPointers[i];
}
}
void
runUpdateListPointers(Tensor<int, 1, true>& listIds,
Tensor<int, 1, true>& newListLength,
Tensor<void*, 1, true>& newCodePointers,
Tensor<void*, 1, true>& newIndexPointers,
thrust::device_vector<int>& listLengths,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
cudaStream_t stream) {
int numThreads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice());
int numBlocks = utils::divUp(listIds.getSize(0), numThreads);
dim3 grid(numBlocks);
dim3 block(numThreads);
runUpdateListPointers<<<grid, block, 0, stream>>>(
listIds, newListLength, newCodePointers, newIndexPointers,
listLengths.data().get(),
listCodes.data().get(),
listIndices.data().get());
CUDA_TEST_ERROR();
}
//
// IVF PQ append
//
template <IndicesOptions Opt>
__global__ void
ivfpqInvertedListAppend(Tensor<int, 1, true> listIds,
Tensor<int, 1, true> listOffset,
Tensor<int, 2, true> encodings,
Tensor<long, 1, true> indices,
void** listCodes,
void** listIndices) {
int encodingToAdd = blockIdx.x * blockDim.x + threadIdx.x;
if (encodingToAdd >= listIds.getSize(0)) {
return;
}
int listId = listIds[encodingToAdd];
int offset = listOffset[encodingToAdd];
// Add vector could be invalid (contains NaNs etc)
if (listId == -1 || offset == -1) {
return;
}
auto encoding = encodings[encodingToAdd];
long index = indices[encodingToAdd];
if (Opt == INDICES_32_BIT) {
// FIXME: there could be overflow here, but where should we check this?
((int*) listIndices[listId])[offset] = (int) index;
} else if (Opt == INDICES_64_BIT) {
((long*) listIndices[listId])[offset] = (long) index;
} else {
// INDICES_CPU or INDICES_IVF; no indices are being stored
}
unsigned char* codeStart =
((unsigned char*) listCodes[listId]) + offset * encodings.getSize(1);
// FIXME: slow
for (int i = 0; i < encodings.getSize(1); ++i) {
codeStart[i] = (unsigned char) encoding[i];
}
}
void
runIVFPQInvertedListAppend(Tensor<int, 1, true>& listIds,
Tensor<int, 1, true>& listOffset,
Tensor<int, 2, true>& encodings,
Tensor<long, 1, true>& indices,
thrust::device_vector<void*>& listCodes,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
cudaStream_t stream) {
int numThreads = std::min(listIds.getSize(0), getMaxThreadsCurrentDevice());
int numBlocks = utils::divUp(listIds.getSize(0), numThreads);
dim3 grid(numBlocks);
dim3 block(numThreads);
#define RUN_APPEND(IND) \
do { \
ivfpqInvertedListAppend<IND><<<grid, block, 0, stream>>>( \
listIds, listOffset, encodings, indices, \
listCodes.data().get(), \
listIndices.data().get()); \
} while (0)
if ((indicesOptions == INDICES_CPU) || (indicesOptions == INDICES_IVF)) {
// no need to maintain indices on the GPU
RUN_APPEND(INDICES_IVF);
} else if (indicesOptions == INDICES_32_BIT) {
RUN_APPEND(INDICES_32_BIT);
} else if (indicesOptions == INDICES_64_BIT) {
RUN_APPEND(INDICES_64_BIT);
} else {
// unknown index storage type
FAISS_ASSERT(false);
}
CUDA_TEST_ERROR();
#undef RUN_APPEND
}
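// Note on the 32-bit FIXME above (an assumption about call sites, not original code):
// with INDICES_32_BIT the kernel narrows each `long` id to `int`, so ids above INT_MAX
// are silently truncated. One place to guard this is on the host, before the ids are
// copied to the GPU, e.g. by asserting each id fits in int with FAISS_ASSERT.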
//
// IVF flat append
//
__global__ void
ivfFlatIndicesAppend(Tensor<int, 1, true> listIds,
Tensor<int, 1, true> listOffset,
Tensor<long, 1, true> indices,
IndicesOptions opt,
void** listIndices) {
int vec = blockIdx.x * blockDim.x + threadIdx.x;
if (vec >= listIds.getSize(0)) {
return;
}
int listId = listIds[vec];
int offset = listOffset[vec];
// Add vector could be invalid (contains NaNs etc)
if (listId == -1 || offset == -1) {
return;
}
long index = indices[vec];
if (opt == INDICES_32_BIT) {
// FIXME: there could be overflow here, but where should we check this?
((int*) listIndices[listId])[offset] = (int) index;
} else if (opt == INDICES_64_BIT) {
((long*) listIndices[listId])[offset] = (long) index;
}
}
template <typename Codec>
__global__ void
ivfFlatInvertedListAppend(Tensor<int, 1, true> listIds,
Tensor<int, 1, true> listOffset,
Tensor<float, 2, true> vecs,
void** listData,
Codec codec) {
int vec = blockIdx.x;
int listId = listIds[vec];
int offset = listOffset[vec];
// Add vector could be invalid (contains NaNs etc)
if (listId == -1 || offset == -1) {
return;
}
// Handle whole encoding (only thread 0 will handle the remainder)
int limit = utils::divDown(vecs.getSize(1), Codec::kDimPerIter);
int i;
for (i = threadIdx.x; i < limit; i += blockDim.x) {
int realDim = i * Codec::kDimPerIter;
float toEncode[Codec::kDimPerIter];
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
toEncode[j] = vecs[vec][realDim + j];
}
codec.encode(listData[listId], offset, i, toEncode);
}
// Handle remainder with a single thread, if any
if (Codec::kDimPerIter > 1) {
int realDim = limit * Codec::kDimPerIter;
// Was there any remainder?
if (realDim < vecs.getSize(1)) {
if (threadIdx.x == 0) {
float toEncode[Codec::kDimPerIter];
// How many remaining that we need to encode
int remaining = vecs.getSize(1) - realDim;
#pragma unroll
for (int j = 0; j < Codec::kDimPerIter; ++j) {
int idx = realDim + j;
toEncode[j] = idx < vecs.getSize(1) ? vecs[vec][idx] : 0.0f;
}
codec.encodePartial(listData[listId], offset, i, remaining, toEncode);
}
}
}
}
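// Worked example for the remainder handling above (illustrative): with vecs.getSize(1)
// == 10 and a codec where Codec::kDimPerIter == 4, limit = divDown(10, 4) = 2, so the
// strided loop encodes dims [0, 8); thread 0 then calls encodePartial with
// remaining == 2 for dims 8..9, and the unused toEncode slots are zero-filled.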
void
runIVFFlatInvertedListAppend(Tensor<int, 1, true>& listIds,
Tensor<int, 1, true>& listOffset,
Tensor<float, 2, true>& vecs,
Tensor<long, 1, true>& indices,
bool useResidual,
Tensor<float, 2, true>& residuals,
GpuScalarQuantizer* scalarQ,
thrust::device_vector<void*>& listData,
thrust::device_vector<void*>& listIndices,
IndicesOptions indicesOptions,
cudaStream_t stream) {
int dim = vecs.getSize(1);
int maxThreads = getMaxThreadsCurrentDevice();
// First, append the indices that we're about to add, if any
if (indicesOptions != INDICES_CPU && indicesOptions != INDICES_IVF) {
int blocks = utils::divUp(vecs.getSize(0), maxThreads);
ivfFlatIndicesAppend<<<blocks, maxThreads, 0, stream>>>(
listIds,
listOffset,
indices,
indicesOptions,
listIndices.data().get());
}
// Each block will handle appending a single vector
#define RUN_APPEND \
do { \
dim3 grid(vecs.getSize(0)); \
dim3 block(std::min(dim / codec.kDimPerIter, maxThreads)); \
\
ivfFlatInvertedListAppend \
<<<grid, block, 0, stream>>>( \
listIds, \
listOffset, \
useResidual ? residuals : vecs, \
listData.data().get(), \
codec); \
} while (0)
if (!scalarQ) {
CodecFloat codec(dim * sizeof(float));
RUN_APPEND;
} else {
switch (scalarQ->qtype) {
case QuantizerType::QT_8bit:
{
if (false) {
// if (dim % 4 == 0) {
Codec<(int)QuantizerType::QT_8bit, 4>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
RUN_APPEND;
} else {
Codec<(int)QuantizerType::QT_8bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
RUN_APPEND;
}
}
break;
case QuantizerType::QT_8bit_uniform:
{
// if (dim % 4 == 0) {
if (false) {
Codec<(int)QuantizerType::QT_8bit_uniform, 4>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
RUN_APPEND;
} else {
Codec<(int)QuantizerType::QT_8bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
RUN_APPEND;
}
}
break;
case QuantizerType::QT_fp16:
{
// if (dim % 2 == 0) {
if (false) {
Codec<(int)QuantizerType::QT_fp16, 2>
codec(scalarQ->code_size);
RUN_APPEND;
} else {
Codec<(int)QuantizerType::QT_fp16, 1>
codec(scalarQ->code_size);
RUN_APPEND;
}
}
break;
case QuantizerType::QT_8bit_direct:
{
Codec<(int)QuantizerType::QT_8bit_direct, 1>
codec(scalarQ->code_size);
RUN_APPEND;
}
break;
case QuantizerType::QT_4bit:
{
Codec<(int)QuantizerType::QT_4bit, 1>
codec(scalarQ->code_size,
scalarQ->gpuTrained.data(),
scalarQ->gpuTrained.data() + dim);
RUN_APPEND;
}
break;
case QuantizerType::QT_4bit_uniform:
{
Codec<(int)QuantizerType::QT_4bit_uniform, 1>
codec(scalarQ->code_size, scalarQ->trained[0], scalarQ->trained[1]);
RUN_APPEND;
}
break;
default:
// unimplemented, should be handled at a higher level
FAISS_ASSERT(false);
}
}
CUDA_TEST_ERROR();
#undef RUN_APPEND
}
} } // namespace
namespace hpc {
namespace rll {
namespace cuda {
// random seeding, kept consistent with Caffe's implementation
int64_t cluster_seedgen(void) {
int64_t s, seed, pid;
FILE* f = fopen("/dev/urandom", "rb");
if (f && fread(&seed, 1, sizeof(seed), f) == sizeof(seed)) {
fclose(f);
return seed;
}
LOG(INFO) << "System entropy source not available, "
"using fallback algorithm to generate seed instead.";
if (f)
fclose(f);
pid = getpid();
s = time(NULL);
seed = std::abs(((s * 181) * ((pid - 83) * 359)) % 104729);
return seed;
}
void LstmForward(
const std::vector<torch::Tensor>& inputs,
std::vector<torch::Tensor>& outputs,
float dropout_threshold) {
unsigned int index = 0;
const torch::Tensor& x0 = inputs[index++];
const torch::Tensor& h0 = inputs[index++];
const torch::Tensor& c0 = inputs[index++];
const torch::Tensor& wx = inputs[index++];
const torch::Tensor& wh = inputs[index++];
const torch::Tensor& bias = inputs[index++];
const torch::Tensor& ln_gamma = inputs[index++];
const torch::Tensor& ln_beta = inputs[index++];
index = 0;
torch::Tensor& xbuf = outputs[index++];
torch::Tensor& hbuf = outputs[index++];
torch::Tensor& hn = outputs[index++];
torch::Tensor& cn = outputs[index++];
torch::Tensor& ifog = outputs[index++];
torch::Tensor& ym = outputs[index++];
torch::Tensor& ln_in = outputs[index++];
torch::Tensor& ln_mean = outputs[index++];
torch::Tensor& ln_rstd = outputs[index++];
torch::Tensor& dropout_mask = outputs[index++];
const unsigned int seq_len = x0.size(0);
const unsigned int batch_size = x0.size(1);
const unsigned int input_size = x0.size(2);
const unsigned int num_layers = h0.size(0);
const unsigned int hidden_size = h0.size(2);
const float* inputptr = (float*)(x0.data_ptr());
const float* h0ptr = (float*)(h0.data_ptr());
const float* c0ptr = (float*)(c0.data_ptr());
const float* wxptr = (float*)(wx.data_ptr());
const float* whptr = (float*)(wh.data_ptr());
const float* biasptr = (float*)(bias.data_ptr());
const float* ln_gammaptr = (float*)(ln_gamma.data_ptr());
const float* ln_betaptr = (float*)(ln_beta.data_ptr());
float* xbufptr = (float*)(xbuf.data_ptr());
float* hbufptr = (float*)(hbuf.data_ptr());
float* hnptr = (float*)(hn.data_ptr());
float* cnptr = (float*)(cn.data_ptr());
float* ifogptr = (float*)(ifog.data_ptr());
float* outputptr = (float*)(ym.data_ptr());
float* ln_in_ptr = (float*)(ln_in.data_ptr());
float* ln_mean_ptr = (float*)(ln_mean.data_ptr());
float* ln_rstd_ptr = (float*)(ln_rstd.data_ptr());
unsigned int* maskptr = reinterpret_cast<unsigned int*>((int32_t*)(dropout_mask.data_ptr()));
float onedata = 1;
float zerodata = 0;
// create handles
cublasHandle_t cublas_handle;
checkCublasErr(cublasCreate(&cublas_handle));
curandGenerator_t gen;
if (dropout_threshold > 0) {
checkCurandErr(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
checkCurandErr(curandSetPseudoRandomGeneratorSeed(gen, cluster_seedgen()));
}
// TODO pay attention to wx shape change
unsigned int wxidx[num_layers];
wxidx[0] = input_size;
for (int l = 0; l < num_layers - 1; l++) {
wxidx[l + 1] = hidden_size;
}
unsigned int wxoffset[num_layers];
wxoffset[0] = 0;
for (int l = 0; l < num_layers - 1; l++) {
wxoffset[l + 1] = wxoffset[l] + wxidx[l] * wxidx[l + 1] * 4;
}
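  // Worked example of the packed wx layout above (illustrative): with num_layers == 3,
  // input_size == I and hidden_size == H, wxidx == {I, H, H} and
  // wxoffset == {0, 4*I*H, 4*I*H + 4*H*H}, i.e. layer l's input-to-gate weights start
  // at wxptr + wxoffset[l] and contain wxidx[l] * 4 * hidden_size floats.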
for (int l = 0; l < num_layers; l++) {
const float* xdata = (l == 0 ? inputptr : (outputptr + (l - 1) * seq_len * batch_size * hidden_size));
const float* wxdata = wxptr + wxoffset[l];
const float* whdata = whptr + l * hidden_size * (hidden_size * 4);
const float* biasdata = biasptr + l * (hidden_size * 4);
const float* ln_gamma_x = ln_gammaptr + l * hidden_size * 4 * 2;
const float* ln_gamma_h = ln_gammaptr + l * hidden_size * 4 * 2 + hidden_size * 4;
const float* ln_beta_x = ln_betaptr + l * hidden_size * 4 * 2;
const float* ln_beta_h = ln_betaptr + l * hidden_size * 4 * 2 + hidden_size * 4;
float* ln_x = ln_in_ptr + l * seq_len * batch_size * hidden_size * 4 * 2;
float* ln_h = ln_in_ptr + l * seq_len * batch_size * hidden_size * 4 * 2 + seq_len * batch_size * hidden_size * 4;
float* ln_mean_x = ln_mean_ptr + l * seq_len * batch_size * 2;
float* ln_mean_h = ln_mean_ptr + l * seq_len * batch_size * 2 + seq_len * batch_size;
float* ln_rstd_x = ln_rstd_ptr + l * seq_len * batch_size * 2;
float* ln_rstd_h = ln_rstd_ptr + l * seq_len * batch_size * 2 + seq_len * batch_size;
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
hidden_size * 4, seq_len * batch_size, wxidx[l],
&onedata, wxdata, hidden_size * 4, xdata, wxidx[l], &zerodata, ln_x, hidden_size * 4));
// layernorm
unsigned int block_size = DEFAULT_WARP_NUM * WARP_SIZE;
unsigned int grid_size = seq_len * batch_size;
layernorm<<<grid_size, block_size>>>(
hidden_size * 4, ln_x, ln_gamma_x, ln_beta_x, ln_mean_x, ln_rstd_x, xbufptr);
for (int s = 0; s < seq_len; s++) {
const float* xbufdata = xbufptr + s * batch_size * (hidden_size * 4);
const float* prehdata = (s == 0 ? (h0ptr + l * batch_size * hidden_size)
: (hnptr + (s - 1) * num_layers * batch_size * hidden_size + l * batch_size * hidden_size));
const float* precdata = (s == 0 ? (c0ptr + l * batch_size * hidden_size)
: (cnptr + (s - 1) * num_layers * batch_size * hidden_size + l * batch_size * hidden_size));
float* hdata = hnptr + s * num_layers * batch_size * hidden_size + l * batch_size * hidden_size;
float* cdata = cnptr + s * num_layers * batch_size * hidden_size + l * batch_size * hidden_size;
float* ifogdata = ifogptr + l * seq_len * batch_size * hidden_size * 4 + s * batch_size * hidden_size * 4;
float* outputdata = outputptr + l * seq_len * batch_size * hidden_size + s * batch_size * hidden_size;
float* ln_h_s = ln_h + s * batch_size * hidden_size * 4;
float* ln_mean_h_s = ln_mean_h + s * batch_size;
float* ln_rstd_h_s = ln_rstd_h + s * batch_size;
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
hidden_size * 4, batch_size, hidden_size,
&onedata, whdata, hidden_size * 4, prehdata, hidden_size, &zerodata, ln_h_s, hidden_size * 4));
// layernorm
{
unsigned int block_size = DEFAULT_WARP_NUM * WARP_SIZE;
unsigned int grid_size = batch_size;
layernorm<<<grid_size, block_size>>>(
hidden_size * 4, ln_h_s, ln_gamma_h, ln_beta_h, ln_mean_h_s, ln_rstd_h_s, hbufptr);
}
{
dim3 block_size = {DEFAULT_WARP_NUM * WARP_SIZE, 1, 1};
dim3 grid_size = {(hidden_size + block_size.x - 1) / block_size.x, batch_size, 1};
activation<<<grid_size, block_size>>>(
batch_size, hidden_size, xbufdata , hbufptr, biasdata,
prehdata, precdata, hdata, cdata, ifogdata, outputdata);
}
}
// dropout
if (dropout_threshold > 0 && l != num_layers - 1) {
float* dropoutdata = outputptr + l * seq_len * batch_size * hidden_size;
unsigned int maskstride = seq_len * batch_size * hidden_size;
unsigned int* maskdata = maskptr + l * maskstride;
checkCurandErr(curandGenerate(gen, maskdata, maskstride));
float dropout_scale = 1. / (1. - dropout_threshold);
unsigned int uint_threshold = static_cast<unsigned int>(UINT_MAX * dropout_threshold);
unsigned int block_size = DEFAULT_WARP_NUM * WARP_SIZE;
unsigned int grid_size = (maskstride + block_size - 1) / block_size;
dropout<<<grid_size, block_size>>>(
maskstride, uint_threshold, dropout_scale, maskdata, dropoutdata);
}
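    // Dropout mask worked example (illustrative; the dropout kernel itself is defined
    // elsewhere): curandGenerate fills maskdata with uniform 32-bit integers, and the
    // usual inverted-dropout convention drops an element when its mask value falls below
    // uint_threshold ~= UINT_MAX * p while scaling survivors by 1/(1-p); e.g. for
    // dropout_threshold = 0.25, uint_threshold ~= 0x3FFFFFFF and dropout_scale ~= 1.33.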
}
// destroy handles
checkCublasErr(cublasDestroy(cublas_handle));
if (dropout_threshold > 0) {
checkCurandErr(curandDestroyGenerator(gen));
}
}
void LstmBackward(
const std::vector<torch::Tensor>& inputs,
std::vector<torch::Tensor>& outputs,
float dropout_threshold) {
unsigned int index = 0;
const torch::Tensor& x0 = inputs[index++];
const torch::Tensor& h0 = inputs[index++];
const torch::Tensor& c0 = inputs[index++];
const torch::Tensor& wx = inputs[index++];
const torch::Tensor& wh = inputs[index++];
const torch::Tensor& hn = inputs[index++];
const torch::Tensor& cn = inputs[index++];
const torch::Tensor& ifogbuf = inputs[index++];
const torch::Tensor& ym = inputs[index++];
const torch::Tensor& ln_in = inputs[index++];
const torch::Tensor& ln_mean = inputs[index++];
const torch::Tensor& ln_rstd = inputs[index++];
const torch::Tensor& ln_gamma = inputs[index++];
const torch::Tensor& dropout_mask = inputs[index++];
index = 0;
torch::Tensor& dgatebuf = outputs[index++];
torch::Tensor& xbuf = outputs[index++];
torch::Tensor& hbuf = outputs[index++];
torch::Tensor& dx = outputs[index++];
torch::Tensor& dwx = outputs[index++];
torch::Tensor& dwh = outputs[index++];
torch::Tensor& dbias = outputs[index++];
torch::Tensor& d_ln_gamma = outputs[index++];
torch::Tensor& d_ln_beta = outputs[index++];
torch::Tensor& dy = outputs[index++];
torch::Tensor& dh = outputs[index++];
torch::Tensor& dc = outputs[index++];
const unsigned int seq_len = x0.size(0);
const unsigned int batch_size = x0.size(1);
const unsigned int input_size = x0.size(2);
const unsigned int num_layers = h0.size(0);
const unsigned int hidden_size = h0.size(2);
const float* x0ptr = (float*)(x0.data_ptr());
const float* ifogptr = (float*)(ifogbuf.data_ptr());
const float* ymptr = (float*)(ym.data_ptr());
const float* h0ptr = (float*)(h0.data_ptr());
const float* c0ptr = (float*)(c0.data_ptr());
const float* hnptr = (float*)(hn.data_ptr());
const float* cnptr = (float*)(cn.data_ptr());
const float* wxptr = (float*)(wx.data_ptr());
const float* whptr = (float*)(wh.data_ptr());
const float* ln_in_ptr = (float*)(ln_in.data_ptr());
const float* ln_mean_ptr = (float*)(ln_mean.data_ptr());
const float* ln_rstd_ptr = (float*)(ln_rstd.data_ptr());
const float* ln_gammaptr = (float*)(ln_gamma.data_ptr());
const unsigned int* maskptr = reinterpret_cast<unsigned int*>((int32_t*)(dropout_mask.data_ptr()));
float* dgatebufptr = (float*)(dgatebuf.data_ptr());
float* xbufptr = (float*)(xbuf.data_ptr());
float* hbufptr = (float*)(hbuf.data_ptr());
float* dyptr = (float*)(dy.data_ptr());
float* dxptr = (float*)(dx.data_ptr());
float* dhptr = (float*)(dh.data_ptr());
float* dcptr = (float*)(dc.data_ptr());
float* dwxptr = (float*)(dwx.data_ptr());
float* dwhptr = (float*)(dwh.data_ptr());
float* dbiasptr = (float*)(dbias.data_ptr());
float* ln_dgammaptr = (float*)(d_ln_gamma.data_ptr());
float* ln_dbetaptr = (float*)(d_ln_beta.data_ptr());
float onedata = 1;
float zerodata = 0;
cublasHandle_t cublas_handle;
checkCublasErr(cublasCreate(&cublas_handle));
// TODO pay attention to wx shape change
unsigned int wxidx[num_layers + 1];
wxidx[0] = input_size;
for (int l = 0; l < num_layers; l++) {
wxidx[l + 1] = hidden_size;
}
unsigned int wxoffset[num_layers + 1];
wxoffset[0] = 0;
unsigned int totalwx = 0;
for (int l = 0; l < num_layers; l++) {
totalwx += wxidx[l] * wxidx[l + 1] * 4;
wxoffset[l + 1] = wxoffset[l] + wxidx[l] * wxidx[l + 1] * 4;
}
checkCudaErr(cudaMemsetAsync(dxptr, 0, seq_len * batch_size * input_size * sizeof(float)));
checkCudaErr(cudaMemsetAsync(dwxptr, 0, totalwx * sizeof(float)));
checkCudaErr(cudaMemsetAsync(dwhptr, 0, num_layers * hidden_size * hidden_size * 4 * sizeof(float)));
checkCudaErr(cudaMemsetAsync(dbiasptr, 0, num_layers * hidden_size * 4 * sizeof(float)));
checkCudaErr(cudaMemsetAsync(ln_dgammaptr, 0, num_layers * hidden_size * 4 * 2 * sizeof(float)));
checkCudaErr(cudaMemsetAsync(ln_dbetaptr, 0, num_layers * hidden_size * 4 * 2 * sizeof(float)));
for (int l = num_layers - 1; l >= 0; l--) {
// dropout
if (dropout_threshold > 0 && l != num_layers - 1) {
float* dropoutdata = dyptr;
unsigned int maskstride = seq_len * batch_size * hidden_size;
const unsigned int* maskdata = maskptr + l * maskstride;
float dropout_scale = 1. / (1. - dropout_threshold);
unsigned int uint_threshold = static_cast<unsigned int>(UINT_MAX * dropout_threshold);
unsigned int block_size = DEFAULT_WARP_NUM * WARP_SIZE;
unsigned int grid_size = (maskstride + block_size - 1) / block_size;
dropout_backward<<<grid_size, block_size>>>(
maskstride, uint_threshold, dropout_scale, maskdata, dropoutdata);
}
// layernorm
const float* ln_gamma_x = ln_gammaptr + l * hidden_size * 4 * 2;
const float* ln_gamma_h = ln_gammaptr + l * hidden_size * 4 * 2 + hidden_size * 4;
float* ln_dgamma_x = ln_dgammaptr + l * hidden_size * 4 * 2;
float* ln_dgamma_h = ln_dgammaptr + l * hidden_size * 4 * 2 + hidden_size * 4;
float* ln_dbeta_x = ln_dbetaptr + l * hidden_size * 4 * 2;
float* ln_dbeta_h = ln_dbetaptr + l * hidden_size * 4 * 2 + hidden_size * 4;
// lstm
const float* wxdata = wxptr + wxoffset[l];
float* dwxdata = dwxptr + wxoffset[l];
const float* whdata = whptr + l * hidden_size * hidden_size * 4;
float* dwhdata = dwhptr + l * hidden_size * hidden_size * 4;
float* dbiasdata = dbiasptr + l * hidden_size * 4;
checkCudaErr(cudaMemsetAsync(dhptr, 0, batch_size * hidden_size * sizeof(float)));
checkCudaErr(cudaMemsetAsync(dcptr, 0, batch_size * hidden_size * sizeof(float)));
float* dxlayer = (l == 0 ? dxptr : dyptr);
const float* xlayer = (l == 0 ? x0ptr : (ymptr + (l - 1) * seq_len * batch_size * hidden_size));
for (int s = seq_len - 1; s >= 0; s--) {
const float* cdata = cnptr + s * num_layers * batch_size * hidden_size + l * batch_size * hidden_size;
const float* prehdata = (s == 0 ? (h0ptr + l * batch_size * hidden_size)
: (hnptr + (s - 1) * num_layers * batch_size * hidden_size + l * batch_size * hidden_size));
const float* precdata = (s == 0 ? (c0ptr + l * batch_size * hidden_size)
: (cnptr + (s - 1) * num_layers * batch_size * hidden_size + l * batch_size * hidden_size));
const float* ifogdata = ifogptr + l * seq_len * batch_size * hidden_size * 4 + s * batch_size * hidden_size * 4;
const float* dydata = dyptr + s * batch_size * hidden_size;
const float* xdata = xlayer + s * batch_size * wxidx[l];
float* dxdata = dxlayer + s * batch_size * wxidx[l];
{
dim3 block_size = {DEFAULT_WARP_NUM * WARP_SIZE, 1, 1};
dim3 grid_size = {(hidden_size + block_size.x - 1) / block_size.x, batch_size, 1};
activation_backward<<<grid_size, block_size>>>(
batch_size, hidden_size, dydata, cdata, precdata, ifogdata,
dgatebufptr, dhptr, dcptr, dbiasdata);
}
// layernorm
const float* ln_x = ln_in_ptr + l * seq_len * batch_size * hidden_size * 4 * 2 +
s * batch_size * hidden_size * 4;
const float* ln_h = ln_in_ptr + l * seq_len * batch_size * hidden_size * 4 * 2 +
seq_len * batch_size * hidden_size * 4 + s * batch_size * hidden_size * 4;
const float* ln_mean_x = ln_mean_ptr + l * seq_len * batch_size * 2 + s * batch_size;
const float* ln_mean_h = ln_mean_ptr + l * seq_len * batch_size * 2 + seq_len * batch_size + s * batch_size;
const float* ln_rstd_x = ln_rstd_ptr + l * seq_len * batch_size * 2 + s * batch_size;
const float* ln_rstd_h = ln_rstd_ptr + l * seq_len * batch_size * 2 + seq_len * batch_size + s * batch_size;
{
unsigned int block_size = DEFAULT_WARP_NUM * WARP_SIZE;
unsigned int grid_size = batch_size;
        // xbufptr holds seq_len blocks (filled during the forward pass); the backward pass only uses the first block
layernorm_backward<<<grid_size, block_size>>>(
hidden_size * 4, dgatebufptr, ln_x, ln_mean_x, ln_rstd_x, ln_gamma_x, ln_dgamma_x, ln_dbeta_x, xbufptr);
layernorm_backward<<<grid_size, block_size>>>(
hidden_size * 4, dgatebufptr, ln_h, ln_mean_h, ln_rstd_h, ln_gamma_h, ln_dgamma_h, ln_dbeta_h, hbufptr);
}
// dwx += torch.matmul(x_t, d_gate)
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T,
hidden_size * 4, wxidx[l], batch_size,
&onedata, xbufptr, hidden_size * 4, xdata, wxidx[l],
&onedata, dwxdata, hidden_size * 4));
// dwh += torch.matmul(h_t, d_gate)
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T,
hidden_size * 4, hidden_size, batch_size,
&onedata, hbufptr, hidden_size * 4, prehdata, hidden_size,
&onedata, dwhdata, hidden_size * 4));
// dx = torch.matmul(d_gate, wx_t)
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
wxidx[l], batch_size, hidden_size * 4,
&onedata, wxdata, hidden_size * 4, xbufptr, hidden_size * 4,
&zerodata, dxdata, wxidx[l]));
// dh = torch.matmul(d_gate, wh_t)
checkCublasErr(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N,
hidden_size, batch_size, hidden_size * 4,
&onedata, whdata, hidden_size * 4, hbufptr, hidden_size * 4,
&zerodata, dhptr, hidden_size));
}
}
// destroy handles
checkCublasErr(cublasDestroy(cublas_handle));
}
} // namespace cuda
} // namespace rll
} // namespace hpc
#include <algorithm>
#include <string>
#include <vector>
#include <map>
#include <assert.h>
#include <helper_timer.h>
#include "../../nvmatrix/include/nvmatrix.cuh"
//#include "experimental/akrizhevsky/g3/mactruck-gpu-tests/gpu_util.cuh"
#include "weights.cuh"
#include "convnet.cuh"
#include "cost.cuh"
#include "neuron.cuh"
#include "data.cuh"
#include "layer_kernels.cuh"
#include "streambroadcast.cuh"
#include "actbroadcaster.cuh"
#include "gradreducer.cuh"
#include "util.cuh"
#include "timer.cuh"
#include "memorysource.cuh"
class Cost;
class ConvNet;
class ConvNetThread;
class CostLayer;
class DataLayer;
class Layer;
class ActBroadcaster;
class BroadcastMessage;
class IActGradReducer;
class Weights;
class WeightList;
typedef std::vector<Layer*> LayerV;
class BinomialCrossEntOperator {
protected:
float _posWeight;
public:
BinomialCrossEntOperator(float posWeight) : _posWeight(posWeight) {
}
__device__ inline float operator()(const float t, const float y) const {
return _posWeight * t * safelog(y) + (1.0f - t) * safelog(1.0f - y);
}
};
class CrossEntOperator {
protected:
float _posWeight;
public:
CrossEntOperator(float posWeight) : _posWeight(posWeight) {
}
__device__ inline float operator()(const float t, const float y) const {
return _posWeight * t * safelog(y);
}
};
/*
* Abstract layer.
*/
class Layer {
protected:
ConvNetThread* _convNetThread;
// This is a vector[#layers_next]
std::vector<Layer*> _next;
// This is a vector[#replicas_prev][#layers_prev]
std::map<int, std::vector<Layer*> > _prev;
int _rcvdFInputMsgs;
std::map<int, int> _numComputedActsGrads;
int _rcvdBInputMsgs;
int _numOutputs;
std::map<int, NVMatrix*> _inputs; // input idx -> matrix
std::map<int, MemoryView*> _memSrcActs; // device id -> memory source
std::map<int, MemoryView*> _memSrcActsGrad; // device id -> memory source
bool _gradConsumer, _foundGradConsumers, _trans;
std::map<int,bool> _bwdTerminal; // One bool per pass
int _numGradProducersNext;
int _actsTarget, _actsGradTarget;
std::string _name, _type;
intv _nextDeviceIDs, _prevDeviceIDs;
HostNVMatrix _hostMemFwd;
// New replica-related stuff:
std::map<int,Layer*> _replicas; // NOTE: a layer is its own sibling, too
    // Previous layers sorted by device ID, in reverse of the order in which they are processed by
    // the sequential grad reducer. Map from replica -> device id -> layers.
std::map<int,std::map<int,std::set<Layer*> > > _prevByDevice;
std::map<std::string, int> _inputIndices;
int _replicaID;
int _numReplicas;
int _numReplicasPrev, _numReplicasNext;
Queue<int> _broadcastFinishQueue;
Queue<int> _reductionFinishQueue;
ActBroadcaster* _actBroadcaster;
IActGradReducer* _gradReducer;
Timer _timer;
bool _initialized;
virtual void fpropNext(PASS_TYPE passType, int passIdx);
virtual void truncBwdActs();
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) = 0;
virtual void bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
// Do nothing by default
}
virtual void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(!isGradProducer()); // Only do nothing if not grad producer
}
virtual void fpropCommon(PASS_TYPE passType) {
}
void bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx);
ActBroadcaster& getActBroadcaster();
IActGradReducer& getGradReducer();
int getInputIdx(std::string& parentName);
void setInputIdx(std::string& parentName, int idx);
public:
static bool _saveActsGrad, _saveActs;
Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans);
virtual ~Layer();
virtual bool fprop(PASS_TYPE passType, int passIdx);
void fprop(NVMatrix& v, int inpIdx, PASS_TYPE passType, int passIdx);
virtual void fprop(std::map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx);
virtual void bprop(PASS_TYPE passType, int passIdx);
virtual void bprop(NVMatrix& v, PASS_TYPE passType, int passIdx);
virtual void reset();
virtual void resetPassIdx();
int getNumCases(NVMatrix& v);
int& getNumComputedActsGrads(int deviceID);
int incRcvdBInputMsgs();
bool isGradConsumer();
bool hasGradProducerNext(std::string& layerName);
// Does this layer produce a gradient for any layer?
virtual bool isGradProducer();
// Does this layer produce a gradient for layer of given name?
virtual bool isGradProducer(std::string& layerName);
std::string& getName();
std::string& getType();
virtual void addNext(Layer& l);
virtual void addPrev(Layer& l, int replicaIdx);
virtual void addReplica(Layer& l);
std::map<int,std::vector<Layer*> >& getPrev();
std::vector<Layer*>& getNext();
virtual NVMatrix& getActs();
virtual NVMatrix& getActs(int deviceID);
virtual NVMatrix& getActs(int deviceID, int numCases);
virtual NVMatrix& getActsGrad();
virtual NVMatrix& getActsGrad(int deviceID);
virtual std::map<int,NVMatrix*> getAllActs();
virtual std::map<int, NVMatrix*> getAllActsGrads();
virtual bool postInit();
int getDeviceID();
ConvNetThread& getConvNetThread();
cudaStream_t getStream();
void syncStream();
void setBwdTerminal(int passIdx);
// Do nothing if this layer has no weights
virtual bool updateWeights() {
return false;
}
virtual bool constrainWeights() {
return false;
}
virtual void checkGradient() {
}
virtual void copyToCPU() {
}
virtual void copyToGPU() {
}
intv& getNextDeviceIDs() {
return _nextDeviceIDs;
}
int getReplicaID();
int getNumReplicas();
int getNumSiblingReplicas();
int getNumReplicasPrev();
int getNumReplicasNext();
int getNumOutputs();
void setMemorySourceActs(int deviceID, MemoryView& mem);
void setMemorySourceActsGrad(int deviceID, MemoryView& mem);
MemoryView& getMemorySourceActs(int deviceID);
MemoryView& getMemorySourceActsGrad(int deviceID);
int getFwdActiveInputReplicaIdx(int passIdx);
int getBwdActiveInputReplicaIdx(int passIdx);
int getFwdActiveReplicaIdx(int passIdx);
int getNumLayersPrev();
virtual int getNumInputReplicas();
int getNumExpectedBwdMsgs();
int getNumExpectedFwdMsgs();
int getReplicaIdx();
int getActivePassPeriod();
int getNumGradProducersNext();
virtual ConvNet& getConvNet();
};
class TwoDLayerInterface {
protected:
int _channels, _imgSize, _imgPixels;
public:
TwoDLayerInterface(PyObject* paramsDict);
};
class NeuronLayer : public Layer {
protected:
Neuron* _neuron;
std::string _neuronType;
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
virtual void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
virtual bool bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
class CrossEntLogisticGradientOperator {
private:
float _coeff, _posWeight;
public:
CrossEntLogisticGradientOperator(float coeff, float posWeight) : _coeff(coeff), _posWeight(posWeight) {
}
__device__ inline float operator()(const float y, const float t) const {
return _coeff * (_posWeight * t * (1.0f - y) + (t - 1.0f) * y);
}
};
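    // Consistency check for the operator above (illustrative): with _posWeight == 1,
    // _posWeight * t * (1 - y) + (t - 1) * y simplifies to t - y, the familiar gradient
    // of the combined sigmoid + cross-entropy objective, scaled by _coeff.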
NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
~NeuronLayer();
std::string& getNeuronType();
};
class WeightLayer : public Layer {
protected:
WeightList* _weights;
Weights *_biases;
NVMatrix _norm2;
float _wStep, _bStep;
int _weightUpdatePassPeriod;
void fpropCommon(PASS_TYPE passType);
void bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType);
virtual void bpropBiases(NVMatrix& v, PASS_TYPE passType) = 0;
virtual void bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) = 0;
virtual void _constrainWeights();
virtual float getGradScale(int inpIdx, PASS_TYPE passType);
virtual float getIncScale(int inpIdx, PASS_TYPE passType);
virtual float getBGradScale(PASS_TYPE passType);
virtual float getBIncScale();
virtual NVMatrix& getGradTarget(int inpIdx);
NVMatrix& getWeightMatrix(PASS_TYPE passType, int inpIdx);
NVMatrix& getBiasMatrix(PASS_TYPE passType);
public:
WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad);
virtual ~WeightLayer();
virtual bool updateWeights();
virtual bool constrainWeights();
virtual void copyToCPU();
virtual void copyToGPU();
virtual void checkGradient();
Weights& getWeights(int idx);
void addReplica(Layer& l);
virtual bool postInit();
};
class FCLayer : public WeightLayer {
protected:
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
virtual void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
virtual void bpropBiases(NVMatrix& v, PASS_TYPE passType);
virtual void bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType);
virtual void _constrainWeights();
public:
FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad);
FCLayer();
};
class SplitFCLayer : public FCLayer {
protected:
int _numParts;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
// void bpropBiases(NVMatrix& v, PASS_TYPE passType);
void bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType);
void splitWeights();
public:
SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad);
};
class SoftmaxLayer : public Layer {
protected:
bool _doUpperGrad;
NVMatrix _max, _sum;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
void setDoUpperGrad(bool b);
};
class ConcatenationLayer : public Layer {
protected:
intv* _copyOffsets;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
virtual ~ConcatenationLayer();
};
class PassThroughLayer : public Layer {
protected:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
virtual bool postInit();
};
class EltwiseSumLayer : public Layer {
protected:
floatv* _coeffs;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
~EltwiseSumLayer();
};
class EltwiseMaxLayer : public Layer {
protected:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class SumLayer : public Layer {
protected:
int _stride;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
SumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class DataCopyMessage {
public:
enum MESSAGE_TYPE {
COPY,
EXIT
};
protected:
CPUData* _cpuData;
int _passIdx;
bool _other;
DataCopyMessage::MESSAGE_TYPE _type;
DataCopyMessage(DataCopyMessage::MESSAGE_TYPE type) : _cpuData(NULL), _other(false), _passIdx(0), _type(type) {
}
public:
DataCopyMessage(CPUData& cpuData, bool other, int passIdx) : _cpuData(&cpuData), _other(other), _passIdx(passIdx), _type(DataCopyMessage::COPY) {
}
CPUData& getData() const {
return *_cpuData;
}
int getPassIdx() const {
return _passIdx;
}
bool isOther() const {
return _other;
}
DataCopyMessage::MESSAGE_TYPE getType() {
return _type;
}
};
class DataCopyExitMessage : public DataCopyMessage {
public:
DataCopyExitMessage() : DataCopyMessage(DataCopyMessage::EXIT) {
}
};
class DataCopyThread;
class DataLayer : public Layer {
protected:
bool _useBuffer;
int _dataIdx;
ConvNet* _convNet;
// std::map<int, NVMatrix*> _outputs2; // Buffer for copying data during computation
std::map<int, MemoryView*> _memSrcActs2; // Buffer for copying data during computation
std::map<int, cudaStream_t> _copyStreams;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
Queue<int> _copyFinishQueue;
DataCopyThread* _copier;
bool _outstandingCopyRequest;
int _start, _end;
public:
void fprop(PASS_TYPE passType, int passIdx, bool fromBuffer);
DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID);
~DataLayer();
NVMatrix& getActs(int deviceID);
// NVMatrix& getActs(int deviceID, bool other);
NVMatrix& getActs(int deviceID, bool other, int numCases);
bool isGradProducer();
void toggleBuffer(int passIdx);
void copyData(CPUData& data, bool other, int passIdx);
bool postInit();
ConvNet& getConvNet();
int getNumInputReplicas();
cudaStream_t getCopyStream(int deviceID);
Queue<int>& getCopyFinishQueue() {
return _copyFinishQueue;
}
void waitForCopyFinish();
int getDataIdx() const {
return _dataIdx;
}
int getStart() const {
return _start;
}
int getEnd() const {
return _end;
}
};
class DataCopyThread : public Thread {
protected:
DataLayer* _parent;
Queue<DataCopyMessage*> _queue;
HostNVMatrix _hostMemFwd;
Timer _requestTimer;
int _sleepUsec;
virtual void* run();
public:
DataCopyThread(DataLayer& parent, intv& cpus);
Queue<DataCopyMessage*>& getQueue();
void stop();
};
class LocalLayer : public WeightLayer {
protected:
intv* _padding, *_stride, *_filterSize, *_channels, *_imgSize, *_groups;
intv* _imgPixels, *_filterPixels, *_filterChannels;
int _modulesX, _modules, _numFilters;
public:
LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad);
virtual ~LocalLayer();
};
class ConvLayer : public LocalLayer {
protected:
int _sumWidth;
bool _sharedBiases;
floatv* _weightContrastNormMin, *_weightContrastNormMax;
NVMatrix _weightGradTmp;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void bpropBiases(NVMatrix& v, PASS_TYPE passType);
void bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType);
void truncBwdActs();
void _constrainWeights();
public:
ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
virtual ~ConvLayer();
};
class LocalUnsharedLayer : public LocalLayer {
protected:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void bpropBiases(NVMatrix& v, PASS_TYPE passType);
void bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType);
void _constrainWeights();
public:
LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class PoolLayer : public Layer, public TwoDLayerInterface {
protected:
int _sizeX, _start, _stride, _outputsX;
std::string _pool;
public:
PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans);
static PoolLayer& make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class AvgPoolLayer : public PoolLayer {
protected:
bool _sum;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class MaxPoolLayer : public PoolLayer {
protected:
bool _abs;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs);
};
class CrossMapPoolLayer : public Layer, public TwoDLayerInterface {
protected:
int _size, _start, _stride, _outputs;
std::string _pool;
public:
CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans);
static CrossMapPoolLayer& make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class CrossMapMaxPoolLayer : public CrossMapPoolLayer {
protected:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class RandomScaleLayer : public Layer, public TwoDLayerInterface {
protected:
int _tgtSize, _minScaledSize;
float _maxScale; // should be >= 1
NVMatrix _rescaledActs;
std::vector<double> _scaleProbs;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class CropLayer : public Layer, public TwoDLayerInterface {
protected:
int _tgtSize, _startX, _startY;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class NailbedLayer : public Layer, public TwoDLayerInterface {
protected:
int _start, _stride, _outputsX;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class GaussianBlurLayer : public Layer, public TwoDLayerInterface {
protected:
Matrix* _hFilter;
NVMatrix _filter;
NVMatrix _actGradsTmp;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void copyToGPU();
GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
~GaussianBlurLayer();
};
class HorizontalReflectionLayer : public Layer, public TwoDLayerInterface {
protected:
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID);
};
class ResizeLayer : public Layer, public TwoDLayerInterface {
protected:
float _scale;
int _tgtSize;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class DropoutLayer : public Layer {
protected:
bool _enable;
float _keep;
NVMatrix _keepMask;
public:
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
virtual void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void truncBwdActs();
DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
class DropoutSmallerThanOperator {
private:
float _keep, _scale;
public:
DropoutSmallerThanOperator(float keep) : _keep(keep), _scale(1.0f/keep) {
}
__device__ inline float operator()(const float x) const {
return (x < _keep) * _scale;
}
};
};
class Dropout2Layer : public DropoutLayer {
protected:
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class RGBToYUVLayer : public Layer {
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class RGBToLABLayer : public Layer {
protected:
bool _center;
public:
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class ResponseNormLayer : public Layer, public TwoDLayerInterface {
protected:
int _size;
float _scale, _pow;
float _minDiv;
NVMatrix _denoms;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void truncBwdActs();
public:
ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class CrossMapResponseNormLayer : public ResponseNormLayer {
protected:
bool _blocked;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class ContrastNormLayer : public ResponseNormLayer {
protected:
NVMatrix _meanDiffs;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
void truncBwdActs();
public:
ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class CostLayer : public Layer {
protected:
float _coeff;
doublev _costv;
NVMatrix _tmpbuf; // For error accumulation
int _numCases; // number of cases that the values in _costv were computed on
bool _aggregated;
void fpropCommon(PASS_TYPE passType);
public:
CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans);
void bprop(NVMatrix& v, PASS_TYPE passType, int passIdx);
bool fprop(PASS_TYPE passType, int passIdx);
int getNumCases();
virtual doublev& getCost();
float getCoeff();
bool isGradProducer();
void setSendTerminalMessages(bool send);
void resetPassIdx();
static CostLayer& make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID);
};
/*
* Input 0: labels
* Input 1: softmax outputs
*/
class CrossEntCostLayer : public CostLayer {
protected:
NVMatrix _trueLabelLogProbs, _correctProbs;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
/*
* Input 0: labels
* Input 1: softmax outputs
*/
class LogregCostLayer : public CostLayer {
protected:
NVMatrix _trueLabelLogProbs, _correctProbs, _topkProbs;
std::map<int,NVMatrix*> _probsAccum; // input replica idx -> nvmatrix
NVMatrix _maxProbs;
std::map<int,int> _numAccumed; // input replica idx -> int
int _topk;
bool _doCompute;
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
NVMatrix& getProbsAccum(int replicaIdx);
};
/*
* Input 0: labels
* Input 1: logistic outputs
*/
class BinomialCrossEntropyCostLayer : public CostLayer {
protected:
bool _computeSoftmaxErrorRate;
NVMatrix _tmpProbs, _tmpVec, _correctProbs;
float _posWeight;
virtual void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
float getPosWeight();
// Only for use with non-logistic units
class BinomialCrossEntGradientOperator {
private:
float _coeff, _posWeight;
public:
BinomialCrossEntGradientOperator(float coeff, float posWeight) : _coeff(coeff), _posWeight(posWeight) {
}
__device__ inline float operator()(const float t, const float y) const {
return _coeff * (_posWeight * __fdividef(t, y) + __fdividef(t - 1.0f, 1.0f - y));
}
};
};
/*
* Input 0: labels
* Input 1: logistic outputs
*/
class DetectionCrossEntropyCostLayer : public BinomialCrossEntropyCostLayer {
protected:
Matrix _hNumPositive, _hNumTruePositive, _hNumDeclaredPositive;
NVMatrix _numPositive, _numTrueNegative, _numTruePositive, _numDeclaredPositive;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
public:
DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
class SumOfSquaresCostLayer : public CostLayer {
protected:
NVMatrix _tmp;
void fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx);
void bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType);
public:
SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID);
};
#endif /* LAYER_CUH */
#include <nvidia/helper_cuda.h>
#define PI 3.141592653589793f
#define BLOCK_SIZE 256
// step size of the normals
// for PointXYZI
#define X_STEP 8
#define X_OFFSET 0
// for PointXYZ
//#define X_STEP 4
//#define X_OFFSET 0
__constant__ float c_rgbForMFaxes[7];
extern "C" void loadRGBvaluesForMFaxes()
{
float h_rgbForMFaxes[7];
for (uint32_t k=0; k<7; ++k)
{
union{
uint8_t asByte[4];
uint32_t asInt;
float asFloat;
} rgb;
rgb.asInt = 0;
if(k ==0)
rgb.asInt = (255 << 16) | (0 << 8) | 0;
else if (k ==1)
rgb.asInt = (255 << 16) | (100 << 8) | 100;
else if (k ==2)
rgb.asInt = (0 << 16) | (255 << 8) | 0;
else if (k ==3)
rgb.asInt = (100 << 16) | (255 << 8) | 100;
else if (k ==4)
rgb.asInt = (0 << 16) | (0 << 8) | 255;
else if (k ==5)
rgb.asInt = (100 << 16) | (100 << 8) | 255;
else if (k ==6)
rgb.asInt = (200 << 16) | (200 << 8) | 200;
h_rgbForMFaxes[k] = rgb.asFloat; //*(float *)(&rgb);
}
cudaMemcpyToSymbol(c_rgbForMFaxes, h_rgbForMFaxes , 7* sizeof(float));
}
/*
 * Given assignments z of normals x to MF axes, compute the cost function value.
 */
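// The per-normal penalty rho = err^2 / (err^2 + sigma_sq) used below is the
// Geman-McClure robust cost applied to the angular error err = acos(x_i^T mu_k),
// so gross outliers saturate at 1 instead of growing quadratically.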
__global__ void robustSquaredAngleCostFct(float *cost, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h) //, float *dbg)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
//const int tid = threadIdx.x;
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
__syncthreads(); // make sure the MF axes (mu) have been cached
if ((idx<w) && (idy<h))
{
// xi[tid*3] = x[tid];
// xi[tid*3+1] = x[tid+Nx];
// xi[tid*3+2] = x[tid+Nx*2];
unsigned short k = z[id];
if (k<6)
{
// k==6 means that xi is nan
float xiTy = x[id*X_STEP+X_OFFSET]*mui[k] + x[id*X_STEP+X_OFFSET+1]*mui[k+6]
+ x[id*X_STEP+X_OFFSET+2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
//float errSq = err*err;
rho[tid] = (err*err)/(err*err+sigma_sq);
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
rho[tid] += rho[tid + s];
__syncthreads();
}
if(tid==0 && rho[0]!=0 ) {
atomicAdd(&cost[0],rho[0]);
}
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctGPU(float *h_cost, float *d_cost,
// float *d_x, uint16_t *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//
//// float *d_dbg;
//// checkCudaErrors(cudaMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// checkCudaErrors(cudaMemcpy(d_cost, h_cost, 6* sizeof(float),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFct<<<blocks,threads>>>(d_cost,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_cost, d_cost, 6*sizeof(float),
// cudaMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(cudaMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// cudaMemcpyDeviceToHost));
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[0],dbg[1],dbg[2],dbg[3],dbg[4],dbg[5]);
//}
//#endif
//
/*
 * Compute the Jacobian of the robust squared cost function.
 */
__global__ void robustSquaredAngleCostFctJacobian(float *J, float *x,
unsigned short *z, float *mu, float sigma_sq, int w, int h)//, float *dbg)
{
const int DIM = 3;
__shared__ float mui[DIM*6];
// one J per column; BLOCK_SIZE columns; per column first 3 first col of J,
// second 3 columns second cols of J
__shared__ float J_shared[BLOCK_SIZE*3*3];
const int tid = threadIdx.x + blockDim.x*threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
#pragma unroll
for(int s=0; s<3*3; ++s) {
J_shared[tid+BLOCK_SIZE*s] = 0.0f;
}
__syncthreads(); // make sure the MF axes (mu) have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
unsigned short k = z[id]; // which MF axis does it belong to
if (k<6)// && k!=4 && k!=5)
{
int j = k/2; // which of the rotation columns does this belong to
float sign = (- float(k%2) +0.5f)*2.0f; // sign of the axis
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6]
+ xi[2]*mui[k+12];
xiTy = max(-1.0f,min(1.0f,xiTy));
float J_ =0.0f;
if (xiTy > 1.0f-1e-10)
{
// limit according to mathematica
J_ = -2.0f/sigma_sq;
}else{
float err = acosf(xiTy);
float err_sq = err*err;
float a = sqrtf(1.0f - xiTy*xiTy);
float b = (sigma_sq + err_sq);
// obtained using Mathematica
J_ = 2.0f*( (err*err_sq/(a*b*b)) - (err/(a*b)) );
// TODO could be simplified: see writeup!
}
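// Sanity check of the expression above (chain-rule sketch, not from the original writeup):
// with c = xi^T mu_k, err = acos(c), a = sqrt(1 - c^2), b = sigma_sq + err^2 and
// rho = err^2 / b, we have
//   d rho / d c = (d rho / d err) * (d err / d c)
//               = (2*err*sigma_sq / b^2) * (-1 / a)
//               = -2*err*sigma_sq / (a*b^2)
//               = 2*( err*err_sq/(a*b*b) - err/(a*b) ),
// which is the formula used above. As c -> 1, err/a -> 1 and b -> sigma_sq,
// so J_ -> -2/sigma_sq, matching the limit handled in the if-branch.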
//dbg[id] = J_;
J_shared[tid+(j*3+0)*BLOCK_SIZE] = sign*J_*xi[0];
J_shared[tid+(j*3+1)*BLOCK_SIZE] = sign*J_*xi[1];
J_shared[tid+(j*3+2)*BLOCK_SIZE] = sign*J_*xi[2];
}else{
//dbg[id] = 9999.0f;
}
}
//reduction.....
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
#pragma unroll
for( int k=0; k<3*3; ++k) {
int tidk = k*BLOCK_SIZE+tid;
J_shared[tidk] += J_shared[tidk + s];
}
__syncthreads();
}
#pragma unroll
for( int k=0; k<3*3; ++k) {
if(tid==k && J_shared[k*BLOCK_SIZE]!=0 ) {
atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
}
}
// //reduction.....
//#pragma unroll
// for( int k=0; k<3*3; ++k) {
// int tidk = k*BLOCK_SIZE+tid;
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// J_shared[tidk] += J_shared[tidk + s];
// __syncthreads();
// }
//
// if(tid==0 && J_shared[k*BLOCK_SIZE]!=0 ) {
// atomicAdd(&J[k],J_shared[k*BLOCK_SIZE]);
// }
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctJacobianGPU(float *h_J, float *d_J,
// float *d_x, unsigned short *d_z, float *d_mu, float sigma_sq, int w, int h)
//{
//// float *d_dbg;
//// checkCudaErrors(cudaMalloc((void **)&d_dbg, w * h * sizeof(float)));
//
// for(uint32_t k=0; k<3*3; ++k)
// h_J[k] =0.0f;
// checkCudaErrors(cudaMemcpy(d_J, h_J, 3*3* sizeof(float),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctJacobian<<<blocks,threads>>>(d_J,d_x,d_z,d_mu,
// sigma_sq,w,h);//,d_dbg);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_J, d_J, 3*3*sizeof(float),
// cudaMemcpyDeviceToHost));
//
//// float dbg[w*h];
//// checkCudaErrors(cudaMemcpy(dbg, d_dbg, w*h* sizeof(float),
//// cudaMemcpyDeviceToHost));
//// for (int i=20; i<h-20; ++i)
//// {
//// int offset = w*i + w/2;
//// printf("%.2f, %.2f, %.2f, %.2f, %.2f, %.2f \n",dbg[offset+0],dbg[offset+1],dbg[offset+2],dbg[offset+3],dbg[offset+4],dbg[offset+5]);
//// }
//}
//#endif
/*
 * Compute normal assignments as well as the cost function value under that
 * assignment. Assignments are based on the nearest MF axis in the
 * arc-length (angular) sense.
 */
__global__ void robustSquaredAngleCostFctAssignment(float *cost, uint32_t* N,
float *x, unsigned short *z, float* errs, float *mu, float sigma_sq,
int w, int h)
{
const int DIM = 3;
//__shared__ float xi[BLOCK_SIZE*3];
__shared__ float mui[DIM*6];
__shared__ float rho[BLOCK_SIZE];
__shared__ uint32_t Ni[BLOCK_SIZE];
const int tid = threadIdx.x + blockDim.x * threadIdx.y;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idy = threadIdx.y + blockDim.y * blockIdx.y;
const int id = idx + idy*w;
// caching
if(tid < DIM*6) mui[tid] = mu[tid];
rho[tid] = 0.0f;
Ni[tid] = 0;
__syncthreads(); // make sure the MF axes (mu) have been cached
if ((idx<w) && (idy<h))
{
float xi[3];
xi[0] = x[id*X_STEP+X_OFFSET+0];
xi[1] = x[id*X_STEP+X_OFFSET+1];
xi[2] = x[id*X_STEP+X_OFFSET+2];
float err_min = 9999999.0f;
unsigned short k_min = 6;
if((xi[0]!=xi[0] || xi[1]!=xi[1] || xi[2]!=xi[2])
|| xi[0]*xi[0]+xi[1]*xi[1]+xi[2]*xi[2] < 0.9f )
{
// xi is NaN or not (close to) unit length
k_min = 6;
err_min = .1f;
//if(X_STEP == 8) x[id*X_STEP+4] = 6.0f;
}else{
#pragma unroll
for (unsigned short k=0; k<6; ++k)
{
float xiTy = xi[0]*mui[k] + xi[1]*mui[k+6] + xi[2]*mui[k+12];
float err = acosf(max(-1.0f,min(1.0f,xiTy)));
if(err_min > err)
{
err_min = err;
k_min = k;
}
}
rho[tid] = (err_min*err_min)/(err_min*err_min+sigma_sq);
Ni[tid] = 1;
}
z[id] = k_min;
errs[id] = err_min;
if(X_STEP == 8)
{
x[id*X_STEP+X_OFFSET+4] = c_rgbForMFaxes[k_min];//float(k_min);
x[id*X_STEP+X_OFFSET+5] = float(k_min);//xi[0]; //float(k_min);
x[id*X_STEP+X_OFFSET+6] = err_min; //rgb;//xi[1]; //err_min;
// x[id*X_STEP+X_OFFSET+7] = 0.0f;//err_min; //err_min;
}
}
//reduction.....
// TODO: make it faster!
__syncthreads(); //sync the threads
#pragma unroll
for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
if(tid < s)
{
rho[tid] += rho[tid + s];
Ni[tid] += Ni[tid + s];
}
__syncthreads();
}
if(tid==0 && rho[0]!=0.0f) {
atomicAdd(&cost[0],rho[0]);
}
if(tid==1 && Ni[0]!=0 ) {
atomicAdd(N,Ni[0]);
}
// __syncthreads(); //sync the threads
//#pragma unroll
// for(int s=(BLOCK_SIZE)/2; s>0; s>>=1) {
// if(tid < s)
// Ni[tid] += Ni[tid + s];
// __syncthreads();
// }
//
// if(tid==0 && Ni[0]!=0 ) {
// atomicAdd(N,Ni[0]);
// }
}
//#ifndef WEIGHTED
//extern "C" void robustSquaredAngleCostFctAssignmentGPU(float *h_cost, float *d_cost,
// int *h_N, int *d_N, float *d_x, uint16_t *d_z, float *d_mu,
// float sigma_sq, int w, int h)
//{
//
// for(uint32_t k=0; k<6; ++k)
// h_cost[k] =0.0f;
// *h_N =0;
// checkCudaErrors(cudaMemcpy(d_cost, h_cost, 6* sizeof(float),
// cudaMemcpyHostToDevice));
// checkCudaErrors(cudaMemcpy(d_N, h_N, sizeof(int),
// cudaMemcpyHostToDevice));
//
// dim3 threads(16,16,1);
// dim3 blocks(w/16+(w%16>0?1:0), h/16+(h%16>0?1:0),1);
// robustSquaredAngleCostFctAssignment<<<blocks,threads>>>(d_cost,d_N,d_x,d_z,d_mu,
// sigma_sq,w,h);
// checkCudaErrors(cudaDeviceSynchronize());
//
// checkCudaErrors(cudaMemcpy(h_cost, d_cost, 6*sizeof(float),
// cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(h_N, d_N, sizeof(int),
// cudaMemcpyDeviceToHost));
//
//}
//#endif
#include "optimizationSO3_weighted.cu"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <c10/cuda/CUDAStream.h>
#include <c10/util/Optional.h>
#include "inplace_abn.h"
#include "utils.h"
#include "cuda_utils.cuh"
#include "inplace_abn_kernels.cuh"
#include "dispatch.h"
/***********************************************************************************************************************
* Templated implementations
**********************************************************************************************************************/
template<typename scalar_t, typename index_t>
std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_template(const at::Tensor& x_) {
// Normalize shape and get dimensions
auto x = normalize_shape(x_);
auto num = x.size(0), chn = x.size(1), sp = x.size(2);
// Type handling
using accscalar_t = at::acc_type<scalar_t, true>;
auto acc_options = x.options();
if (x.scalar_type() == at::ScalarType::Half) {
acc_options = acc_options.dtype(at::ScalarType::Float);
}
// Initialize output tensors
auto mean = at::empty({chn}, acc_options);
auto var = at::empty({chn}, acc_options);
auto count = at::full({1}, num * sp, x.options().dtype(at::ScalarType::Long));
// Make accessors
auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
// Kernel parameters
auto stream = at::cuda::getCurrentCUDAStream();
dim3 blocks(chn);
int tf = getNumThreads(sp);
dim3 threads(tf, std::max<int>(1, MAX_BLOCK_SIZE / tf));
// Invoke kernel
statistics_kernel<scalar_t, accscalar_t, index_t><<<blocks, threads, 0, stream>>>(
x_accessor, mean_accessor, var_accessor);
return std::make_tuple(mean, var, count);
}
template<typename scalar_t, typename index_t>
std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics_template(
const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) {
auto num = all_mean.size(0), chn = all_mean.size(1);
// Initialize output tensors
auto mean = at::empty({chn}, all_mean.options());
auto var = at::empty({chn}, all_var.options());
auto count = all_count.sum({0});
// Make accessors
auto all_mean_accessor = all_mean.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>();
auto all_var_accessor = all_var.packed_accessor<scalar_t, 2, at::RestrictPtrTraits, index_t>();
auto all_count_accessor = all_count.packed_accessor<int64_t, 2, at::RestrictPtrTraits, index_t>();
auto mean_accessor = mean.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>();
auto var_accessor = var.packed_accessor<scalar_t, 1, at::RestrictPtrTraits, index_t>();
// Kernel parameters
auto stream = at::cuda::getCurrentCUDAStream();
int threads = getNumThreads(chn);
int blocks = std::max<int>(1, chn / threads);
// Invoke kernel
reduce_statistics_kernel<scalar_t, index_t><<<blocks, threads, 0, stream>>>(
all_mean_accessor, all_var_accessor, all_count_accessor, mean_accessor, var_accessor);
return std::make_tuple(mean, var, count);
}
template<typename scalar_t, typename prmscalar_t, typename index_t>
void forward_template(at::Tensor& x_, const at::Tensor& mean, const at::Tensor& var,
const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias,
float eps, Activation activation, float activation_param) {
// Normalize shape and get dimensions
auto x = normalize_shape(x_);
auto num = x.size(0), chn = x.size(1), sp = x.size(2);
// Type handling
using accscalar_t = at::acc_type<scalar_t, true>;
// Make accessors
auto x_accessor = x.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto mean_accessor = mean.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight);
auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias);
// Kernel parameters
auto stream = at::cuda::getCurrentCUDAStream();
int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64));
int tb = std::max<int>(64 / tf, 1);
dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb)));
blocks.y = std::min<int>(blocks.y, 65535);
dim3 threads(tf, tb);
// Invoke kernel
switch (activation) {
case Activation::LeakyReLU:
forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU><<<blocks, threads, 0, stream>>>(
x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param);
break;
case Activation::ELU:
forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU><<<blocks, threads, 0, stream>>>(
x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param);
break;
case Activation::Identity:
forward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity><<<blocks, threads, 0, stream>>>(
x_accessor, mean_accessor, var_accessor, weight_accessor, bias_accessor, eps, activation_param);
break;
}
}
template<typename scalar_t, typename prmscalar_t, typename index_t>
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_template(
const at::Tensor& y_act_, const at::Tensor& dy_act_, const c10::optional<at::Tensor>& weight,
const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) {
// Normalize shape and get dimensions
auto y_act = normalize_shape(y_act_);
auto dy_act = normalize_shape(dy_act_);
auto num = y_act.size(0), chn = y_act.size(1), sp = y_act.size(2);
// Type handling
using accscalar_t = at::acc_type<scalar_t, true>;
auto acc_options = y_act.options();
if (y_act.scalar_type() == at::ScalarType::Half) {
acc_options = acc_options.dtype(at::ScalarType::Float);
}
// Initialize output tensors
auto xhat = at::empty_like(y_act);
auto dy = at::empty_like(y_act);
auto sum_dy = at::empty({chn}, acc_options);
auto sum_xhat_dy = at::empty({chn}, acc_options);
// Make accessors
auto y_act_accessor = y_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto dy_act_accessor = dy_act.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight);
auto bias_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(bias);
auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
// Kernel parameters
auto stream = at::cuda::getCurrentCUDAStream();
int block_y = std::min<int>(lastPow2(num), MAX_BLOCK_SIZE / 32);
int block_x = std::min<int>(getNumThreads(sp), MAX_BLOCK_SIZE / block_y);
const dim3 threads(block_x, block_y);
const dim3 blocks(chn);
// Invoke kernel
switch (activation) {
case Activation::LeakyReLU:
backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::LeakyReLU><<<blocks, threads, 0, stream>>>(
y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor,
eps, activation_param);
break;
case Activation::ELU:
backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::ELU><<<blocks, threads, 0, stream>>>(
y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor,
eps, activation_param);
break;
case Activation::Identity:
backward_reduce_kernel<scalar_t, accscalar_t, prmscalar_t, index_t, Activation::Identity><<<blocks, threads, 0, stream>>>(
y_act_accessor, dy_act_accessor, weight_accessor, bias_accessor, xhat_accessor, dy_accessor, sum_dy_accessor, sum_xhat_dy_accessor,
eps, activation_param);
break;
}
return std::make_tuple(xhat.view(y_act_.sizes()), dy.view(y_act_.sizes()), sum_dy, sum_xhat_dy);
}
template<typename scalar_t, typename prmscalar_t, typename index_t>
void backward_template(const at::Tensor& xhat_, at::Tensor& dy_, const at::Tensor& var,
const at::Tensor& count, const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy,
const c10::optional<at::Tensor>& weight, float eps) {
// Normalize shape and get dimensions
auto xhat = normalize_shape(xhat_);
auto dy = normalize_shape(dy_);
auto num = xhat.size(0), chn = xhat.size(1), sp = xhat.size(2);
// Type handling
using accscalar_t = at::acc_type<scalar_t, true>;
// Make accessors
auto xhat_accessor = xhat.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto dy_accessor = dy.packed_accessor<scalar_t, 3, at::RestrictPtrTraits, index_t>();
auto var_accessor = var.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto count_accessor = count.packed_accessor<int64_t, 1, at::RestrictPtrTraits, index_t>();
auto sum_dy_accessor = sum_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto sum_xhat_dy_accessor = sum_xhat_dy.packed_accessor<accscalar_t, 1, at::RestrictPtrTraits, index_t>();
auto weight_accessor = packed_accessor_or_dummy<prmscalar_t, 1, at::RestrictPtrTraits, index_t>(weight);
// Kernel parameters
auto stream = at::cuda::getCurrentCUDAStream();
int tf = std::max<int>(getNumThreads(sp / 4), std::min<int>(getNumThreads(sp), 64));
int tb = std::max<int>(64 / tf, 1);
dim3 blocks(chn, std::max<int>(1, std::min<int>((256 * 1024) / chn, (chn + tb - 1) / tb)));
blocks.y = std::min<int>(blocks.y, 65535);
dim3 threads(tf, tb);
// Invoke kernel
backward_kernel<scalar_t, accscalar_t, prmscalar_t, index_t><<<blocks, threads, 0, stream>>>(
xhat_accessor, dy_accessor, var_accessor, count_accessor, sum_dy_accessor, sum_xhat_dy_accessor,
weight_accessor, eps);
}
/***********************************************************************************************************************
* Interface methods
**********************************************************************************************************************/
std::tuple<at::Tensor, at::Tensor, at::Tensor> statistics_cuda(const at::Tensor& x) {
return AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "statistics_cuda", [&] {
if (at::cuda::detail::canUse32BitIndexMath(x)) {
return statistics_template<scalar_t, int32_t>(x);
} else {
return statistics_template<scalar_t, int64_t>(x);
}
});
}
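// Usage sketch (hypothetical caller; how the per-worker statistics are gathered
// into all_mean/all_var/all_count is outside this file and assumed here):
//   auto [mean, var, count] = statistics_cuda(x);                  // local per-channel stats
//   // ... gather mean/var/count from every worker into [world_size, C] tensors ...
//   auto [g_mean, g_var, g_count] = reduce_statistics_cuda(all_mean, all_var, all_count);
//   forward_cuda(x, g_mean, g_var, weight, bias, eps, activation, activation_param);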
std::tuple<at::Tensor, at::Tensor, at::Tensor> reduce_statistics_cuda(
const at::Tensor& all_mean, const at::Tensor& all_var, const at::Tensor& all_count) {
return AT_DISPATCH_FLOATING_TYPES(all_mean.scalar_type(), "reduce_statistics_cuda", [&] {
if (at::cuda::detail::canUse32BitIndexMath(all_mean)) {
return reduce_statistics_template<scalar_t, int32_t>(all_mean, all_var, all_count);
} else {
return reduce_statistics_template<scalar_t, int64_t>(all_mean, all_var, all_count);
}
});
}
void forward_cuda(at::Tensor& x, const at::Tensor& mean, const at::Tensor& var,
const c10::optional<at::Tensor>& weight, const c10::optional<at::Tensor>& bias,
float eps, Activation activation, float activation_param) {
const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : x.scalar_type();
DOUBLE_DISPATCH(x.scalar_type(), w_scalar_type, "forward_cuda", [&] {
if (at::cuda::detail::canUse32BitIndexMath(x)) {
forward_template<scalar_t, prmscalar_t, int32_t>(x, mean, var, weight, bias, eps, activation, activation_param);
} else {
forward_template<scalar_t, prmscalar_t, int64_t>(x, mean, var, weight, bias, eps, activation, activation_param);
}
});
}
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> backward_reduce_cuda(
const at::Tensor& y_act, const at::Tensor& dy_act, const c10::optional<at::Tensor>& weight,
const c10::optional<at::Tensor>& bias, float eps, Activation activation, float activation_param) {
const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : y_act.scalar_type();
return DOUBLE_DISPATCH(y_act.scalar_type(), w_scalar_type, "backward_reduce_cuda", [&] {
if (at::cuda::detail::canUse32BitIndexMath(y_act)) {
return backward_reduce_template<scalar_t, prmscalar_t, int32_t>(
y_act, dy_act, weight, bias, eps, activation, activation_param);
} else {
return backward_reduce_template<scalar_t, prmscalar_t, int64_t>(
y_act, dy_act, weight, bias, eps, activation, activation_param);
}
});
}
void backward_cuda(const at::Tensor& xhat, at::Tensor& dy, const at::Tensor& var, const at::Tensor& count,
const at::Tensor& sum_dy, const at::Tensor& sum_xhat_dy,
const c10::optional<at::Tensor>& weight, float eps) {
const auto& w_scalar_type = weight.has_value() ? weight.value().scalar_type() : xhat.scalar_type();
return DOUBLE_DISPATCH(xhat.scalar_type(), w_scalar_type, "backward_cuda", [&] {
if (at::cuda::detail::canUse32BitIndexMath(xhat)) {
backward_template<scalar_t, prmscalar_t, int32_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps);
} else {
backward_template<scalar_t, prmscalar_t, int64_t>(xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, eps);
}
});
}
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
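// Blob layout implied by the macros above (assuming an 8-byte long): a small
// header followed by the element body at byte offset 128:
//   bytes [0, 8)    : capacity (long)
//   bytes [8, 16)   : length   (long)
//   bytes [128, ...): element data
// Hypothetical host-side packing of an int array before copying it to the GPU:
//   char *blob = (char *)malloc(128 + capacity * sizeof(int));
//   SET_ARRAY_CAPACITY(blob, capacity);
//   SET_ARRAY_LENGTH(blob, length);
//   memcpy(GET_ARRAY_BODY((int *)blob), src, length * sizeof(int));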
// very simple test kernel
extern "C"
__global__ void identity(int *size, const int *input, int *output) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
output[ix] = input[ix];
}
}
extern "C"
// very simple test kernel for int array
__global__ void intArrayIdentity(int *size, const int *input, int *output, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
}
}
extern "C"
// very simple test kernel for IntDataPoint class
__global__ void IntDataPointIdentity(int *size, const int *inputX, const int *inputY, int *outputX, int *outputY, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &inputX[ix* *length];
int *outArrayBody = &outputX[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i];
}
// copy int scalar value
outputY[ix] = inputY[ix];
}
}
extern "C"
// very simple test kernel for int array with free var
__global__ void intArrayAdd(int *size, const int *input, int *output, const int *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const int *inArrayBody = &input[ix* *length];
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// test kernel for multiple input columns
__global__ void vectorLength(int *size, const double *x, const double *y, double *len) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
}
}
extern "C"
// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(int *size, const double *base, const float *deviation, double *a, float *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
a[ix] = base[ix] - deviation[ix];
b[ix] = base[ix] + deviation[ix];
}
}
extern "C"
// test kernel for two const arguments
__global__ void applyLinearFunction(int *size, const short *x, short *y, short *a, short *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
y[ix] = *a + *b * x[ix];
}
}
extern "C"
// test kernel for custom number of blocks + const argument
// manual SIMD, to be run on size / 8 threads, assumes size % 8 == 0
// note that key is reversed, since it's little endian
__global__ void blockXOR(int *size, const char *input, char *output, long *key) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix * 8 < *size) {
((long *)output)[ix] = ((const long *)input)[ix] ^ *key;
}
}
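// Launch sketch (hypothetical host code; the block size is an arbitrary choice):
// the kernel processes 8 bytes per thread, so it needs size / 8 threads in total.
//   int nBytes = ...;                               // must satisfy nBytes % 8 == 0
//   int threads = 256;
//   int blocks  = (nBytes / 8 + threads - 1) / threads;
//   blockXOR<<<blocks, threads>>>(d_size, d_input, d_output, d_key);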
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_l(int *size, long *in, long *out) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_self(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
in[ix] = out[ix];
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int *size, int *input, int *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < *size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
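// Two-stage launch sketch (hypothetical host code): stage 0 must be launched with
// exactly 64 * 256 threads in total (the assert enforces jump == blockDim.x * gridDim.x)
// and writes strided partial sums back into input; stage 1 only uses thread 0 to fold
// those partials into output[0].
//   sum<<<64, 256>>>(d_size, d_input, d_output, d_stage0, d_totalStages); // *d_stage0 == 0
//   sum<<<64, 256>>>(d_size, d_input, d_output, d_stage1, d_totalStages); // *d_stage1 != 0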
extern "C"
// test reduce kernel that sums elements
__global__ void sum_l(int *size, long *input, long *output, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const long jump = 64 * 256;
if (*stage == 0) {
if (ix < jump) {
assert(jump == blockDim.x * gridDim.x);
for (long long i = ix+jump; i < *size; i += jump) {
*(input+ix) += *(input+i);
}
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
long long result = 0;
for (long i = 0; i < count; ++i) {
result += *(input+i);
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void intArraySum(int *size, const int *input, int *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
int *accArrayBody = const_cast<int *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const int *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < jump) ? *size : (long)jump;
int *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const int *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
extern "C"
// map for DataPoint class
__global__ void DataPointMap(int *size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int *length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < *size) {
// copy int array
const double *inArrayBody = &inputX[ix* *length];
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < *length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// reduce for DataPoint class
__global__ void DataPointReduce(int *size, const double *input, double *output, int *length, int *stage, int *totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (*stage == 0) {
if (ix < *size) {
assert(jump == blockDim.x * gridDim.x);
double *accArrayBody = const_cast<double *>(&input[ix* *length]);
for (long i = ix + jump; i < *size; i += jump) {
const double *inArrayBody = &input[(ix* *length) + i];
for (long j = 0; j < *length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (*size < (long)jump) ? *size : (long)jump;
double *outArrayBody = &output[ix* *length];
for (long i = 0; i < count; i++) {
const double *inArrayBody = &input[(i* *length)];
if (i == 0) {
for (long j = 0; j < *length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < *length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
// map for Logistic regression
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
#define WARPSIZE 32
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
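// This is the standard compare-and-swap emulation of a double-precision atomicAdd;
// devices of compute capability 6.0 and newer also provide a native
// atomicAdd(double*, double) that could be used there instead.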
#if (__CUDA_ARCH__ >= 300)
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
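// The loop above rotates partial sums around the warp, so after log2(WARPSIZE) steps
// every lane holds the full warp sum (callers below then let only lane 0 publish it).
// Note that __shfl without the _sync suffix is the pre-CUDA-9 warp shuffle; on CUDA 9+
// the equivalent call is __shfl_sync(0xffffffff, ...).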
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernel(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(inArray, &outputArrayBody[i], i, n, length);
}
}
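// deviceReduceArrayKernel reduces an [n x length] row-major array column-wise:
// columns are handled four at a time via double4 warp reductions, with any
// remaining columns falling back to the scalar deviceReduceKernel above.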
#endif
extern "C"
__global__
void blockReduce(int *count, double *data, double * result, int *user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < *count)
deviceReduceArrayKernel(data, result, *user_D, *count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int *count, double *x, double *y, double *result, double *w, int *user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < *count)
map(&result[idx * *user_D], &x[idx * *user_D ], y[idx],w, *user_D);
}
#define BIN_SIZE 32
using namespace std;
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
#define BLOCKNUM 1024*64
#define THREADNUM 128
__global__ void _k_CACU_SUM_SIZE_GPU(float_t **data, int num, int sum_size,
int length, int out_length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int start_out, start_in;
int data_row, data_col;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
start_out = data_col;
start_in = data_col * sum_size;
out_data[data_row][start_out] = 0.0;
for (int j = 0; j < sum_size; j++)
out_data[data_row][start_out] += data[data_row][start_in + j];
}
}
//vec_t(size) -> vec_t(size/sum_size)
extern "C" void CACU_SUM_SIZE_GPU(float_t **&data, int num, int sum_size,
int length, int out_length, float_t **&out_data) {
assert(length / sum_size == out_length);
assert(length % sum_size == 0);
_k_CACU_SUM_SIZE_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, sum_size,
length, out_length, out_data);
cudaThreadSynchronize();
}
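// Note: cudaThreadSynchronize(), used after every launch in this file, is
// deprecated; cudaDeviceSynchronize() is the drop-in replacement.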
__global__ void _k_CACU_MEAN_GPU(float_t *data, int num, int length,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t shared_data[];
for (int i = bid; i < num; i += BLOCKNUM) {
shared_data[tid] = 0;
for (int j = tid; j < length; j += THREADNUM) {
shared_data[tid] += data[i * length + j];
}
__syncthreads();
if (tid == 0) {
for (int j = 1; j < THREADNUM; j++)
shared_data[0] += shared_data[j];
out_data[i] = shared_data[0] / length;
}
}
}
//vec_t(size) -> vec_t(size/sum_size)
extern "C" void CACU_MEAN_GPU(float_t *&data, int num, int length,
float_t *&out_data) {
_k_CACU_MEAN_GPU<<<BLOCKNUM, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
num, length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_SIZE_ABS_GPU(float_t *data, int num, int sum_size,
int length, int out_length, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int start_in;
int data_row, data_col;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
start_in = data_col * sum_size;
out_data[i] = 0.0;
for (int j = 0; j < sum_size; j++)
out_data[i] += abs(data[data_row * length + start_in + j]);
}
}
//vec_t(size) -> vec_t(size/sum_size)
extern "C" void CACU_SUM_SIZE_ABS_GPU(float_t *&data, int num, int sum_size,
int length, int out_length, float_t *&out_data) {
assert(length / sum_size == out_length);
assert(length % sum_size == 0);
_k_CACU_SUM_SIZE_ABS_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, sum_size,
length, out_length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_MEAN_CHANNEL_GPU(float_t **data, float_t denominator,
int num, int dim, int channel, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] += data[data_row][data_col * channel + bid];
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0] / denominator;
}
}
//nums of vec_t(size) -> vec_t(size/sum_size)
//calculate the means for batch_size
extern "C" void CACU_MEAN_CHANNEL_GPU(float_t **&data, int num, int length,
int channel, float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
float_t denominator = (float_t) dim * num;
_k_CACU_MEAN_CHANNEL_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(
data, denominator, num, dim, channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_VARIANCE_CHANNEL_GPU(float_t **data,
float_t denominator, int num, int dim, int channel, float_t *mean,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] += ((data[data_row][data_col * channel + bid]
- mean[bid])
* (data[data_row][data_col * channel + bid] - mean[bid]));
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0] / denominator;
}
}
//nums of vec_t(size) -> vec_t(size/sum_size)
//calculate the variance for batch_size
extern "C" void CACU_VARIANCE_CHANNEL_GPU(float_t **&data, float_t *&mean,
int num, int length, int channel, float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
float_t denominator = (float_t) dim * num;
_k_CACU_VARIANCE_CHANNEL_GPU<<<channel, THREADNUM,
THREADNUM * sizeof(float_t)>>>(data, denominator, num, dim, channel,
mean, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DOT_GPU(float_t **data, float_t **scale, int num,
int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
* scale[data_row][data_col];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the element-wise product for the batch
extern "C" void CACU_DOT_GPU(float_t **&data, float_t **&scale, int num,
int length, float_t **&out_data) {
_k_CACU_DOT_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SQRT_GPU(float_t **data, int num, int length,
float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = sqrt(data[data_row][data_col]);
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the element-wise square root for the batch
extern "C" void CACU_SQRT_GPU(float_t **&data, int num, int length,
float_t **&out_data) {
_k_CACU_SQRT_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SCALE_GPU(float_t **data, float_t *scale, int num,
int length, int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
* scale[data_col % channel];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the per-channel scaling for the batch
extern "C" void CACU_SCALE_GPU(float_t **&data, float_t *&scale, int num,
int length, int channel, float_t **&out_data) {
assert(length % channel == 0);
_k_CACU_SCALE_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SCALE_GPU_D(float_t **data, float_t **scale, int num,
int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
* scale[data_row][data_col];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the element-wise product A.*B for the batch
extern "C" void CACU_SCALE_GPU_D(float_t **&data, float_t **&scale, int num,
int length, float_t **&out_data) {
_k_CACU_SCALE_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SCALE_GPU_A(float_t **data, float_t scale, int num,
int length, float_t **out_data, int add) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
if (add == 0)
out_data[data_row][data_col] = data[data_row][data_col] * scale;
else
out_data[data_row][data_col] += data[data_row][data_col] * scale;
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate scale*B for the batch (accumulated into out_data when add != 0)
extern "C" void CACU_SCALE_GPU_A(float_t **&data, float_t scale, int num,
int length, float_t **&out_data, int add) {
_k_CACU_SCALE_GPU_A<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
out_data, add);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SCALE_GPU_B(float_t **data, float_t **scale, int num,
int dim, int channel, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] += (data[data_row][data_col * channel + bid]
* scale[data_row][data_col * channel + bid]);
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0];
}
}
//num of vec_t(length) -> vec_t(channel)
//calculate the per-channel sum of data.*scale over the batch
extern "C" void CACU_SCALE_GPU_B(float_t **&data, float_t **&scale, int num,
int length, int channel, float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
_k_CACU_SCALE_GPU_B<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(
data, scale, num, dim, channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_GPU(float_t **data, float_t *bias, int num,
int length, int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
+ bias[data_col % channel];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the per-channel bias addition for the batch
extern "C" void CACU_SUM_GPU(float_t **&data, float_t *&bias, int num,
int length, int channel, float_t **&out_data) {
_k_CACU_SUM_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_GPU_B(float_t **data, int num, int dim, int channel,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] += data[data_row][data_col * channel + bid];
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0];
}
}
//num of vec_t(length) -> vec_t(channel)
//calculate the per-channel sum over the batch
extern "C" void CACU_SUM_GPU_B(float_t **&data, int num, int length,
int channel, float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
_k_CACU_SUM_GPU_B<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
num, dim, channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_GPU_C(float_t **data, int num, int out_length,
int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
for (int j = 0; j < channel; j++) {
out_data[data_row][data_col] += data[data_row][data_col * channel
+ j];
}
}
}
//num of vec_t(length) -> num of vec_t(out_length)
//calculate the sum over channels for every sample
extern "C" void CACU_SUM_GPU_C(float_t **&data, int num, int length,
int out_length, int channel, float_t **&out_data) {
assert(length % channel == 0);
assert(length / channel == out_length);
_k_CACU_SUM_GPU_C<<<BLOCKNUM, THREADNUM, 0>>>(data, num, out_length,
channel, out_data);
cudaThreadSynchronize();
}
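//Note (added): _k_CACU_SUM_GPU_C accumulates with "+=" and never clears out_data,
//so out_data is expected to be zero-initialized before the call (for example via
//CACU_RESET_DATA_GPU further down). The same applies to _k_CACU_SUM_ABS_GPU below.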
__global__ void _k_CACU_SUM_GPU_R(float_t **data, float_t **bias, int num,
int output_channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
	for (int i = threadid; i < output_channel; i += BLOCKNUM * THREADNUM) {
		out_data[i][0] = data[i][0];
		//accumulate over the batch; the original assignment overwrote the
		//previous samples instead of summing them
		for (int n = 0; n < num; n++)
			out_data[i][0] += bias[n][i];
	}
}
//output_channel of vec_t(1)
//accumulate the per-sample bias values over the batch onto data
extern "C" void CACU_SUM_GPU_R(float_t **&data, float_t **&bias, int num,
int output_channel, float_t **&out_data) {
_k_CACU_SUM_GPU_R<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num,
output_channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_ABS_GPU(float_t **data, int num, int out_length,
int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
for (int j = 0; j < channel; j++) {
out_data[data_row][data_col] += abs(
data[data_row][data_col * channel + j]);
}
}
}
//num of vec_t(length) -> num of vec_t(out_length)
//calculate the sum of |x| over channels for every sample
extern "C" void CACU_SUM_ABS_GPU(float_t **&data, int num, int length,
int out_length, int channel, float_t **&out_data) {
assert(length % channel == 0);
assert(length / channel == out_length);
_k_CACU_SUM_ABS_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, out_length,
channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUM_GPU_D(float_t **data, float_t **bias, int num,
int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
+ bias[data_row][data_col];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the element-wise addition for the batch
extern "C" void CACU_SUM_GPU_D(float_t **&data, float_t **&bias, int num,
int length, float_t **&out_data) {
_k_CACU_SUM_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUB_GPU(float_t **data, float_t *bias, int num,
int length, int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
- bias[data_col % channel];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the per-channel subtraction for the batch
extern "C" void CACU_SUB_GPU(float_t **&data, float_t *&bias, int num,
int length, int channel, float_t **&out_data) {
assert(length % channel == 0);
_k_CACU_SUB_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SUB_GPU_D(float_t *data, float_t *bias, int num,
int length, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
out_data[i] = data[i] - bias[data_row];
}
}
//num of vec_t(length) -> num of vec_t(length)
//subtract a per-sample scalar bias for the batch
extern "C" void CACU_SUB_GPU_D(float_t *&data, float_t *&bias, int num,
int length, float_t *&out_data) {
_k_CACU_SUB_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DIVISION_GPU(float_t **data, float_t *scale, int num,
int length, int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
/ scale[data_col % channel];
}
}
//num of vec_t(length) -> num of vec_t(length)
//calculate the per-channel division for the batch
extern "C" void CACU_DIVISION_GPU(float_t **&data, float_t *&scale, int num,
int length, int channel, float_t **&out_data) {
assert(length % channel == 0);
_k_CACU_DIVISION_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_ROU_GPU(float_t **data, float_t **dx_ba, float_t *mean,
float_t *variance, int num, int dim, int channel, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] +=
(data[data_row][data_col * channel + bid] - mean[bid])
* dx_ba[data_row][data_col * channel + bid]
* (-0.5
/ (variance[bid] * variance[bid] * variance[bid]));
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0];
}
}
//FOR BATCH_NORMALIZATION, not a common utility
//calculate the gradient with respect to the variance for batch normalization
extern "C" void CACU_ROU_GPU(float_t **&data, float_t **&dx_ba, float_t *&mean,
float_t *&variance, int num, int length, int channel,
float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
_k_CACU_ROU_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
dx_ba, mean, variance, num, dim, channel, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_MU_GPU(float_t **data, float_t **dx_ba, float_t *mean,
float_t *variance, float_t *rou, int dim, int channel, int num,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
extern __shared__ float_t share_data[];
int data_row, data_col;
int m = dim * num;
share_data[tid] = 0;
for (int i = tid; i < dim * num; i += THREADNUM)
{
data_row = i / dim;
data_col = i % dim;
share_data[tid] += ((dx_ba[data_row][data_col * channel + bid]
/ (-variance[bid]))
+ ((rou[bid] / m)
* (-2.0
* (data[data_row][data_col * channel + bid]
- mean[bid]))));
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++) {
share_data[0] += share_data[i];
}
out_data[bid] = share_data[0];
}
}
//FOR BATCH_NORMALIZATION, not a common utility
//calculate the gradient with respect to the mean for batch normalization
extern "C" void CACU_MU_GPU(float_t **&data, float_t **&dx_ba, float_t *&mean,
float_t *&variance, float_t *&rou, int num, int length, int channel,
float_t *&out_data) {
assert(length % channel == 0);
int dim = length / channel;
_k_CACU_MU_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
dx_ba, mean, variance, rou, dim, channel, num, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DX_GPU(float_t **data, float_t **dx_ba, float_t *mean,
float_t *variance, float_t *rou, float_t *mu, int length, int dim,
int num, int channel, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int c;
int m = dim * num;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
c = data_col % channel;
out_data[data_row][data_col] += ((dx_ba[data_row][data_col]
/ variance[c])
+ rou[c] * (2.0 * (data[data_row][data_col] - mean[c]) / m)
+ (mu[c] / m));
}
}
//FOR BATCH_NORMALIZATION, not a common utility
//calculate the gradient with respect to the input for batch normalization
extern "C" void CACU_DX_GPU(float_t **&data, float_t **&dx_ba, float_t *&mean,
float_t *&variance, float_t *&rou, float_t *&mu, int num, int length,
int channel, float_t **&out_data) {
assert(length % channel == 0);
int dim = length / channel;
_k_CACU_DX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, dx_ba, mean, variance, rou,
mu, length, dim, num, channel, out_data);
cudaThreadSynchronize();
}
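//--------------------------------------------------------------------------
//Note (added, inferred from the kernels above): CACU_ROU_GPU, CACU_MU_GPU and
//CACU_DX_GPU together implement the usual batch-normalization backward pass,
//with m = dim * num, dx_ba the gradient w.r.t. the normalized activation, and
//"variance" holding the standard deviation sigma:
//  rou = sum( dx_ba * (x - mean) ) * (-0.5 / sigma^3)              (d variance)
//  mu  = sum( -dx_ba / sigma + (rou / m) * (-2) * (x - mean) )     (d mean)
//  dx += dx_ba / sigma + rou * 2 * (x - mean) / m + mu / m         (d input)
//This description is reconstructed from the code and is not taken from the
//original author's documentation.
//--------------------------------------------------------------------------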
//__global__ void _k_CACU_SCALE_SUM_ROW_GPU(float_t **data, int num,
// int kernels_num, int sum_size, int out_length, float_t **kernel,
// float_t **bias, float_t **out_data) {
//
// int tid = threadIdx.x;
// int bid = blockIdx.x;
//
// int threadid = bid * THREADNUM + tid;
//
// int start_in, start_out;
//
// int data_row, data_col;
//
// int c;
//
// extern __shared__ float_t shared_data[];
//
// for (int i = bid; i < num * out_length; i += BLOCKNUM) {
// data_row = i / out_length;
// data_col = i % out_length;
//
// start_in = (data_col / kernels_num) * sum_size;
//
// c = data_col % kernels_num;
//
// start_out = data_col;
//
// for (int j = tid; j < sum_size; j += THREADNUM)
// {
// shared_data[tid] = data[data_row][start_in + j] * kernel[c][j];
// }
//
// __syncthreads();
//
// if (tid == 0) {
// for(int i = 1; i < THREADNUM ; i++)
// shared_data[0] += shared_data[i];
// out_data[data_row][start_out] = shared_data[0] + bias[c][0];
// }
// }
//}
//
//
////caculate the sum(a*x_0i)
//extern "C" void CACU_SCALE_SUM_ROW_GPU(float_t **&data, int num, int sum_size,
// int kernels_num, int out_length, float_t **&kernels, float_t **&bias,
// float_t **&out_data) {
//
// assert(out_length % kernels_num == 0);
//
// _k_CACU_SCALE_SUM_ROW_GPU<<<BLOCKNUM, THREADNUM, THREADNUM * sizeof(float_t)>>>(
// data, num, kernels_num, sum_size, out_length, kernels, bias,
// out_data);
//
// cudaThreadSynchronize();
//}
__global__ void _k_CACU_SCALE_SUM_ROW_GPU(float_t *data, int num,
int kernels_num, int sum_size, int out_length, float_t *kernel,
float_t *bias, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int start_in;
int data_row, data_col;
int c;
int indata_length = (out_length / kernels_num) * sum_size;
__shared__ float_t share_data[THREADNUM];
for (int i = bid; i < num * out_length; i += BLOCKNUM) {
data_row = i / out_length;
data_col = i % out_length;
start_in = (data_col / kernels_num) * sum_size;
c = data_col % kernels_num;
share_data[tid] = 0.0;
for (int j = tid; j < sum_size; j += THREADNUM) {
share_data[tid] += data[data_row * indata_length + start_in + j]
* kernel[c * sum_size + j];
}
__syncthreads();
int flag = THREADNUM / 2;
while (flag > 0) {
if (tid < flag)
share_data[tid] += share_data[tid + flag];
__syncthreads();
flag = flag / 2;
}
		out_data[i] = share_data[0] + bias[c];
		//make sure every thread has read share_data[0] before it is
		//re-initialized in the next iteration of the block-stride loop
		__syncthreads();
	}
}
//calculate sum(kernel_i * x_i) + bias for every output position (convolution on im2col data)
extern "C" void CACU_SCALE_SUM_ROW_GPU(float_t *&data, int num, int sum_size,
int kernels_num, int out_length, float_t *&kernels, float_t *&bias,
float_t *&out_data) {
assert(out_length % kernels_num == 0);
_k_CACU_SCALE_SUM_ROW_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
kernels_num, sum_size, out_length, kernels, bias, out_data);
cudaThreadSynchronize();
}
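//Note (added): the in-kernel tree reduction above (halving "flag" from THREADNUM/2)
//assumes THREADNUM is a power of two. A generic helper with the same pattern, given
//here only as an illustrative sketch and not part of the original API, could look
//like this (it must be called by every thread of the block):
__device__ inline float_t example_block_reduce_sum(float_t *share_data, int tid) {
	for (int flag = THREADNUM / 2; flag > 0; flag >>= 1) {
		if (tid < flag)
			share_data[tid] += share_data[tid + flag];
		__syncthreads();
	}
	return share_data[0];
}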
__global__ void _k_CACU_DECONV_W_BIN_GPU(float_t *data, float_t *top_diff,
float_t *a, int num, int kernel_length, int output_dim, int kernels_num,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int dim = output_dim * output_dim;
int data_row, data_col;
int data_length = output_dim * output_dim * kernel_length;
int diff_length = dim * kernels_num;
float_t crop = 1.0;
for (int i = threadid; i < kernels_num * kernel_length;
i += BLOCKNUM * THREADNUM) {
data_row = i / kernel_length;
data_col = i % kernel_length;
out_data[i] = 0.0;
for (int n = 0; n < num; n++)
for (int j = 0; j < dim; j++) {
out_data[i] +=
data[n * data_length + j * kernel_length + data_col]
* top_diff[n * diff_length + j * kernels_num
+ data_row];
}
		//re-evaluate the crop indicator for every element (the original code kept
		//the previous value once it had been set to 0)
		crop = (abs(out_data[i]) > 1) ? (float_t) 0.0 : (float_t) 1.0;
out_data[i] *= (((float_t) (1.0 / kernel_length) + a[data_row] * crop)
* ((float_t) kernel_length - (float_t) (1.0)));
}
}
//calculate the convolution gradient with respect to W (binary-weight variant)
//data : bottom
//top_diff : diffs
//out_data : diff_ws
extern "C" void CACU_DECONV_W_BIN_GPU(float_t *&data, float_t *&top_diff,
float_t *a, int num, int kernel_size, int kernels_num, int output_dim,
int channel, int stride, float_t *&out_data) {
_k_CACU_DECONV_W_BIN_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, top_diff, a, num,
kernel_size * kernel_size * channel, output_dim, kernels_num,
out_data);
cudaThreadSynchronize();
}
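//Note (added): the kernel above first accumulates the raw gradient
//sum(bottom * top_diff) per weight, then rescales it by
//(1/kernel_length + a[kernel] * crop) * (kernel_length - 1), where crop zeroes the
//a-term whenever the accumulated value exceeds 1 in magnitude. This resembles the
//clipped straight-through estimator used when training binary-weight convolutions,
//but that interpretation is inferred, not documented in the original source.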
__global__ void _k_CACU_DECONV_W_B_GPU(float_t *data, float_t *top_diff,
int num, int kernel_length, int output_dim, int kernels_num,
float_t *out_data, float_t *bias) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int dim = output_dim * output_dim;
int data_row, data_col;
int data_length = output_dim * output_dim * kernel_length;
int diff_length = dim * kernels_num;
// __shared__ float_t share_data[]
for (int i = threadid; i < kernels_num * kernel_length; i += BLOCKNUM*THREADNUM) {
data_row = i / kernel_length;
data_col = i % kernel_length;
out_data[i] = 0.0;
for (int n = 0; n < num; n++)
for (int j = 0; j < dim; j++) {
out_data[i] +=
data[n * data_length + j * kernel_length + data_col]
* top_diff[n * diff_length + j * kernels_num
+ data_row];
}
}
for (int i = threadid; i < kernels_num; i += BLOCKNUM * THREADNUM) {
bias[i] = 0.0;
for (int n = 0; n < num; n++)
for (int j = 0; j < dim; j++) {
bias[i] = bias[i]
+ top_diff[n * diff_length + j * kernels_num + i];
}
}
}
//calculate the convolution gradient with respect to W
//data : bottom
//top_diff : diffs
//out_data : diff_ws
extern "C" void CACU_DECONV_W_B_GPU(float_t *&data, float_t *&top_diff, int num,
int kernel_size, int kernels_num, int output_dim, int channel,
int stride, float_t *&out_data, float_t *&bias) {
_k_CACU_DECONV_W_B_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, top_diff, num,
kernel_size * kernel_size * channel, output_dim, kernels_num,
out_data, bias);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DECONV_DIFF_GPU(float_t **data, float_t **kernel,
int num, int channel, int kernels_num, int input_dim, int output_dim,
int stride, int kernel_size, int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
//the set in the input feature map
int startset_i, startset_j;
//the set in the output feature map
int outset_si, outset_sj, outset_i, outset_j;
//the count for stride in feature map
int count_i, count_j;
int data_row, data_col;
int k_index, diff_index;
int c;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = 0.0;
startset_i = data_col / (channel * input_dim);
startset_j = (data_col / channel) % input_dim;
c = data_col % channel;
outset_si = startset_i / stride;
outset_sj = startset_j / stride;
if (outset_si >= output_dim)
outset_si = output_dim - 1;
if (outset_sj >= output_dim)
outset_sj = output_dim - 1;
count_i = 0;
count_j = 0;
while (outset_si - (count_i + 1) >= 0
&& ((outset_si - (count_i + 1)) * stride) + kernel_size
>= startset_i + 1) {
count_i++;
}
while (outset_sj - (count_j + 1) >= 0
&& ((outset_sj - (count_j + 1)) * stride) + kernel_size
>= startset_j + 1) {
count_j++;
}
//stride
for (int mi = 0; mi <= count_i; mi++)
for (int mj = 0; mj <= count_j; mj++) {
outset_i = outset_si - mi;
outset_j = outset_sj - mj;
k_index = ((startset_i - outset_i * stride) * kernel_size
+ (startset_j - outset_j * stride)) * channel + c;
diff_index = (outset_i * output_dim + outset_j) * kernels_num;
for (int kn = 0; kn < kernels_num; kn++) {
out_data[data_row][data_col] = out_data[data_row][data_col]
+ data[data_row][diff_index + kn]
* kernel[kn][k_index];
}
}
}
}
//calculate the convolution gradient with respect to the input
//data : k
//top_diff : diffs
//out_data : diff_prevs
extern "C" void CACU_DECONV_DIFF_GPU(float_t **&data, float_t **&top_diff,
int kernel_size, int kernels_num, int num, int input_dim, int pad,
int channel, int stride, float_t **&out_data) {
int input_dim_ = (input_dim + 2 * pad);
int output_dim = (input_dim_ - kernel_size) / stride + 1;
int length = input_dim_ * input_dim_ * channel;
_k_CACU_DECONV_DIFF_GPU<<<BLOCKNUM, THREADNUM, 0>>>(top_diff, data, num,
channel, kernels_num, input_dim_, output_dim, stride, kernel_size,
length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DECONV_DIFF_COL_GPU(float_t *data, float_t *kernel,
int num, int kernels_num, int block_size, int length,
float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
//outset is the index in output feature map
//blockset is the index in block
int outset, blockset;
int data_row, data_col;
int data_length = (length / block_size) * kernels_num;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[i] = 0.0;
outset = data_col / block_size;
blockset = data_col % block_size;
for (int j = 0; j < kernels_num; j++) {
out_data[i] += kernel[j * block_size + blockset]
* data[data_row * data_length + outset * kernels_num + j];
// if (i == 100)
// printf("%f,%f,%f\n", kernel[j * block_size + blockset],
// data[data_row * data_length + outset * kernels_num + j],
// out_data[i]);
}
}
}
//calculate the convolution gradient with respect to the input (column layout)
//data : k
//top_diff : diffs
//out_data : diff_prevs
extern "C" void CACU_DECONV_DIFF_COL_GPU(float_t *&data, float_t *&top_diff,
int kernel_size, int kernels_num, int num, int input_dim, int pad,
int channel, int stride, float_t *&out_data) {
int input_dim_ = (input_dim + 2 * pad);
int output_dim = (input_dim_ - kernel_size) / stride + 1;
int block_size = kernel_size * kernel_size * channel;
int length = output_dim * output_dim * channel * kernel_size * kernel_size;
_k_CACU_DECONV_DIFF_COL_GPU<<<BLOCKNUM, THREADNUM, 0>>>(top_diff, data, num,
kernels_num, block_size, length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_ACTIVATION_RELU_GPU(float_t **data, int num,
int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
data[data_row][data_col] = max((float_t) 0, data[data_row][data_col]);
}
}
extern "C" void CACU_ACTIVATION_RELU_GPU(float_t **&data, int num, int length) {
_k_CACU_ACTIVATION_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length);
cudaThreadSynchronize();
}
__global__ void _k_CACU_ACTIVATION_LEAKY_RELU_GPU(float_t **data, int num,
int length, float_t slope) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
data[data_row][data_col] =
0 <= data[data_row][data_col] ?
data[data_row][data_col] :
data[data_row][data_col] * slope;
}
}
extern "C" void CACU_ACTIVATION_LEAKY_RELU_GPU(float_t **&data, int num,
int length, float_t slope) {
_k_CACU_ACTIVATION_LEAKY_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
length, slope);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DE_ACTIVATION_RELU_GPU(float_t **data, int num,
int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
float_t sign;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
sign = data[data_row][data_col] > 0 ? (float_t) 1 : (float_t) 0;
out_data[data_row][data_col] = sign * out_data[data_row][data_col];
}
}
extern "C" void CACU_DE_ACTIVATION_RELU_GPU(float_t **&data, int num,
int length, float_t **&out_data) {
_k_CACU_DE_ACTIVATION_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DE_ACTIVATION_LEAKY_RELU_GPU(float_t **data, int num,
int length, float_t slope, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
float_t sign;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
sign = data[data_row][data_col] > 0 ? (float_t) 1 : slope;
out_data[data_row][data_col] = sign * out_data[data_row][data_col];
}
}
extern "C" void CACU_DE_ACTIVATION_LEAKY_RELU_GPU(float_t **&data, int num,
int length, float_t slope, float_t **&out_data) {
_k_CACU_DE_ACTIVATION_LEAKY_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
length, slope, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_ACTIVATION_SIGMOID_GPU(float_t **data, int num,
int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
data[data_row][data_col] = float_t(1)
/ (float_t(1) + exp(-data[data_row][data_col]));
}
}
extern "C" void CACU_ACTIVATION_SIGMOID_GPU(float_t **&data, int num,
int length) {
_k_CACU_ACTIVATION_SIGMOID_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
length);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DE_ACTIVATION_SIGMOID_GPU(float_t **data, int num,
int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
* (float_t(1) - data[data_row][data_col]);
}
}
extern "C" void CACU_DE_ACTIVATION_SIGMOID_GPU(float_t **&data, int num,
int length, float_t **&out_data) {
_k_CACU_DE_ACTIVATION_SIGMOID_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_SOFTMAX_GPU(float_t **data, int num, int length,
float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
__shared__ float_t sum, max_data;
for (int j = bid; j < num; j += BLOCKNUM) {
if (tid == 0) {
			//index the current row with the stride-loop variable j (the original
			//code indexed with bid, which never advanced past the first BLOCKNUM rows)
			max_data = data[j][0];
			for (int i = 1; i < length; i++)
				max_data = max(max_data, data[j][i]);
		}
		__syncthreads();
		for (int i = tid; i < length; i += THREADNUM) {
			data[j][i] = exp(data[j][i] - max_data);
		}
		__syncthreads();
		if (tid == 0) {
			sum = 0;
			for (int i = 0; i < length; i++)
				sum += data[j][i];
		}
		__syncthreads();
		for (int i = tid; i < length; i += THREADNUM) {
			out_data[j][i] = data[j][i] / sum;
}
}
}
extern "C" void CACU_SOFTMAX_GPU(float_t **&data, int num, int length,
float_t **&out_data) {
_k_CACU_SOFTMAX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_GEMM_GPU(float_t *data, float_t *kernel, float_t *bias,
int num, int kernels_num, int length, float_t *out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int data_row, data_col;
__shared__ float_t share_data[THREADNUM];
for (int i = bid; i < num * kernels_num; i += BLOCKNUM) {
data_row = i / kernels_num;
data_col = i % kernels_num;
share_data[tid] = 0.0;
for (int j = tid; j < length; j += THREADNUM) {
share_data[tid] += data[data_row * length + j]
* kernel[data_col * length + j];
		}
		//synchronize before the tree reduction reads other threads' partial sums
		__syncthreads();
		int flag = THREADNUM / 2;
while (flag > 0) {
if (tid < flag)
share_data[tid] += share_data[tid + flag];
__syncthreads();
flag = flag / 2;
}
		out_data[i] = share_data[0] + bias[data_col];
		//ensure all threads have read share_data[0] before the next iteration
		//re-initializes the shared buffer
		__syncthreads();
	}
}
//calculate sum(kernel_i * x_i) + bias (fully connected forward pass)
extern "C" void CACU_GEMM_GPU(float_t *&data, float_t *&bias, int num,
int kernels_num, int length, float_t *&kernels, float_t *&out_data) {
_k_CACU_GEMM_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, kernels, bias, num,
kernels_num, length, out_data);
cudaThreadSynchronize();
}
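//--------------------------------------------------------------------------
//Illustrative usage sketch (added; not part of the original CACU API). Assumes
//float_t, BLOCKNUM and THREADNUM are defined earlier in this file. Layout: data is
//num x length (row-major), kernels is kernels_num x length, out_data is
//num x kernels_num.
void EXAMPLE_CACU_GEMM_usage() {
	const int num = 2, kernels_num = 3, length = 4;
	float_t *data, *kernels, *bias, *out_data;
	cudaMalloc((void **) &data, num * length * sizeof(float_t));
	cudaMalloc((void **) &kernels, kernels_num * length * sizeof(float_t));
	cudaMalloc((void **) &bias, kernels_num * sizeof(float_t));
	cudaMalloc((void **) &out_data, num * kernels_num * sizeof(float_t));
	//... fill data, kernels and bias with cudaMemcpy ...
	CACU_GEMM_GPU(data, bias, num, kernels_num, length, kernels, out_data);
	cudaFree(data);
	cudaFree(kernels);
	cudaFree(bias);
	cudaFree(out_data);
}
//--------------------------------------------------------------------------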
__global__ void _k_CACU_DE_GEMM_W_GPU(float_t **data, float_t **scales, int num,
int kernels_num, int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < length * kernels_num; i +=
BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = 0.0;
for (int j = 0; j < num; j++) {
out_data[data_row][data_col] = out_data[data_row][data_col]
+ data[j][data_row] * scales[j][data_col];
}
}
}
//data : top_diff
//scales : bottoms_data
//out_data : grad for w
extern "C" void CACU_DE_GEMM_W_GPU(float_t **&data, int num, int kernels_num,
int length, float_t **&scales, float_t **&out_data) {
_k_CACU_DE_GEMM_W_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scales, num,
kernels_num, length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_DE_GEMM_DIFF_GPU(float_t **data, float_t **scales,
int num, int kernels_num, int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < length * num; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = 0.0;
for (int j = 0; j < kernels_num; j++) {
out_data[data_row][data_col] = out_data[data_row][data_col]
+ data[data_row][j] * scales[j][data_col];
}
}
}
//data : top_diff
//scales : w
//out_data : bottoms_diff
extern "C" void CACU_DE_GEMM_DIFF_GPU(float_t **&data, int num, int kernels_num,
int length, float_t **&scales, float_t **&out_data) {
_k_CACU_DE_GEMM_DIFF_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scales, num,
kernels_num, length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_AXBY_GPU(float_t **data, float_t a, float_t **bias,
float_t b, int num, int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col] * a
+ bias[data_row][data_col] * b;
}
}
//calculate a*x + b*y element-wise
extern "C" void CACU_AXBY_GPU(float_t **&data, float_t a, int num, int length,
float_t **&bias, float_t b, float_t **&out_data) {
_k_CACU_AXBY_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, a, bias, b, num, length,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_AXBY_CROP_GPU(float_t **data, float_t a, float_t **bias,
float_t b, int num, int length, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int data_row, data_col;
for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
data_row = i / length;
data_col = i % length;
if (abs(data[data_row][data_col] * a + bias[data_row][data_col] * b)
< 1)
out_data[data_row][data_col] = data[data_row][data_col] * a
+ bias[data_row][data_col] * b;
else
out_data[data_row][data_col] = data[data_row][data_col];
}
}
//calculate a*x + b*y, applied only where |a*x + b*y| < 1; otherwise x is kept
extern "C" void CACU_AXBY_CROP_GPU(float_t **&data, float_t a, int num,
int length, float_t **&bias, float_t b, float_t **&out_data) {
_k_CACU_AXBY_CROP_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, a, bias, b, num,
length, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_A_POOLING_GPU(float_t **data, int num, int kernel_size,
int input_dim, int output_dim, int pad, int out_length, int channel,
int stride, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int set_i, set_j;
int start_i, start_j;
int start_in;
int c;
int data_row, data_col;
float_t sum;
int count;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
sum = 0;
count = 0;
set_i = (data_col / channel) / output_dim;
set_j = (data_col / channel) % output_dim;
start_i = set_i * stride;
start_j = set_j * stride;
c = data_col % channel;
start_in = (start_i * input_dim + start_j) * channel + c;
for (int ki = 0; ki < kernel_size && (ki + start_i) < input_dim; ki++) {
for (int kj = 0; kj < kernel_size && (kj + start_j) < input_dim;
kj++) {
sum +=
data[data_row][start_in
+ (ki * input_dim + kj) * channel];
count++;
}
}
out_data[data_row][data_col] = (float_t) (sum / count);
}
}
//calculate average pooling for the batch
extern "C" void CACU_A_POOLING_GPU(float_t **&data, int num, int kernel_size,
int input_dim, int output_dim, int pad, int out_length, int channel,
int stride, float_t **&out_data) {
_k_CACU_A_POOLING_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, kernel_size,
input_dim, output_dim, pad, out_length, channel, stride, out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_M_POOLING_GPU(float_t **data, int num, int kernel_size,
int input_dim, int output_dim, int out_length, int channel, int stride,
float_t **out_data, float_t **index) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int set_i, set_j;
int start_i, start_j;
int start_in;
int c;
int data_row, data_col;
float_t sign;
for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
data_row = i / out_length;
data_col = i % out_length;
set_i = (data_col / channel) / output_dim;
set_j = (data_col / channel) % output_dim;
start_i = set_i * stride;
start_j = set_j * stride;
c = data_col % channel;
start_in = (start_i * input_dim + start_j) * channel + c;
for (int ki = 0; ki < kernel_size && (ki + set_i * stride) < input_dim;
ki++)
for (int kj = 0;
kj < kernel_size && (kj + set_j * stride) < input_dim;
kj++) {
sign =
data[data_row][start_in
+ (ki * input_dim + kj) * channel];
if (out_data[data_row][data_col] < sign
|| (ki == 0 && kj == 0)) {
index[data_row][data_col] = ki * kernel_size + kj;
out_data[data_row][data_col] = sign;
}
}
}
}
//calculate max pooling for the batch (index records the argmax within each window)
extern "C" void CACU_M_POOLING_GPU(float_t **&data, int num, int kernel_size,
int input_dim, int output_dim, int out_length, int channel, int stride,
float_t **&out_data, float_t **index) {
_k_CACU_M_POOLING_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, kernel_size,
input_dim, output_dim, out_length, channel, stride, out_data,
index);
cudaThreadSynchronize();
}
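//Note (added): index[row][col] stores ki * kernel_size + kj, i.e. the position of
//the maximum inside its pooling window; the backward (un-pooling) pass can use it
//to route gradients back to the selected input element.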
__global__ void _k_CACU_CE_LOSS_GPU(float_t **data, float_t **label, int num,
float_t *loss) {
int tid = threadIdx.x;
loss[0] = 0;
__shared__ float_t share_data[THREADNUM];
share_data[tid] = 0;
for (int i = tid; i < num; i += THREADNUM) {
int index = int(label[i][0]);
share_data[tid] -= (log(data[i][index]));
}
__syncthreads();
if (tid == 0) {
for (int i = 1; i < THREADNUM; i++)
share_data[0] += share_data[i];
loss[0] = share_data[0];
}
}
//calculate the cross-entropy loss, summed over the batch
extern "C" void CACU_CE_LOSS_GPU(float_t **&data, float_t **label, int num,
float_t *&loss) {
_k_CACU_CE_LOSS_GPU<<<1, THREADNUM, 0>>>(data, label, num, loss);
cudaThreadSynchronize();
}
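//Note (added): CACU_CE_LOSS_GPU launches a single block, so the whole reduction of
//loss = -sum_i log(data[i][label_i]) happens in one block of THREADNUM threads;
//num may still exceed THREADNUM because of the strided loop inside the kernel.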
__global__ void _k_CACU_SUB_INDEX_GPU(float_t **data, float_t **label, int num,
float_t value, float_t **out_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
for (int i = threadid; i < num; i += BLOCKNUM * THREADNUM) {
int index = int(label[i][0]);
out_data[i][index] -= value;
}
}
//subtract value at the labeled index of every sample (softmax-loss gradient)
extern "C" void CACU_SUB_INDEX_GPU(float_t **&data, float_t ** index,
float_t value, int num, float_t **&out_data) {
_k_CACU_SUB_INDEX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, index, num, value,
out_data);
cudaThreadSynchronize();
}
__global__ void _k_CACU_RESET_DATA_GPU(float_t **data_input, int num,
int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int out_start;
int data_row;
for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) {
data_row = j / length;
out_start = j % length;
data_input[data_row][out_start] = 0;
}
}
extern "C" void CACU_RESET_DATA_GPU(float_t **&data, int num, int length) {
_k_CACU_RESET_DATA_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length);
cudaThreadSynchronize();
}
__global__ void _k_CACU_RESET_BIN_DATA_GPU(unsigned int **data_input, int num,
int length) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int threadid = bid * THREADNUM + tid;
int out_start;
int data_row;
for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) {
data_row = j / length;
out_start = j % length;
data_input[data_row][out_start] = 0;
}
}
extern "C" void CACU_RESET_BIN_DATA_GPU(unsigned int **&data, int num,
int length) {
_k_CACU_RESET_BIN_DATA_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length);
cudaThreadSynchronize();
}
namespace RPU {
/******************************************************************************************/
/* TransferRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize(bool transfer_columns) {
transfer_pwu_ =
RPU::make_unique<PulsedWeightUpdater<T>>(this->context_, this->x_size_, this->d_size_);
if (transfer_columns) {
transfer_iom_ =
RPU::make_unique<InputOutputManager<T>>(this->context_, this->x_size_, this->d_size_);
} else {
transfer_iom_ =
RPU::make_unique<InputOutputManager<T>>(this->context_, this->d_size_, this->x_size_);
}
this->context_->synchronize();
}
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(
CudaContext *c, const TransferRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(const TransferRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
if (other.transfer_vecs_ != nullptr) {
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(*other.transfer_vecs_);
}
initialize(other.getPar().transfer_columns);
current_slice_indices_ = other.current_slice_indices_;
fully_hidden_ = other.fully_hidden_;
this->context_->synchronizeDevice();
};
// copy assignment
template <typename T>
TransferRPUDeviceCuda<T> &
TransferRPUDeviceCuda<T>::operator=(const TransferRPUDeviceCuda<T> &other) {
TransferRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(TransferRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
TransferRPUDeviceCuda<T> &TransferRPUDeviceCuda<T>::operator=(TransferRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
transfer_vecs_ = std::move(other.transfer_vecs_);
current_slice_indices_ = other.current_slice_indices_;
other.current_slice_indices_.clear();
fully_hidden_ = other.fully_hidden_;
transfer_pwu_ = std::move(other.transfer_pwu_);
transfer_iom_ = std::move(other.transfer_iom_);
transfer_tmp_ = std::move(other.transfer_tmp_);
return *this;
};
template <typename T>
void TransferRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
  const auto *rpu_device_ptr = dynamic_cast<const TransferRPUDevice<T> *>(&rpu_device_in);
  if (rpu_device_ptr == nullptr) {
    RPU_FATAL("populateFrom expects TransferRPUDevice.");
  }
  const auto &rpu_device = *rpu_device_ptr;
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
const auto &par = getPar();
if (!par.singleDeviceUpdate()) {
RPU_FATAL("Multiple device update not supported for Transfer Device");
}
if (!par.same_context) {
RPU_FATAL("Only same context supported");
}
if (this->n_devices_ < 2) {
RPU_FATAL("Expect at least two devices.");
}
for (int j = 1; j < this->n_devices_ - 1; j++) {
if (par.transfer_every_vec[0] > par.transfer_every_vec[j]) {
RPU_FATAL("Later transfer periods need to be larger than first for CUDA.");
}
}
int in_size = par.getInSize();
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(
this->context_, in_size * in_size, rpu_device.getTransferVecs());
initialize(par.transfer_columns); // pwu/iom
current_slice_indices_.resize(this->n_devices_ - 1);
std::fill(current_slice_indices_.begin(), current_slice_indices_.end(), (int)0);
this->current_update_idx_ = 0;
fully_hidden_ = par.fullyHidden();
}
/*********************************************************************************/
/* getPulseCountLearningRate */
/* Here we compute the LR for the A matrix (the SGD update). Because
of the device properties it is beneficial to use a constant LR
here, but scale the buffer with the scheduled SGD learning rate
later*/
template <typename T> T TransferRPUDeviceCuda<T>::getPulseCountLearningRate(T learning_rate) {
const auto &par = getPar();
if (par.fast_lr > 0) {
return par.fast_lr;
} else {
return learning_rate;
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::readMatrix(
int device_idx, const T *in_vec, T *out_vec, int m_batch, T alpha) {
const auto &par = getPar();
if (par.transfer_columns) {
// forward with transfer vectors
RPU::detail::forwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[device_idx], in_vec, this->x_size_, false, out_vec,
this->d_size_, false, m_batch, alpha, *transfer_iom_, par.transfer_io, false);
} else {
// backward with transfer vectors
RPU::detail::backwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[device_idx], in_vec, this->d_size_, false, out_vec,
this->x_size_, false, m_batch, alpha, *transfer_iom_, par.transfer_io);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::writeMatrix(
int device_idx,
const T *in_vec,
const T *out_vec,
int m_batch,
const T lr,
const PulsedUpdateMetaParameter<T> &up) {
const auto &par = getPar();
// note that the ptrs might point to the current weight
T *W = this->dev_weights_ptrs_[device_idx];
if (par.transfer_columns) {
transfer_pwu_->update(
in_vec, out_vec, W, &*this->rpucuda_device_vec_[device_idx], up, fabs(lr), m_batch, false,
false);
} else {
transfer_pwu_->update(
out_vec, in_vec, W, &*this->rpucuda_device_vec_[device_idx], up, fabs(lr), m_batch, false,
false);
}
}
/*********************************************************************************/
/* partially transfer using the given "readout" transfer vectors
   (with io-managed forward) and the usual device update */
template <typename T>
void TransferRPUDeviceCuda<T>::readAndUpdate(
int to_device_idx,
int from_device_idx,
int i_col_start,
const T lr,
const T *vec,
const int n_vec,
const PulsedUpdateMetaParameter<T> &up) {
if (lr == (T)0.0) {
return;
}
const auto &par = getPar();
int out_size = par.getOutSize();
int t_size = n_vec * out_size; // transfer size
if ((transfer_tmp_ == nullptr) || transfer_tmp_->getSize() < t_size) {
transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, t_size);
this->context_->synchronize();
}
T *out_vec = transfer_tmp_->getData();
// forward / backward with transfer vectors. Since we need *positive*
// update, LR needs to be negative. However, this is not supported
// in the PWU really. Thus we scale the output by -1 and set alpha
// accordingly
readMatrix(from_device_idx, vec, out_vec, n_vec, -1.0);
// update according to device
writeMatrix(to_device_idx, vec, out_vec, n_vec, fabs(lr), up);
}
/*********************************************************************************/
template <typename T>
void TransferRPUDeviceCuda<T>::transfer(
int to_device_idx,
int from_device_idx,
    const PulsedUpdateMetaParameter<T> &current_up,
const T current_lr) {
int i_slice = current_slice_indices_[from_device_idx];
const auto &par = getPar();
int in_size = par.getInSize();
int out_size = par.getOutSize();
if (par.random_selection) {
i_slice = MAX(MIN(floor(this->rw_rng_.sampleUniform() * in_size), in_size - 1), 0);
}
// transfer_vecs_ is always in_size-major (that is trans==false)
T *tvec = transfer_vecs_->getData() + i_slice * in_size;
int n_rest = in_size - i_slice;
T lr = par.getTransferLR(to_device_idx, from_device_idx, current_lr);
const PulsedUpdateMetaParameter<T> *up;
up = &par.transfer_up;
int n_transfers = MIN(par.n_reads_per_transfer, in_size);
if (n_rest < n_transfers) {
// rest
readAndUpdate(to_device_idx, from_device_idx, i_slice, lr, tvec, n_rest, *up);
// from beginning
readAndUpdate(
to_device_idx, from_device_idx, 0, lr, transfer_vecs_->getData(), n_transfers - n_rest,
*up);
} else {
readAndUpdate(to_device_idx, from_device_idx, i_slice, lr, tvec, n_transfers, *up);
}
if (par.transfer_columns && this->rw_rng_.sampleUniform() < par.with_reset_prob) {
// COL-wise prob!! device-wise reset_prob=1
this->rpucuda_device_vec_[from_device_idx]->resetCols(
this->dev_weights_ptrs_[from_device_idx], i_slice, n_transfers, 1);
}
current_slice_indices_[from_device_idx] = (i_slice + n_transfers) % in_size;
}
/*********************************************************************************/
template <typename T>
inline int TransferRPUDeviceCuda<T>::getTransferEvery(int didx, int m_batch) const {
if (getPar().units_in_mbatch) {
return MAX(ceil(getPar().transfer_every_vec[didx] * m_batch), 0);
} else {
return MAX(round(getPar().transfer_every_vec[didx]), 0);
}
}
/*********************************************************************************/
template <typename T> inline int getNChunks(int m_batch, T every) {
if (every <= 0) {
return 1;
} else {
return MAX((int)(round((T)m_batch / every)), 1); // take next integer for period
}
}
inline int getChunkSize(int m_batch, int nchunks) {
  return (m_batch + nchunks - 1) / nchunks; // ceil division so that no residual chunk is left over
}
inline uint64_t getNextTransfer(uint64_t current_update_idx, int transfer_every) {
if (transfer_every <= 0) {
return std::numeric_limits<uint64_t>::max();
}
return current_update_idx + transfer_every - (current_update_idx % transfer_every);
}
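// Worked example (added, illustrative): with m_batch = 100 and a transfer period of
// every = 30, getNChunks(100, 30) = round(100 / 30) = 3 chunks and
// getChunkSize(100, 3) = ceil(100 / 3) = 34. For the next transfer point,
// getNextTransfer(70, 30) = 70 + 30 - (70 % 30) = 90, and a non-positive
// transfer_every means "never transfer" (the maximum uint64_t is returned).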
/*********************************************************************************/
template <typename T>
pwukpvec_t<T> TransferRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
pwukpvec_t<T> v;
// just get approx chunk size for tuning
int nchunks = getNChunks(m_batch, getTransferEvery(0, m_batch));
int chunk_size = getChunkSize(m_batch, nchunks);
// use the first device as the "FAST" device that gets updates with the true gradients.
v = this->rpucuda_device_vec_[0]->getUpdateKernels(chunk_size, nK32, use_bo64, out_trans, up);
if (nchunks > 1) {
for (auto &kpars : v) {
kpars->ensureChunk();
}
}
return v;
}
/*********************************************************************************/
template <typename T>
void TransferRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
const T lr,
curandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
  // calling kpars->run(..,this,..) directly would cause an error because this class
  // derives from the abstract device.
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
// always same (up) context.
CudaContext *c = up_context;
if (x_counts_chunk != nullptr || d_counts_chunk != nullptr) {
RPU_FATAL("Chunking not allowed here.");
}
  // only look at the first device here, as it makes no sense to transfer
  // the higher-order devices more often
int transfer_every = getTransferEvery(0, m_batch);
auto next_transfer = getNextTransfer(this->current_update_idx_, transfer_every);
if (next_transfer >= m_batch + this->current_update_idx_) {
    // just update the whole batch; we do not call kpars directly, so that
    // non-pulsed devices are also possible. Note that only one device is
    // directly updated with the gradients, thus the tuning kpars are always
    // unique (and valid for that rpu_device only). However, the other RPU
    // device kernels will be tuned during transfer, since we use a separate
    // PWU object
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars, c, this->dev_weights_ptrs_[0], m_batch, blm, up, lr, dev_states, one_sided);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += m_batch; // first update idx
// transfer
if (next_transfer == this->current_update_idx_) {
transfer(1, 0, up, lr);
}
// other higher order devices
for (int j = 1; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
auto higher_order_next_transfer =
getNextTransfer(this->current_update_idx_ - m_batch, getTransferEvery(j, m_batch));
if (higher_order_next_transfer <= this->current_update_idx_) {
transfer(j + 1, j, up, lr);
}
}
} else {
    // the transfer point falls within this batch, so the update has to be done chunk-wise
int batch_start = 0;
int nK32 = blm->getNK32Current();
auto x_counts = blm->getXCountsData();
auto d_counts = blm->getDCountsData();
uint64_t final_update_idx = this->current_update_idx_ + m_batch;
while (next_transfer <= final_update_idx) {
int current_m_batch = (int)(next_transfer - this->current_update_idx_);
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars,
c, // same context since sequence important
this->dev_weights_ptrs_[0], current_m_batch, blm, up, lr, dev_states, one_sided,
x_counts + batch_start * this->x_size_ * nK32, // always non-trans
d_counts + batch_start * this->d_size_ * nK32);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += current_m_batch; // first update idx
batch_start += current_m_batch;
// transfer
if (next_transfer == this->current_update_idx_) {
transfer(1, 0, up, lr);
}
// other higher order devices
for (int j = 1; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
auto higher_order_next_transfer = getNextTransfer(
this->current_update_idx_ - current_m_batch, getTransferEvery(j, m_batch));
if (higher_order_next_transfer <= this->current_update_idx_) {
transfer(j + 1, j, up, lr);
}
}
next_transfer = getNextTransfer(this->current_update_idx_, transfer_every);
}
}
// only reduce at end
this->reduceToWeights(up_context, dev_weights);
}
/*********************************************************************************/
template <typename T>
void TransferRPUDeviceCuda<T>::reduceToWeights(CudaContext *context, T *dev_weights) {
if (!fully_hidden_) {
VectorRPUDeviceCuda<T>::reduceToWeights(context, dev_weights);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, T alpha, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, alpha, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::driftWeights(T *dev_weights, T time_since_last_call) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::driftWeights(dev_weights, time_since_last_call);
}
template <typename T> void TransferRPUDeviceCuda<T>::diffuseWeights(T *dev_weights) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::diffuseWeights(dev_weights);
}
template <typename T> void TransferRPUDeviceCuda<T>::clipWeights(T *dev_weights, T clip) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::clipWeights(dev_weights, clip);
}
template <typename T>
void TransferRPUDeviceCuda<T>::resetCols(T *dev_weights, int start_col, int n_cols, T reset_prob) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::resetCols(dev_weights, start_col, n_cols, reset_prob);
}
template class TransferRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class TransferRPUDeviceCuda<double>;
#endif
} // namespace RPU
/*! \internal \file
 *
* \brief CUDA-specific routines for the GPU implementation of SETTLE constraints algorithm.
*
*
* \author Artem Zhmurov <[email protected]>
*
* \ingroup module_mdlib
*/
#include "gmxpre.h"
#include "settle_gpu_internal.h"
#include <assert.h>
#include <stdio.h>
#include <cmath>
#include <algorithm>
#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/gputraits.h"
#include "gromacs/gpu_utils/typecasts.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pbcutil/pbc_aiuc_cuda.cuh"
namespace gmx
{
//! Number of CUDA threads in a block
constexpr static int sc_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int sc_maxThreadsPerBlock = sc_threadsPerBlock;
/*! \brief SETTLE constraints kernel
*
* Each thread corresponds to a single constraints triangle (i.e. single water molecule).
*
* See original CPU version in settle.cpp
*
* \param [in] numSettles Number of constraints triangles (water molecules).
* \param [in] gm_settles Indexes of three atoms in the constraints triangle. The field .x of int3
* data type corresponds to Oxygen, fields .y and .z are two hydrogen atoms.
* \param [in] pars Parameters for the algorithm (i.e. masses, target distances, etc.).
* \param [in] gm_x Coordinates of atoms before the timestep.
 * \param [in,out] gm_xprime        Coordinates of atoms after the timestep (constrained coordinates will be
* saved here).
* \param [in] invdt Reciprocal timestep.
* \param [in] gm_v Velocities of the particles.
* \param [in] gm_virialScaled Virial tensor.
* \param [in] pbcAiuc Periodic boundary conditions data.
*/
template<bool updateVelocities, bool computeVirial>
__launch_bounds__(sc_maxThreadsPerBlock) __global__
void settle_kernel(const int numSettles,
const WaterMolecule* __restrict__ gm_settles,
const SettleParameters pars,
const float3* __restrict__ gm_x,
float3* __restrict__ gm_xprime,
float invdt,
float3* __restrict__ gm_v,
float* __restrict__ gm_virialScaled,
const PbcAiuc pbcAiuc)
{
/* ******************************************************************* */
/* ** */
/* Original code by Shuichi Miyamoto, last update Oct. 1, 1992 ** */
/* ** */
/* Algorithm changes by Berk Hess: ** */
/* 2004-07-15 Convert COM to double precision to avoid drift ** */
/* 2006-10-16 Changed velocity update to use differences ** */
/* 2012-09-24 Use oxygen as reference instead of COM ** */
/* 2016-02 Complete rewrite of the code for SIMD ** */
/* 2020-06 Completely remove use of COM to minimize drift ** */
/* ** */
/* Reference for the SETTLE algorithm ** */
/* S. Miyamoto et al., J. Comp. Chem., 13, 952 (1992). ** */
/* ** */
/* ******************************************************************* */
constexpr float almost_zero = real(1e-12);
extern __shared__ float sm_threadVirial[];
int tid = static_cast<int>(blockIdx.x * blockDim.x + threadIdx.x);
if (tid < numSettles)
{
// These are the indexes of three atoms in a single 'water' molecule.
// TODO Can be reduced to one integer if atoms are consecutive in memory.
WaterMolecule indices = gm_settles[tid];
float3 x_ow1 = gm_x[indices.ow1];
float3 x_hw2 = gm_x[indices.hw2];
float3 x_hw3 = gm_x[indices.hw3];
float3 xprime_ow1 = gm_xprime[indices.ow1];
float3 xprime_hw2 = gm_xprime[indices.hw2];
float3 xprime_hw3 = gm_xprime[indices.hw3];
float3 dist21 = pbcDxAiuc(pbcAiuc, x_hw2, x_ow1);
float3 dist31 = pbcDxAiuc(pbcAiuc, x_hw3, x_ow1);
float3 doh2 = pbcDxAiuc(pbcAiuc, xprime_hw2, xprime_ow1);
float3 doh3 = pbcDxAiuc(pbcAiuc, xprime_hw3, xprime_ow1);
float3 a1 = (-doh2 - doh3) * pars.wh;
float3 b1 = doh2 + a1;
float3 c1 = doh3 + a1;
float xakszd = dist21.y * dist31.z - dist21.z * dist31.y;
float yakszd = dist21.z * dist31.x - dist21.x * dist31.z;
float zakszd = dist21.x * dist31.y - dist21.y * dist31.x;
float xaksxd = a1.y * zakszd - a1.z * yakszd;
float yaksxd = a1.z * xakszd - a1.x * zakszd;
float zaksxd = a1.x * yakszd - a1.y * xakszd;
float xaksyd = yakszd * zaksxd - zakszd * yaksxd;
float yaksyd = zakszd * xaksxd - xakszd * zaksxd;
float zaksyd = xakszd * yaksxd - yakszd * xaksxd;
float axlng = rsqrt(xaksxd * xaksxd + yaksxd * yaksxd + zaksxd * zaksxd);
float aylng = rsqrt(xaksyd * xaksyd + yaksyd * yaksyd + zaksyd * zaksyd);
float azlng = rsqrt(xakszd * xakszd + yakszd * yakszd + zakszd * zakszd);
// TODO {1,2,3} indexes should be swapped with {.x, .y, .z} components.
// This way, we will be able to use vector ops more.
float3 trns1, trns2, trns3;
trns1.x = xaksxd * axlng;
trns2.x = yaksxd * axlng;
trns3.x = zaksxd * axlng;
trns1.y = xaksyd * aylng;
trns2.y = yaksyd * aylng;
trns3.y = zaksyd * aylng;
trns1.z = xakszd * azlng;
trns2.z = yakszd * azlng;
trns3.z = zakszd * azlng;
float2 b0d, c0d;
b0d.x = trns1.x * dist21.x + trns2.x * dist21.y + trns3.x * dist21.z;
b0d.y = trns1.y * dist21.x + trns2.y * dist21.y + trns3.y * dist21.z;
c0d.x = trns1.x * dist31.x + trns2.x * dist31.y + trns3.x * dist31.z;
c0d.y = trns1.y * dist31.x + trns2.y * dist31.y + trns3.y * dist31.z;
float3 b1d, c1d;
float a1d_z = trns1.z * a1.x + trns2.z * a1.y + trns3.z * a1.z;
b1d.x = trns1.x * b1.x + trns2.x * b1.y + trns3.x * b1.z;
b1d.y = trns1.y * b1.x + trns2.y * b1.y + trns3.y * b1.z;
b1d.z = trns1.z * b1.x + trns2.z * b1.y + trns3.z * b1.z;
c1d.x = trns1.x * c1.x + trns2.x * c1.y + trns3.x * c1.z;
c1d.y = trns1.y * c1.x + trns2.y * c1.y + trns3.y * c1.z;
c1d.z = trns1.z * c1.x + trns2.z * c1.y + trns3.z * c1.z;
float sinphi = a1d_z * rsqrt(pars.ra * pars.ra);
float tmp2 = 1.0F - sinphi * sinphi;
if (almost_zero > tmp2)
{
tmp2 = almost_zero;
}
float tmp = rsqrt(tmp2);
float cosphi = tmp2 * tmp;
float sinpsi = (b1d.z - c1d.z) * pars.irc2 * tmp;
tmp2 = 1.0F - sinpsi * sinpsi;
float cospsi = tmp2 * rsqrt(tmp2);
float a2d_y = pars.ra * cosphi;
float b2d_x = -pars.rc * cospsi;
float t1 = -pars.rb * cosphi;
float t2 = pars.rc * sinpsi * sinphi;
float b2d_y = t1 - t2;
float c2d_y = t1 + t2;
/* --- Step3 al,be,ga --- */
float alpha = b2d_x * (b0d.x - c0d.x) + b0d.y * b2d_y + c0d.y * c2d_y;
float beta = b2d_x * (c0d.y - b0d.y) + b0d.x * b2d_y + c0d.x * c2d_y;
float gamma = b0d.x * b1d.y - b1d.x * b0d.y + c0d.x * c1d.y - c1d.x * c0d.y;
float al2be2 = alpha * alpha + beta * beta;
tmp2 = (al2be2 - gamma * gamma);
float sinthe = (alpha * gamma - beta * tmp2 * rsqrt(tmp2)) * rsqrt(al2be2 * al2be2);
/* --- Step4 A3' --- */
tmp2 = 1.0F - sinthe * sinthe;
float costhe = tmp2 * rsqrt(tmp2);
float3 a3d, b3d, c3d;
a3d.x = -a2d_y * sinthe;
a3d.y = a2d_y * costhe;
a3d.z = a1d_z;
b3d.x = b2d_x * costhe - b2d_y * sinthe;
b3d.y = b2d_x * sinthe + b2d_y * costhe;
b3d.z = b1d.z;
c3d.x = -b2d_x * costhe - c2d_y * sinthe;
c3d.y = -b2d_x * sinthe + c2d_y * costhe;
c3d.z = c1d.z;
/* --- Step5 A3 --- */
float3 a3, b3, c3;
a3.x = trns1.x * a3d.x + trns1.y * a3d.y + trns1.z * a3d.z;
a3.y = trns2.x * a3d.x + trns2.y * a3d.y + trns2.z * a3d.z;
a3.z = trns3.x * a3d.x + trns3.y * a3d.y + trns3.z * a3d.z;
b3.x = trns1.x * b3d.x + trns1.y * b3d.y + trns1.z * b3d.z;
b3.y = trns2.x * b3d.x + trns2.y * b3d.y + trns2.z * b3d.z;
b3.z = trns3.x * b3d.x + trns3.y * b3d.y + trns3.z * b3d.z;
c3.x = trns1.x * c3d.x + trns1.y * c3d.y + trns1.z * c3d.z;
c3.y = trns2.x * c3d.x + trns2.y * c3d.y + trns2.z * c3d.z;
c3.z = trns3.x * c3d.x + trns3.y * c3d.y + trns3.z * c3d.z;
/* Compute and store the corrected new coordinate */
const float3 dxOw1 = a3 - a1;
const float3 dxHw2 = b3 - b1;
const float3 dxHw3 = c3 - c1;
gm_xprime[indices.ow1] = xprime_ow1 + dxOw1;
gm_xprime[indices.hw2] = xprime_hw2 + dxHw2;
gm_xprime[indices.hw3] = xprime_hw3 + dxHw3;
if (updateVelocities)
{
float3 v_ow1 = gm_v[indices.ow1];
float3 v_hw2 = gm_v[indices.hw2];
float3 v_hw3 = gm_v[indices.hw3];
/* Add the position correction divided by dt to the velocity */
v_ow1 = dxOw1 * invdt + v_ow1;
v_hw2 = dxHw2 * invdt + v_hw2;
v_hw3 = dxHw3 * invdt + v_hw3;
gm_v[indices.ow1] = v_ow1;
gm_v[indices.hw2] = v_hw2;
gm_v[indices.hw3] = v_hw3;
}
if (computeVirial)
{
float3 mdb = pars.mH * dxHw2;
float3 mdc = pars.mH * dxHw3;
float3 mdo = pars.mO * dxOw1 + mdb + mdc;
sm_threadVirial[0 * blockDim.x + threadIdx.x] =
-(x_ow1.x * mdo.x + dist21.x * mdb.x + dist31.x * mdc.x);
sm_threadVirial[1 * blockDim.x + threadIdx.x] =
-(x_ow1.x * mdo.y + dist21.x * mdb.y + dist31.x * mdc.y);
sm_threadVirial[2 * blockDim.x + threadIdx.x] =
-(x_ow1.x * mdo.z + dist21.x * mdb.z + dist31.x * mdc.z);
sm_threadVirial[3 * blockDim.x + threadIdx.x] =
-(x_ow1.y * mdo.y + dist21.y * mdb.y + dist31.y * mdc.y);
sm_threadVirial[4 * blockDim.x + threadIdx.x] =
-(x_ow1.y * mdo.z + dist21.y * mdb.z + dist31.y * mdc.z);
sm_threadVirial[5 * blockDim.x + threadIdx.x] =
-(x_ow1.z * mdo.z + dist21.z * mdb.z + dist31.z * mdc.z);
}
}
else
{
// Filling data for dummy threads with zeroes
if (computeVirial)
{
for (int d = 0; d < 6; d++)
{
sm_threadVirial[d * blockDim.x + threadIdx.x] = 0.0F;
}
}
}
// Basic reduction for the values inside single thread block
// TODO what follows should be separated out as a standard virial reduction subroutine
if (computeVirial)
{
// This is to ensure that all threads saved the data before reduction starts
__syncthreads();
// This casts unsigned into signed integers to avoid clang warnings
int tib = static_cast<int>(threadIdx.x);
int blockSize = static_cast<int>(blockDim.x);
// Reduce up to one virial per thread block.
// The block is divided in half and each thread in the first half adds the
// partial virial of its counterpart in the second half. The first half is
// then halved again and the process repeats until only one thread is left.
// This only works if the number of threads per block is a power of two.
for (int divideBy = 2; divideBy <= blockSize; divideBy *= 2)
{
int dividedAt = blockSize / divideBy;
if (tib < dividedAt)
{
for (int d = 0; d < 6; d++)
{
sm_threadVirial[d * blockSize + tib] +=
sm_threadVirial[d * blockSize + (tib + dividedAt)];
}
}
if (dividedAt > warpSize / 2)
{
__syncthreads();
}
}
// First 6 threads in the block add the 6 components of virial to the global memory address
if (tib < 6)
{
atomicAdd(&(gm_virialScaled[tib]), sm_threadVirial[tib * blockSize]);
}
}
}
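/* Added note (an interpretation, not part of the original GROMACS source):
 * sm_threadVirial is laid out as six consecutive arrays of blockDim.x floats,
 * one per independent component of the symmetric virial tensor
 * (xx, xy, xz, yy, yz, zz). This matches the launch code below, which sizes
 * dynamic shared memory as sc_threadsPerBlock * 6 * sizeof(float) whenever
 * the virial is requested. */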
/*! \brief Select templated kernel.
*
* Returns pointer to a CUDA kernel based on provided booleans.
*
* \param[in] updateVelocities If the velocities should be constrained.
* \param[in] computeVirial If virial should be updated.
*
* \return Pointer to CUDA kernel
*/
inline auto getSettleKernelPtr(const bool updateVelocities, const bool computeVirial)
{
auto kernelPtr = settle_kernel<true, true>;
if (updateVelocities && computeVirial)
{
kernelPtr = settle_kernel<true, true>;
}
else if (updateVelocities && !computeVirial)
{
kernelPtr = settle_kernel<true, false>;
}
else if (!updateVelocities && computeVirial)
{
kernelPtr = settle_kernel<false, true>;
}
else if (!updateVelocities && !computeVirial)
{
kernelPtr = settle_kernel<false, false>;
}
return kernelPtr;
}
void launchSettleGpuKernel(const int numSettles,
const DeviceBuffer<WaterMolecule>& d_atomIds,
const SettleParameters& settleParameters,
const DeviceBuffer<Float3>& d_x,
DeviceBuffer<Float3> d_xp,
const bool updateVelocities,
DeviceBuffer<Float3> d_v,
const real invdt,
const bool computeVirial,
DeviceBuffer<float> virialScaled,
const PbcAiuc& pbcAiuc,
const DeviceStream& deviceStream)
{
static_assert(
gmx::isPowerOfTwo(sc_threadsPerBlock),
"Number of threads per block should be a power of two in order for reduction to work.");
auto kernelPtr = getSettleKernelPtr(updateVelocities, computeVirial);
KernelLaunchConfig config;
config.blockSize[0] = sc_threadsPerBlock;
config.blockSize[1] = 1;
config.blockSize[2] = 1;
config.gridSize[0] = (numSettles + sc_threadsPerBlock - 1) / sc_threadsPerBlock;
config.gridSize[1] = 1;
config.gridSize[2] = 1;
// Shared memory is only used for virial reduction
if (computeVirial)
{
config.sharedMemorySize = sc_threadsPerBlock * 6 * sizeof(float);
}
else
{
config.sharedMemorySize = 0;
}
const auto kernelArgs = prepareGpuKernelArguments(kernelPtr,
config,
&numSettles,
&d_atomIds,
&settleParameters,
asFloat3Pointer(&d_x),
asFloat3Pointer(&d_xp),
&invdt,
asFloat3Pointer(&d_v),
&virialScaled,
&pbcAiuc);
launchGpuKernel(kernelPtr,
config,
deviceStream,
nullptr,
"settle_kernel<updateVelocities, computeVirial>",
kernelArgs);
}
} // namespace gmx
|
the_stack
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/extension.h>
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do
{
assumed = old;
old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fminf(val, __int_as_float(assumed))));
}
while (assumed != old);
return __int_as_float(old);
}
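// Added illustration (a hedged sketch, not used by the kernels in this file):
// the same compare-and-swap loop generalizes to a float atomicMax. The name
// atomicMaxFloat is an assumption, chosen to avoid clashing with the built-in
// integer overloads of atomicMax.
__device__ static float atomicMaxFloat(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do
{
assumed = old;
old = ::atomicCAS(address_as_i, assumed, __float_as_int(::fmaxf(val, __int_as_float(assumed))));
}
while (assumed != old);
return __int_as_float(old);
}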
__device__ constexpr float kDegenerateBarycentricCoordinatesCutoff() { return 0.9f; }
__device__ int clamped_integer_max(float a, float b, float c, int low, int high)
{
return min(max(static_cast<int>(ceil(max(max(a, b), c))), low), high);
}
__device__ int clamped_integer_min(float a, float b, float c, int low, int high) {
return min(max(static_cast<int>(floor(min(min(a, b), c))), low), high);
}
__device__ void compute_edge_functions(const float px, const float py, const float m_inv[9], float values[3])
{
for (int i = 0; i < 3; ++i)
{
const float a = m_inv[3 * i + 0];
const float b = m_inv[3 * i + 1];
const float c = m_inv[3 * i + 2];
values[i] = a * px + b * py + c;
}
}
__device__ void compute_unnormalized_matrix_inverse(
const float a11, const float a12, const float a13,
const float a21, const float a22, const float a23,
const float a31, const float a32, const float a33, float m_inv[9])
{
m_inv[0] = a22 * a33 - a32 * a23;
m_inv[1] = a13 * a32 - a33 * a12;
m_inv[2] = a12 * a23 - a22 * a13;
m_inv[3] = a23 * a31 - a33 * a21;
m_inv[4] = a11 * a33 - a31 * a13;
m_inv[5] = a13 * a21 - a23 * a11;
m_inv[6] = a21 * a32 - a31 * a22;
m_inv[7] = a12 * a31 - a32 * a11;
m_inv[8] = a11 * a22 - a21 * a12;
// The first column of the unnormalized M^-1 contains intermediate values for det(M).
const float det = a11 * m_inv[0] + a12 * m_inv[3] + a13 * m_inv[6];
// Transfer the sign of the determinant.
if (det < 0.0f)
{
for (int i = 0; i < 9; ++i)
{
m_inv[i] = -m_inv[i];
}
}
}
__device__ __forceinline__ bool pixel_is_inside_triangle(const float edge_values[3])
{
// Check that the edge values are all non-negative and that at least one is positive (triangle is non-degenerate).
// Loosen the constraint from 0.0 to -0.00001 to avoid salt-and-pepper rendering artifacts.
float eps = -0.00001;
return (edge_values[0] >= eps && edge_values[1] >= eps && edge_values[2] >= eps) && (edge_values[0] > 0 || edge_values[1] > 0 || edge_values[2] > 0);
}
__global__ void rasterize_triangles_cuda_forward_kernel(
const torch::PackedTensorAccessor<float, 2, torch::RestrictPtrTraits, size_t> vertices,
const torch::PackedTensorAccessor<int32_t, 2, torch::RestrictPtrTraits, size_t> triangles,
const int image_width,
const int image_height,
torch::PackedTensorAccessor<int32_t, 2, torch::RestrictPtrTraits, size_t> px_triangle_ids,
torch::PackedTensorAccessor<float, 3, torch::RestrictPtrTraits, size_t> px_barycentric_coordinates,
torch::PackedTensorAccessor<float, 2, torch::RestrictPtrTraits, size_t> z_buffer,
const int num_triangles
)
{
const int triangle_id = threadIdx.x + blockIdx.x * blockDim.x;
if (triangle_id >= num_triangles)
{
return;
}
const float half_image_width = 0.5 * image_width;
const float half_image_height = 0.5 * image_height;
float unnormalized_matrix_inverse[9];
float b_over_w[3];
const int v0_id = triangles[triangle_id][0];
const int v1_id = triangles[triangle_id][1];
const int v2_id = triangles[triangle_id][2];
const float v0w = vertices[v0_id][3];
const float v1w = vertices[v1_id][3];
const float v2w = vertices[v2_id][3];
// Early exit: if all w < 0, triangle is entirely behind the eye.
if (v0w < 0 && v1w < 0 && v2w < 0)
{
return;
}
const float v0x = vertices[v0_id][0];
const float v0y = vertices[v0_id][1];
const float v1x = vertices[v1_id][0];
const float v1y = vertices[v1_id][1];
const float v2x = vertices[v2_id][0];
const float v2y = vertices[v2_id][1];
compute_unnormalized_matrix_inverse(v0x, v1x, v2x,
v0y, v1y, v2y,
v0w, v1w, v2w,
unnormalized_matrix_inverse);
// Initialize the bounding box to the entire screen.
int left = 0, right = image_width, bottom = 0, top = image_height;
// If the triangle is entirely inside the screen, project the vertices to
// pixel coordinates and find the triangle bounding box enlarged to the
// nearest integer and clamped to the image boundaries.
if (v0w > 0 && v1w > 0 && v2w > 0)
{
const float p0x = (v0x / v0w + 1.0) * half_image_width;
const float p1x = (v1x / v1w + 1.0) * half_image_width;
const float p2x = (v2x / v2w + 1.0) * half_image_width;
const float p0y = (v0y / v0w + 1.0) * half_image_height;
const float p1y = (v1y / v1w + 1.0) * half_image_height;
const float p2y = (v2y / v2w + 1.0) * half_image_height;
left = clamped_integer_min(p0x, p1x, p2x, 0, image_width);
right = clamped_integer_max(p0x, p1x, p2x, 0, image_width);
bottom = clamped_integer_min(p0y, p1y, p2y, 0, image_height);
top = clamped_integer_max(p0y, p1y, p2y, 0, image_height);
}
// Iterate over each pixel in the bounding box.
for (int iy = bottom; iy < top; ++iy)
{
for (int ix = left; ix < right; ++ix)
{
const float px = ((ix + 0.5) / half_image_width) - 1.0;
const float py = ((iy + 0.5) / half_image_height) - 1.0;
compute_edge_functions(px, py, unnormalized_matrix_inverse, b_over_w);
if (!pixel_is_inside_triangle(b_over_w))
{
continue;
}
const float one_over_w = b_over_w[0] + b_over_w[1] + b_over_w[2];
const float b0 = b_over_w[0] / one_over_w;
const float b1 = b_over_w[1] / one_over_w;
const float b2 = b_over_w[2] / one_over_w;
const float v0z = vertices[v0_id][2];
const float v1z = vertices[v1_id][2];
const float v2z = vertices[v2_id][2];
// Since we computed an unnormalized w above, we need to recompute
// a properly scaled clip-space w value and then divide clip-space z
// by that.
const float clip_z = b0 * v0z + b1 * v1z + b2 * v2z;
const float clip_w = b0 * v0w + b1 * v1w + b2 * v2w;
const float z = clip_z / clip_w;
// Skip the pixel if it is farther than the current z-buffer pixel or beyond the near or far clipping plane.
if (z < -1.0 || z > 1.0 || z > z_buffer[iy][ix])
{
continue;
}
atomicMin(&z_buffer[iy][ix], z);
if (z == z_buffer[iy][ix])
{
px_triangle_ids[iy][ix] = triangle_id;
px_barycentric_coordinates[iy][ix][0] = b0;
px_barycentric_coordinates[iy][ix][1] = b1;
px_barycentric_coordinates[iy][ix][2] = b2;
}
}
}
}
std::vector<torch::Tensor> rasterize_triangles_cuda_forward(
const torch::Tensor &vertices,
const torch::Tensor &triangles,
const int image_width,
const int image_height,
torch::Tensor &px_triangle_ids, // image height * image width int32 zeros
torch::Tensor &px_barycentric_coordinates, // image height * image width * 3 float32 ones require grad
torch::Tensor &z_buffer // image height * image width * float32 ones
)
{
const int num_triangles = triangles.size(0);
const int threads = 512;
const dim3 blocks = ((num_triangles - 1) / threads + 1);
rasterize_triangles_cuda_forward_kernel<<<blocks, threads>>>(
vertices.packed_accessor<float, 2, torch::RestrictPtrTraits, size_t>(),
triangles.packed_accessor<int32_t, 2, torch::RestrictPtrTraits, size_t>(),
image_width,
image_height,
px_triangle_ids.packed_accessor<int32_t, 2, torch::RestrictPtrTraits, size_t>(),
px_barycentric_coordinates.packed_accessor<float, 3, torch::RestrictPtrTraits, size_t>(),
z_buffer.packed_accessor<float, 2, torch::RestrictPtrTraits, size_t>(),
num_triangles
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in rasterize_triangles_cuda_forward: %s\n", cudaGetErrorString(err));
return {px_triangle_ids, px_barycentric_coordinates, z_buffer};
}
__global__ void rasterize_triangles_cuda_backward_kernel(
const torch::PackedTensorAccessor<float, 2, torch::RestrictPtrTraits, size_t> vertices,
const torch::PackedTensorAccessor<int32_t, 2, torch::RestrictPtrTraits, size_t> triangles,
const torch::PackedTensorAccessor<int32_t, 2, torch::RestrictPtrTraits, size_t> triangle_ids,
const torch::PackedTensorAccessor<float, 3, torch::RestrictPtrTraits, size_t> barycentric_coordinates,
const torch::PackedTensorAccessor<float, 3, torch::RestrictPtrTraits, size_t> df_dbarycentric_coordinates,
const int image_width,
const int image_height,
torch::PackedTensorAccessor<float, 2, torch::RestrictPtrTraits, size_t> df_dvertices
)
{
const int pixel_id = blockIdx.x * blockDim.x + threadIdx.x;
if (pixel_id >= image_width * image_height)
{
return ;
}
// We first loop over each pixel in the output image, and compute
// dbarycentric_coordinate[0,1,2]/dvertex[0x, 0y, 1x, 1y, 2x, 2y].
// Next we compute each value above's contribution to
// df/dvertices, building up that matrix as the output of this iteration.
const int ix = pixel_id % image_width;
const int iy = pixel_id / image_width;
// b0, b1, and b2 are the three barycentric coordinate values
// rendered at pixel pixel_id.
const float b0 = barycentric_coordinates[iy][ix][0];
const float b1 = barycentric_coordinates[iy][ix][1];
const float b2 = barycentric_coordinates[iy][ix][2];
if (b0 + b1 + b2 < kDegenerateBarycentricCoordinatesCutoff())
{
return;
}
const float df_db0 = df_dbarycentric_coordinates[iy][ix][0];
const float df_db1 = df_dbarycentric_coordinates[iy][ix][1];
const float df_db2 = df_dbarycentric_coordinates[iy][ix][2];
const int triangle_at_current_pixel = triangle_ids[iy][ix];
// Extract vertex indices for the current triangle.
const int v0_id = triangles[triangle_at_current_pixel][0];
const int v1_id = triangles[triangle_at_current_pixel][1];
const int v2_id = triangles[triangle_at_current_pixel][2];
// Extract x,y,w components of the vertices' clip space coordinates.
const float x0 = vertices[v0_id][0];
const float y0 = vertices[v0_id][1];
const float w0 = vertices[v0_id][3];
const float x1 = vertices[v1_id][0];
const float y1 = vertices[v1_id][1];
const float w1 = vertices[v1_id][3];
const float x2 = vertices[v2_id][0];
const float y2 = vertices[v2_id][1];
const float w2 = vertices[v2_id][3];
// Compute the pixel's NDC coordinates.
const float px = 2 * (ix + 0.5f) / image_width - 1.0f;
const float py = 2 * (iy + 0.5f) / image_height - 1.0f;
// Barycentric gradients wrt each vertex coordinate share a common factor.
const float db0_dx = py * (w1 - w2) - (y1 - y2);
const float db1_dx = py * (w2 - w0) - (y2 - y0);
const float db2_dx = -(db0_dx + db1_dx);
const float db0_dy = (x1 - x2) - px * (w1 - w2);
const float db1_dy = (x2 - x0) - px * (w2 - w0);
const float db2_dy = -(db0_dy + db1_dy);
const float db0_dw = px * (y1 - y2) - py * (x1 - x2);
const float db1_dw = px * (y2 - y0) - py * (x2 - x0);
const float db2_dw = -(db0_dw + db1_dw);
// Combine them with chain rule.
const float df_dx = df_db0 * db0_dx + df_db1 * db1_dx + df_db2 * db2_dx;
const float df_dy = df_db0 * db0_dy + df_db1 * db1_dy + df_db2 * db2_dy;
const float df_dw = df_db0 * db0_dw + df_db1 * db1_dw + df_db2 * db2_dw;
// Values of edge equations and inverse w at the current pixel.
const float edge0_over_w = x2 * db0_dx + y2 * db0_dy + w2 * db0_dw;
const float edge1_over_w = x2 * db1_dx + y2 * db1_dy + w2 * db1_dw;
const float edge2_over_w = x1 * db2_dx + y1 * db2_dy + w1 * db2_dw;
const float w_inv = edge0_over_w + edge1_over_w + edge2_over_w;
// All gradients share a common denominator.
const float w_sqr = 1 / (w_inv * w_inv);
// Gradients wrt each vertex share a common factor.
const float edge0 = w_sqr * edge0_over_w;
const float edge1 = w_sqr * edge1_over_w;
const float edge2 = w_sqr * edge2_over_w;
atomicAdd(&df_dvertices[v0_id][0], edge0 * df_dx);
atomicAdd(&df_dvertices[v0_id][1], edge0 * df_dy);
atomicAdd(&df_dvertices[v0_id][3], edge0 * df_dw);
atomicAdd(&df_dvertices[v1_id][0], edge1 * df_dx);
atomicAdd(&df_dvertices[v1_id][1], edge1 * df_dy);
atomicAdd(&df_dvertices[v1_id][3], edge1 * df_dw);
atomicAdd(&df_dvertices[v2_id][0], edge2 * df_dx);
atomicAdd(&df_dvertices[v2_id][1], edge2 * df_dy);
atomicAdd(&df_dvertices[v2_id][3], edge2 * df_dw);
}
torch::Tensor rasterize_triangles_cuda_backward(
const torch::Tensor &vertices,
const torch::Tensor &triangles,
const torch::Tensor &triangle_ids,
const torch::Tensor &barycentric_coordinates,
const torch::Tensor &df_dbarycentric_coordinates,
const int image_width,
const int image_height,
torch::Tensor &df_dvertices // num_vertex * 4 float32 zeros
)
{
const int threads = 512;
const dim3 blocks = ((image_width * image_height - 1) / threads + 1);
rasterize_triangles_cuda_backward_kernel<<<blocks, threads>>>(
vertices.packed_accessor<float, 2, torch::RestrictPtrTraits, size_t>(),
triangles.packed_accessor<int32_t, 2, torch::RestrictPtrTraits, size_t>(),
triangle_ids.packed_accessor<int32_t, 2, torch::RestrictPtrTraits, size_t>(),
barycentric_coordinates.packed_accessor<float, 3, torch::RestrictPtrTraits, size_t>(),
df_dbarycentric_coordinates.packed_accessor<float, 3, torch::RestrictPtrTraits, size_t>(),
image_width,
image_height,
df_dvertices.packed_accessor<float, 2, torch::RestrictPtrTraits, size_t>()
);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("Error in rasterize_triangles_cuda_backward: %s\n", cudaGetErrorString(err));
return df_dvertices;
}
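// Added note (a hedged sketch, not part of this file): these entry points are
// typically exposed to Python from a separate pybind11 binding translation
// unit; the module and function names below are assumptions for illustration
// only.
//
// PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//     m.def("rasterize_triangles_forward", &rasterize_triangles_cuda_forward,
//           "Rasterize triangles, forward pass (CUDA)");
//     m.def("rasterize_triangles_backward", &rasterize_triangles_cuda_backward,
//           "Rasterize triangles, backward pass (CUDA)");
// }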
|
the_stack
|
#include "nnnormalize.hpp"
#include "impl/dispatcher.hpp"
#include <cmath>
#include <cassert>
#include <cstring>
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
template<vl::DeviceType deviceType, vl::DataType dataType> struct LRNForward ;
template<vl::DeviceType deviceType, vl::DataType dataType> struct LRNBackward ;
// -------------------------------------------------------------------
// Fast approximated numerical routines
// -------------------------------------------------------------------
#ifndef _MSC_VER
#include <x86intrin.h>
#pragma GCC optimize ("fast-math")
#pragma GCC optimize ("tree-vectorize")
//#pragma GCC target ("veclibabi=svml")
//#pragma GCC target "sse4"
#endif
#define restrict __restrict
#define VL_NNNORMALIZE_FAST
#define max(a,b) (((a)>=(b))?a:b)
#define xat(t) x[(t) * offset]
#define yat(t) y[(t) * offset]
#define zat(t) z[(t) * offset]
#ifndef VL_NNNORMALIZE_FAST
inline double fast_pow(double a, double b) { return pow(a,b) ; }
inline float fast_pow(float a, float b) { return powf(a,b) ; }
#else
//#define VERY_FAST
#ifndef VERY_FAST
inline double fast_pow(double x, double y)
{
double z ;
double const plog3 = 0.164042561333445 ;
double const plog2 = -0.606737602222409 ;
double const plog1 = 1.442695040888963 ;
double const pexp3 = 0.079441541679836 ;
double const pexp2 = 0.227411277760219 ;
double const pexp1 = 0.693147180559945 ;
typedef long long int int_t;
const int_t offset = 1023LL << 52 ;
int_t ix = *(int_t*)&x - offset ;
int_t imx = (ix & ((1LL<<52)-1LL)) + offset;
double fx = (double)(ix >> 52) ;
double mx = *((double*)&imx) - 1 ;
double mx2 = mx*mx ;
double mx3 = mx2*mx ;
double t = y * (fx + mx*plog1 + mx2*plog2 + mx3*plog3) ;
// double t = y * (fx + mx) ;
double fz = floor(t) ;
double rz = t - fz ;
double rz2 = rz*rz ;
double rz3 = rz2*rz ;
double tz = fz + rz*pexp1 + rz2*pexp2 + rz3*pexp3 ;
// double tz = fz + rz ;
// mexPrintf("%g %g -- ix %ld imx %ld fx %g mx %g t %g\n", x,y, ix,imx, fx, mx, t) ;
*((int_t*)&z) = (int_t)(tz * (1LL<<52)) + offset ;
//z = exp(t * log(2.0)) ;
return z ;
}
#else
inline double fast_pow(double a, double b)
{
double z ;
typedef long long int int_t;
const int_t offset = 1023LL << 52 ;
int_t ai = *((int_t*)&a) ;
*((int_t*)&z) = (int_t)(b * (ai - offset)) + offset ;
return z ;
}
#endif
#ifndef VERY_FAST
inline float fast_pow(float x, float y)
{
float z ;
float const plog3 = 0.164042561333445F ;
float const plog2 = -0.606737602222409F ;
float const plog1 = 1.442695040888963F ;
float const pexp3 = 0.079441541679836F ;
float const pexp2 = 0.227411277760219F ;
float const pexp1 = 0.693147180559945F ;
typedef int int_t;
const int_t offset = 127 << 23 ;
int_t ix = *(int_t*)&x - offset ;
int_t imx = (ix & ((1<<23)-1)) + offset;
float fx = (float)(ix >> 23) ;
float mx = *((float*)&imx) - 1 ;
float mx2 = mx*mx ;
float mx3 = mx2*mx ;
float t = y * (fx + mx*plog1 + mx2*plog2 + mx3*plog3) ;
float fz = floor(t) ;
float rz = t - fz ;
float rz2 = rz*rz ;
float rz3 = rz2*rz ;
float tz = fz + rz*pexp1 + rz2*pexp2 + rz3*pexp3 ;
*((int_t*)&z) = (int_t)(tz * (1<<23)) + offset ;
return z ;
}
#else
inline float fast_pow(float a, float b)
{
float z ;
typedef int int_t;
const int_t offset = 127 << 23 ;
int_t ai = *((int_t*)&a) ;
*((int_t*)&z) = (int_t)(b * (ai - offset)) + offset ;
return z ;
}
#endif
#endif
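/*
 Added sketch (not part of the original MatConvNet source): fast_pow above
 approximates pow() by splitting the argument into its IEEE-754 exponent and
 mantissa bits and evaluating cubic polynomials for log2 and exp2. The
 disabled host-side check below compares it against the standard library;
 the 1e-2 relative tolerance is an assumption chosen only for illustration.
*/
#if 0
#include <cstdio>
static void check_fast_pow()
{
for (float x = 0.5f ; x <= 4.0f ; x += 0.25f) {
float approx = fast_pow(x, -0.75f) ;
float exact = std::pow(x, -0.75f) ;
if (std::fabs(approx - exact) > 1e-2f * exact) {
std::printf("fast_pow(%g, -0.75) = %g (exact %g)\n", x, approx, exact) ;
}
}
}
#endif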
// -------------------------------------------------------------------
// Forward CPU
// -------------------------------------------------------------------
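/*
 Added note (an interpretation, not in the original source): for each spatial
 location, LRN normalizes channel t as
 y(t) = x(t) * (kappa + alpha * sum_{q = t-m1}^{t+m2} x(q)^2)^(-beta),
 where the window spans normDepth channels (m1 below t and m2 above it). The
 fast path below keeps this window sum in `acc` and updates it incrementally
 as t advances, instead of recomputing it for every output channel.
*/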
template<vl::DataType dataType>
struct LRNForward<vl::VLDT_CPU, dataType>
{
vl::ErrorCode operator()(vl::nn::LRN &op,
vl::Tensor &output,
vl::Tensor const &input)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto width = output.getWidth() ;
auto height = output.getHeight() ;
auto depth = output.getDepth() ;
auto num = output.getSize() ;
auto inputData = (type const*)input.getMemory() ;
auto outputData = (type*)output.getMemory() ;
int t ;
int m1 = ((signed)op.normDepth-1)/2 ;
int m2 = (int)op.normDepth - m1 - 1 ;
int offset = (int)width*(int)height ;
#ifndef VL_NNNORMALIZE_FAST
for (int k = 0 ; k < num ; ++k) {
for (int h = 0 ; h < height ; ++h) {
for (int w = 0 ; w < width ; ++w) {
type const* x = inputData + w + h * width ;
type* y = outputData + w + h * width ;
type acc = 0 ;
for (t = -m2 ; t < (signed)depth ; ++t) {
type ap = 0 ;
type am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; }
if (t+m2 < depth) { ap = xat(t+m2) ; }
acc += ap*ap - am*am ;
if (0 <= t && t < depth) {
yat(t) = xat(t) * fast_pow(op.kappa + op.alpha * acc, -op.beta) ;
}
}
}
}
inputData += width*height*depth ;
outputData += width*height*depth ;
}
#else
type * acc = (type*) calloc(sizeof(type), width*height) ;
for (int k = 0 ; k < num ; ++k) {
memset(acc, 0, sizeof(type) * width*height) ;
for (t = -m2 ; t < (signed)depth ; ++t) {
int tm = t - m1 - 1 ;
int tp = t + m2 ;
type const* xam = inputData + offset * (t-m1-1) ;
type const* xap = inputData + offset * (t+m2) ;
type *end = acc + width*height ;
if (0 <= tm && tp < depth) {
for(type *xacc = acc ; xacc != end ; ++xacc, ++xam, ++xap) {
type am = *xam ;
type ap = *xap ;
*xacc += ap*ap - am*am ;
}
} else if (0 > tm && tp < depth) {
for(type *xacc = acc ; xacc != end ; ++xacc, ++xap) {
type ap = *xap ;
*xacc += ap*ap ;
}
} else if (0 <= tm && tp >= depth) {
for(type *xacc = acc ; xacc != end ; ++xacc, ++xam) {
type am = *xam ;
*xacc -= am*am ;
}
}
if (0 <= t && t < depth) {
type const* xx = inputData + offset * t ;
type * xy = outputData + offset * t ;
for(type *xacc = acc ; xacc != end ; ++xacc, ++xx, ++xy) {
(*xy) = (*xx) * fast_pow(op.kappa + op.alpha * (*xacc), -op.beta) ;
}
}
}
inputData += width*height*depth ;
outputData += width*height*depth ;
}
free(acc) ;
#endif
return VLE_Success ;
}
} ;
// -------------------------------------------------------------------
// Backward CPU
// -------------------------------------------------------------------
template<vl::DataType dataType>
struct LRNBackward<vl::VLDT_CPU, dataType>
{
vl::ErrorCode operator()(vl::nn::LRN &op,
vl::Tensor &derInput,
vl::Tensor const &input,
vl::Tensor const &derOutput)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto width = derOutput.getWidth() ;
auto height = derOutput.getHeight() ;
auto depth = derOutput.getDepth() ;
auto num = derOutput.getSize() ;
auto inputData = (type const*)input.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
auto derInputData = (type*)derInput.getMemory() ;
int m1 = ((signed)op.normDepth-1)/2 ;
int m2 = (int)op.normDepth - m1 - 1 ;
int offset = (int)width*(int)height ;
type ab2 = 2*op.alpha*op.beta ;
int t ;
#ifndef VL_NNNORMALIZE_FAST
int q ;
for (int k = 0 ; k < num ; ++k) {
for (int h = 0 ; h < height ; ++h) {
for (int w = 0 ; w < width ; ++w) {
type const* x = inputData + w + h * width ;
type* y = derInputData + w + h * width ;
type const* z = derOutputData + w + h * width ;
type acc = 0 ;
for (t = 0 ; t < (signed)depth ; ++t) {
yat(t) = 0 ;
}
for (t = -m2 ; t < (signed)depth ; ++t) {
int q1 = t-m1 ;
int q2 = t+m2 ;
type ap = 0 ;
type am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; } else { q1 = 0 ; }
if (t+m2 < depth) { ap = xat(t+m2) ; } else { q2 = depth - 1 ; }
acc += ap*ap - am*am ;
type L = op.kappa + op.alpha * acc ;
type Lbeta = fast_pow(L, -(type)op.beta) ;
type Lbeta1 = Lbeta / L ;
if (0 <= t && t < depth) {
yat(t) += zat(t) * Lbeta ;
for (q = q1 ; q <= q2 ; ++ q) {
yat(q) -= zat(t) * xat(t) * xat(q) * ab2 * Lbeta1 ;
}
}
}
}
}
inputData += width*height*depth ;
derInputData += width*height*depth ;
derOutputData += width*height*depth ;
}
#else
type * restrict acc = (type*) malloc(sizeof(type) * width*height) ;
type * restrict acc2 = (type*) malloc(sizeof(type) * width*height*depth) ;
for (int k = 0 ; k < num ; ++k) {
memset(acc, 0, sizeof(type) * width*height) ;
for (t = -m2 ; t < (signed)depth ; ++t) {
/*
Compute the square of the input data x.^2 summed over the normalization window. This is done
incrementally, by updating the previous normalization window sum.
*/
{
int const tm = t - m1 - 1 ;
int const tp = t + m2 ;
type const* restrict datam_ = inputData + offset * tm ;
type const* restrict datap_ = inputData + offset * tp ;
type *end = acc + width*height ;
if (0 <= tm && tp < depth) {
for(type * restrict acc_ = acc ; acc_ != end ; ++acc_, ++datap_, ++datam_) {
type am = *datam_ ;
type ap = *datap_ ;
*acc_ += ap*ap - am*am ;
}
} else if (0 > tm && tp < depth) {
for(type * restrict acc_ = acc ; acc_ != end ; ++acc_, ++datap_) {
type ap = *datap_ ;
*acc_ += ap*ap ;
}
} else if (0 <= tm && tp >= depth) {
for(type * restrict acc_ = acc ; acc_ != end ; ++acc_, ++datam_) {
type am = *datam_ ;
*acc_ -= am*am ;
}
}
}
/*
Compute the arguments of the summation in the derivative
expression, storing them into acc2.
*/
if (0 <= t && t < depth) {
type const* restrict data_ = inputData + offset * t ;
type const* restrict derOutput_ = derOutputData + offset * t ;
type * restrict output_ = derInputData + offset * t ;
type * restrict acc2_ = acc2 + offset * t ;
type * end = acc + width*height ;
for(type * restrict acc_ = acc ; acc_ != end ;
++acc_, ++acc2_, ++data_, ++derOutput_, ++output_) {
type L = op.kappa + op.alpha * (*acc_) ;
type Lbeta = fast_pow(L, -(type)op.beta) ;
type temp1 = (*derOutput_) * Lbeta ;
type temp2 = (*data_) * ab2 * temp1 / L ;
*output_ = temp1 ;
*acc2_ = temp2 ;
}
}
}
/*
Integrate along feature channels in acc2, summing plane t-1 to
plane t.
*/
for (t = 1 ; t < (signed)depth ; ++t) {
type * restrict acc2_ = acc2 + t * offset ;
type const* restrict src_ = acc2_ - offset ;
type const* end = acc2_ + offset ;
for( ; acc2_ != end ; ++acc2_, ++src_) {
*acc2_ += *src_ ;
}
}
/*
Compute summation in the derivative expression from the integral
just obtained.
*/
for (t = 0 ; t < (signed)depth ; ++t) {
int q1 = t - m2 - 1 ;
int q2 = ((t + m1) <= (depth - 1)) ? t + m1 : depth - 1 ;
type const* restrict acc22_ = acc2 + offset * q2 ;
type const* restrict acc21_ = acc2 + offset * q1 ;
type const* restrict data_ = inputData + offset * t ;
type const* restrict end = data_ + width*height ;
type * restrict output_ = derInputData + offset * t ;
if (q1 >= 0) {
for( ; data_ != end ; ++data_, ++acc22_, ++acc21_, ++output_) {
*output_ -= (*acc22_ - *acc21_) * (*data_) ;
}
} else {
for( ; data_ != end ; ++data_, ++acc22_, ++output_) {
*output_ -= (*acc22_) * (*data_) ;
}
}
}
inputData += width*height*depth ;
derInputData += width*height*depth ;
derOutputData += width*height*depth ;
}
free(acc) ;
free(acc2) ;
#endif
return VLE_Success ;
}
} ;
/* ---------------------------------------------------------------- */
/* Driver */
/* ---------------------------------------------------------------- */
#if ENABLE_GPU
#include "nnnormalize_gpu.cu"
#endif
LRN::LRN(vl::Context &context,
int normDepth,
double kappa,
double alpha,
double beta)
: context(context), normDepth(normDepth), kappa(kappa), alpha(alpha), beta(beta)
{ }
vl::ErrorCode
LRN::forward(vl::Tensor &output,
vl::Tensor const &input)
{
return dispatch<LRNForward>()(*this,output,input) ;
}
vl::ErrorCode
LRN::backward(vl::Tensor &derInput,
vl::Tensor const &input,
vl::Tensor const &derOutput)
{
return dispatch<LRNBackward>()(*this,derInput,input,derOutput) ;
}
|
the_stack
|
namespace MegBA {
namespace geo {
namespace {
template <typename T>
__global__ void RadialDistortionNoGradKernel(
const int nItem, const int N, const T *px_valueDevicePtr,
const T *py_valueDevicePtr, const T *px_gradDevicePtr,
const T *py_gradDevicePtr, const T *f_ptr, const T *k1_ptr, const T *k2_ptr,
T *valueDevicePtr, T *gradDevicePtr) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f = f_ptr[tid], k1 = k1_ptr[tid], k2 = k2_ptr[tid];
T px = px_valueDevicePtr[tid];
T py = py_valueDevicePtr[tid];
T l2_pow2 = px * px + py * py;
T partial = 2 * f * (k1 + 2 * k2 * l2_pow2);
for (unsigned int i = 0; i < N; ++i)
gradDevicePtr[tid + nItem * i] =
partial * (px_gradDevicePtr[tid + nItem * i] * px +
py_gradDevicePtr[tid + nItem * i] * py);
valueDevicePtr[tid] = f * (T(1.) + k1 * l2_pow2 + k2 * l2_pow2 * l2_pow2);
}
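// Added note (an interpretation, not in the upstream MegBA source): with
// r2 = px * px + py * py, the kernels in this file evaluate the radial term
// value = f * (1 + k1 * r2 + k2 * r2 * r2). Since
// d(value)/d(px) = 2 * f * (k1 + 2 * k2 * r2) * px (and likewise for py),
// `partial` is that common factor, and the gradient loop applies the chain
// rule partial * (px_grad * px + py_grad * py) to every Jacobian column. The
// variants below additionally add the direct derivatives with respect to
// f, k1 and k2.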
template <typename T>
__global__ void RadialDistortionKernel(
const int nItem, const int N, const T *px_valueDevicePtr,
const T *py_valueDevicePtr, const T *px_gradDevicePtr,
const T *py_gradDevicePtr, const T *f_ptr, const T *k1_ptr, const T *k2_ptr,
const T *f_gradDevicePtr, const T *k1_gradDevicePtr,
const T *k2_gradDevicePtr, T *valueDevicePtr, T *gradDevicePtr) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f = f_ptr[tid], k1 = k1_ptr[tid], k2 = k2_ptr[tid];
T px = px_valueDevicePtr[tid];
T py = py_valueDevicePtr[tid];
T l2_pow2 = px * px + py * py;
T partial = 2 * f * (k1 + 2 * k2 * l2_pow2);
for (unsigned int i = 0; i < N; ++i) {
unsigned int index = tid + nItem * i;
gradDevicePtr[index] = partial * (px_gradDevicePtr[tid + nItem * i] * px +
py_gradDevicePtr[tid + nItem * i] * py) +
f_gradDevicePtr[index] *
(T(1.) + k1 * l2_pow2 + k2 * l2_pow2 * l2_pow2) +
k1_gradDevicePtr[index] * f * l2_pow2 +
k2_gradDevicePtr[index] * f * l2_pow2 * l2_pow2;
}
valueDevicePtr[tid] = f * (T(1.) + k1 * l2_pow2 + k2 * l2_pow2 * l2_pow2);
}
template <typename T>
__global__ void RadialDistortionFastGradKernel(
const int nItem, const int N, const T *px_valueDevicePtr,
const T *py_valueDevicePtr, const T *px_gradDevicePtr,
const T *py_gradDevicePtr, const T *f_ptr, const T *k1_ptr, const T *k2_ptr,
const int f_grad_position, const int k1_grad_position,
const int k2_grad_position, T *valueDevicePtr, T *gradDevicePtr) {
unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid >= nItem) return;
T f = f_ptr[tid], k1 = k1_ptr[tid], k2 = k2_ptr[tid];
T px = px_valueDevicePtr[tid];
T py = py_valueDevicePtr[tid];
T l2_pow2 = px * px + py * py;
T partial = 2 * f * (k1 + 2 * k2 * l2_pow2);
for (unsigned int i = 0; i < N; ++i) {
unsigned int index = tid + nItem * i;
gradDevicePtr[index] =
partial * (px_gradDevicePtr[tid + nItem * i] * px +
py_gradDevicePtr[tid + nItem * i] * py) +
(i == f_grad_position ? 1 : 0) *
(T(1.) + k1 * l2_pow2 + k2 * l2_pow2 * l2_pow2) +
(i == k1_grad_position ? 1 : 0) * f * l2_pow2 +
(i == k2_grad_position ? 1 : 0) * f * l2_pow2 * l2_pow2;
}
valueDevicePtr[tid] = f * (T(1.) + k1 * l2_pow2 + k2 * l2_pow2 * l2_pow2);
}
template <typename T>
void RadialDistortionImpl(const JV3<T> &point, const JV3<T> &intrinsic,
JetVector<T> *out) {
const auto N = out->getGradShape();
bool use_fast_grad{true};
for (int i = 0; i < 3; ++i)
use_fast_grad &= intrinsic(i).getGradPosition() != -1;
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
const auto nItem = out->getItemNum(i);
dim3 block_dim(std::min(decltype(nItem)(256), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
if (intrinsic(0).getGradShape() == 0) {
RadialDistortionNoGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
if (use_fast_grad) {
RadialDistortionFastGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getGradPosition(),
intrinsic(1).getGradPosition(), intrinsic(2).getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
RadialDistortionKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getCUDAGradPtr()[i],
intrinsic(1).getCUDAGradPtr()[i], intrinsic(2).getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
void RadialDistortionImpl(const JV3<T> &point,
const Eigen::Map<const JV3<T>> &intrinsic,
JetVector<T> *out) {
const auto N = out->getGradShape();
bool use_fast_grad{true};
for (int i = 0; i < 3; ++i)
use_fast_grad &= intrinsic(i).getGradPosition() != -1;
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
const auto nItem = out->getItemNum(i);
dim3 block_dim(std::min(decltype(nItem)(256), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
if (intrinsic(0).getGradShape() == 0) {
RadialDistortionNoGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
if (use_fast_grad) {
RadialDistortionFastGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getGradPosition(),
intrinsic(1).getGradPosition(), intrinsic(2).getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
RadialDistortionKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getCUDAGradPtr()[i],
intrinsic(1).getCUDAGradPtr()[i], intrinsic(2).getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
template <typename T>
void RadialDistortionImpl(const JV3<T> &point,
const Eigen::Map<const JVD<T>> &intrinsic,
JetVector<T> *out) {
const auto N = out->getGradShape();
bool use_fast_grad{true};
for (int i = 0; i < 3; ++i)
use_fast_grad &= intrinsic(i).getGradPosition() != -1;
for (int i = 0; i < MemoryPool::getWorldSize(); ++i) {
cudaSetDevice(i);
const auto nItem = out->getItemNum(i);
dim3 block_dim(std::min(decltype(nItem)(256), nItem));
dim3 grid_dim((nItem - 1) / block_dim.x + 1);
if (intrinsic(0).getGradShape() == 0) {
RadialDistortionNoGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], out->getCUDAResPtr()[i],
out->getCUDAGradPtr()[i]);
} else {
if (use_fast_grad) {
RadialDistortionFastGradKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getGradPosition(),
intrinsic(1).getGradPosition(), intrinsic(2).getGradPosition(),
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
} else {
RadialDistortionKernel<T><<<grid_dim, block_dim>>>(
nItem, N, point(0).getCUDAResPtr()[i], point(1).getCUDAResPtr()[i],
point(0).getCUDAGradPtr()[i], point(1).getCUDAGradPtr()[i],
intrinsic(0).getCUDAResPtr()[i], intrinsic(1).getCUDAResPtr()[i],
intrinsic(2).getCUDAResPtr()[i], intrinsic(0).getCUDAGradPtr()[i],
intrinsic(1).getCUDAGradPtr()[i], intrinsic(2).getCUDAGradPtr()[i],
out->getCUDAResPtr()[i], out->getCUDAGradPtr()[i]);
}
}
}
}
} // namespace
template <typename T>
JetVector<T> RadialDistortion(const JV3<T> &point, const JV3<T> &intrinsic) {
return JetVector<T>{point(0, 0), [&](JetVector<T> *out) {
RadialDistortionImpl(point, intrinsic, out);
}};
}
template <typename T>
JetVector<T> RadialDistortion(const JV3<T> &point,
const Eigen::Map<const JV3<T>> &intrinsic) {
return JetVector<T>{point(0), [&](JetVector<T> *out) {
RadialDistortionImpl(point, intrinsic, out);
}};
}
template <typename T>
JetVector<T> RadialDistortion(const JV3<T> &point,
const Eigen::Map<const JVD<T>> &intrinsic) {
assert(intrinsic.rows() == 3 && intrinsic.cols() == 1);
return JetVector<T>{point(0), [&](JetVector<T> *out) {
RadialDistortionImpl(point, intrinsic, out);
}};
}
template JetVector<float> RadialDistortion(const JV3<float> &point,
const JV3<float> &intrinsic);
template JetVector<double> RadialDistortion(const JV3<double> &point,
const JV3<double> &intrinsic);
template JetVector<float> RadialDistortion(
const JV3<float> &point, const Eigen::Map<const JV3<float>> &intrinsic);
template JetVector<double> RadialDistortion(
const JV3<double> &point, const Eigen::Map<const JV3<double>> &intrinsic);
template JetVector<float> RadialDistortion(
const JV3<float> &point, const Eigen::Map<const JVD<float>> &intrinsic);
template JetVector<double> RadialDistortion(
const JV3<double> &point, const Eigen::Map<const JVD<double>> &intrinsic);
} // namespace geo
} // namespace MegBA
|
the_stack
|
#include <faiss/gpu/GpuResources.h>
#include <faiss/impl/FaissAssert.h>
#include <faiss/gpu/impl/L2Norm.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/MatrixMult.cuh>
#include <faiss/gpu/utils/Pair.cuh>
#include <faiss/gpu/utils/Reductions.cuh>
#include <curand_kernel.h>
namespace faiss {
namespace gpu {
extern __shared__ char smem[];
/** encode using iterative conditional mode
*
* For subcode cm of a vector, we fix the other subcodes cj (j != m)
* and then find the optimal value of cm (cm = 1,...,K) that
* minimizes the objective function.
*
* @param uterm precomputed unary terms, size (M, n, K)
* @param bterm precomputed binary terms, size (M1, M2, K1, K2)
* @param codes output vector encodings, size (n, M)
* @param M number of codebooks
* @param K number of codewords in a codebook
* @param m identify which subcode to condition on
*/
__global__ void runIcmEncodeStep(
const float* uterm,
const float* bterm,
int32_t* codes,
int M,
int K,
int m) {
using KVPair = Pair<float, int>;
int id = blockIdx.x; // each block takes care of one vector
int code = threadIdx.x; // each thread takes care of one possible code
// compute the objective value by look-up tables
KVPair obj(0.0f, code);
obj.k = uterm[id * K + code];
#pragma unroll
for (int m2 = 0; m2 < M; m2++) {
if (m2 == m) {
continue;
}
int32_t code2 = codes[id * M + m2];
obj.k += bterm[m2 * K * K + code * K + code2];
}
// find the minimum objective value and the corresponding code
__syncthreads();
obj = blockReduceAll<KVPair, Min<KVPair>, false, false>(
obj, Min<KVPair>(), (KVPair*)smem);
if (code == 0) {
codes[id * M + m] = obj.v;
}
}
/** compute reconstruction error for each vector
*
* decoded_x[i] = \sum codebooks[m][codes[i][m]], m = 1,..,M
* obj[i] = ||x[i] - decoded_x[i]||^2
*
* @param x input vectors, size [n, dims]
* @param codebooks codebooks, size [M, K, dims]
* @param codes vector codes, size [n, M]
* @param obj output reconstruction errors, size [n]
* @param n number of input vectors
* @param K number of codewords in a codebook
* @param M number of codebooks
*/
__global__ void runEvaluation(
const float* x,
const float* codebooks,
const int32_t* codes,
float* obj, // output
int n,
int M,
int K,
int dims) {
int id = blockIdx.x; // each block takes care of one vector
int d = threadIdx.x; // each thread takes care of one dimension
float acc = 0.0f;
#pragma unroll
for (int m = 0; m < M; m++) {
int32_t code = codes[id * M + m];
acc += codebooks[m * K * dims + code * dims + d];
}
acc -= x[id * dims + d];
acc = acc * acc;
// sum values of all dimensions together
__syncthreads();
acc = blockReduceAllSum<float, false, false>(acc, (float*)smem);
if (d == 0) {
obj[id] = acc;
}
}
/** perturb vector codes
*
* repeat nperts times:
* codes[i][randint(0, M)] = randint(0, K)
*
* @param seed random seed
* @param codes vector codes, size [n, M]
* @param n number of input vectors
* @param M number of codebooks
* @param K number of codewords in a codebook
* @param nperts number of subcodes to be perturbed in each vector
*/
__global__ void runCodesPerturbation(
int seed,
int32_t* codes,
int n,
int M,
int K,
int nperts) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n) {
return;
}
// we have to initialize the state
curandState_t state;
curand_init(seed, id, 0, &state);
for (int i = 0; i < nperts; i++) {
int pos = int(curand_uniform(&state) * M);
int32_t val = int32_t(curand_uniform(&state) * K);
codes[id * M + pos] = val;
}
}
/** select the best codes by reconstruction errors
*
* if objs[i] < best_objs[i]:
* best_objs[i] = objs[i]
* best_codes[i] = codes[i]
*
* @param bestCodes the best codes we've encountered, size [n, M]
* @param bestObjs min reconstruction errors we've encountered, size [n]
* @param codes input vector codes, size [n, M]
* @param objs reconstruction errors of input vector codes, size [n]
* @param n number of input vectors
*/
__global__ void runCodesSelection(
int32_t* bestCodes,
float* bestObjs,
const int32_t* codes,
const float* objs,
int n,
int M) {
// each thread takes care of one vector
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= n || objs[id] >= bestObjs[id]) {
return;
}
bestObjs[id] = objs[id];
#pragma unroll
for (int m = 0; m < M; m++) {
bestCodes[id * M + m] = codes[id * M + m];
}
}
/** add L2 norm of codewords in a codebook to the unary terms
*
* uterm[i][k] += norm[k]
*
* @param uterm unary terms, size [n, K]
* @param norm L2 norm of each codeword in a codebook, size [K]
* @param K number of codewords in a codebook
*/
__global__ void runNormAddition(float* uterm, const float* norm, int K) {
int id = blockIdx.x;
int code = threadIdx.x;
uterm[id * K + code] += norm[code];
}
IcmEncoderImpl::IcmEncoderImpl(
int M,
int K,
int dims,
GpuResourcesProvider* prov,
int device)
: M(M), K(K), dims(dims), prov(prov), device(device) {
res = prov->getResources();
}
void IcmEncoderImpl::computeUnaryTerms(
float* uterm, // output, [M, n, K]
const float* x, // [n, d]
const float* codebooks, // [M, K, d]
int n) const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
DeviceTensor<float, 2, true> vecs(const_cast<float*>(x), {n, dims});
for (int m = 0; m < M; m++) {
auto cPtr = const_cast<float*>(codebooks + m * K * dims);
auto bPtr = uterm + m * n * K;
DeviceTensor<float, 2, true> ci(cPtr, {K, dims});
DeviceTensor<float, 2, true> bi(bPtr, {n, K});
runMatrixMult(
bi, false, vecs, false, ci, true, -2.0f, 0.0f, handle, stream);
}
DeviceTensor<float, 2, true> c(
const_cast<float*>(codebooks), {M * K, dims});
DeviceTensor<float, 1, true> norm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M * K});
runL2Norm(c, true, norm, true, stream);
for (int m = 0; m < M; m++) {
auto uPtr = uterm + m * n * K;
auto nPtr = norm.data() + m * K;
runNormAddition<<<n, K, 0, stream>>>(uPtr, nPtr, K);
}
}
void IcmEncoderImpl::computeBinaryTerms(float* bterm, const float* codebooks)
const {
auto stream = res->getDefaultStreamCurrentDevice();
auto handle = res->getBlasHandleCurrentDevice();
for (int m1 = 0; m1 < M; m1++) {
for (int m2 = 0; m2 < M; m2++) {
auto ptr1 = const_cast<float*>(codebooks + m1 * K * dims);
auto ptr2 = const_cast<float*>(codebooks + m2 * K * dims);
auto ptr3 = bterm + m1 * M * K * K + m2 * K * K;
DeviceTensor<float, 2, true> c1(ptr1, {K, dims});
DeviceTensor<float, 2, true> c2(ptr2, {K, dims});
DeviceTensor<float, 2, true> b(ptr3, {K, K});
runMatrixMult(
b, false, c1, false, c2, true, 2.0f, 0.0f, handle, stream);
}
}
}
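// Added note (an interpretation, not in the upstream FAISS source): the two
// routines above come from expanding the reconstruction error
//   ||x - sum_m c_m||^2 = ||x||^2 - 2 * sum_m <x, c_m>
//                         + sum_m ||c_m||^2 + sum_{m1 != m2} <c_m1, c_m2>.
// The unary term of codeword k in codebook m is -2 * <x, c_m^k> + ||c_m^k||^2
// (the -2.0f GEMM plus runNormAddition above), the binary term of the pair
// (m1, k1, m2, k2) is 2 * <c_m1^k1, c_m2^k2> (the 2.0f GEMM above), and the
// constant ||x||^2 is dropped because it does not depend on the codes.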
void IcmEncoderImpl::setBinaryTerm(const float* codebooksHost) {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
codebooks = toDeviceNonTemporary<float, 3>(
res.get(),
device,
const_cast<float*>(codebooksHost),
stream,
{M, K, dims});
bterm = DeviceTensor<float, 4, true>(
res.get(), makeDevAlloc(AllocType::Other, stream), {M, M, K, K});
computeBinaryTerms(bterm.data(), codebooks.data());
}
void IcmEncoderImpl::encode(
int32_t* codesHost,
const float* xHost,
const float* codebooksHost,
std::mt19937& gen,
int n,
int nperts,
int ilsIters,
int icmIters) const {
DeviceScope scope(device);
auto device = getCurrentDevice();
auto stream = res->getDefaultStreamCurrentDevice();
// copy from host to device memory
auto codes = toDeviceTemporary<int32_t, 2>(
res.get(), device, const_cast<int32_t*>(codesHost), stream, {n, M});
auto x = toDeviceTemporary<float, 2>(
res.get(), device, const_cast<float*>(xHost), stream, {n, dims});
// compute unary terms
DeviceTensor<float, 3, true> uterm(
res.get(), makeTempAlloc(AllocType::Other, stream), {M, n, K});
computeUnaryTerms(uterm.data(), x.data(), codebooks.data(), n);
DeviceTensor<int32_t, 2, true> bestCodes(
res.get(), makeTempAlloc(AllocType::Other, stream), {n, M});
fromDevice<int32_t, 2>(codes, bestCodes.data(), stream);
DeviceTensor<float, 1, true> bestObjs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
DeviceTensor<float, 1, true> objs(
res.get(), makeTempAlloc(AllocType::Other, stream), {n});
// compute how much shared memory we need
const int evaluateSmem = sizeof(float) * (dims + kWarpSize - 1) / kWarpSize;
const int encodeSmem =
sizeof(Pair<float, int>) * (K + kWarpSize - 1) / kWarpSize;
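// Added note (an interpretation, not in the upstream source): the block-wide
// reductions in runEvaluation and runIcmEncodeStep keep one partial result
// per warp in shared memory, hence roughly ceil(blockDim.x / kWarpSize)
// elements of the reduced type for each launch.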
// compute the reconstruction error for each vector
runEvaluation<<<n, dims, evaluateSmem, stream>>>(
x.data(),
codebooks.data(),
codes.data(),
bestObjs.data(),
n,
M,
K,
dims);
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
for (int i = 0; i < ilsIters; i++) {
runCodesPerturbation<<<numBlocks, blockSize, 0, stream>>>(
gen(), codes.data(), n, M, K, nperts);
// perform icm encoding
for (int j = 0; j < icmIters; j++) {
for (int m = 0; m < M; m++) {
runIcmEncodeStep<<<n, K, encodeSmem, stream>>>(
uterm[m].data(),
bterm[m].data(),
codes.data(),
M,
K,
m);
}
}
// compute the reconstruction error for each vector given codes
runEvaluation<<<n, dims, evaluateSmem, stream>>>(
x.data(),
codebooks.data(),
codes.data(),
objs.data(),
n,
M,
K,
dims);
// if objs[i] < best_objs[i], replace best_codes[i] with codes[i]
runCodesSelection<<<numBlocks, blockSize, 0, stream>>>(
bestCodes.data(),
bestObjs.data(),
codes.data(),
objs.data(),
n,
M);
codes.copyFrom(bestCodes, stream);
}
// copy back to host memory
fromDevice<int32_t, 2>(bestCodes, codesHost, stream);
}
} // namespace gpu
} // namespace faiss
|
the_stack
|
* \file
* Common type manipulation (metaprogramming) utilities
*/
#pragma once
#include <iostream>
#include <limits>
#include <cfloat>
#if (__CUDACC_VER_MAJOR__ >= 9)
#include <cuda_fp16.h>
#endif
#include "util_macro.cuh"
#include "util_arch.cuh"
#include "util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/**
* \addtogroup UtilModule
* @{
*/
/******************************************************************************
* Conditional types
******************************************************************************/
/**
* \brief Type selection (<tt>IF ? ThenType : ElseType</tt>)
*/
template <bool IF, typename ThenType, typename ElseType>
struct If
{
/// Conditional type result
typedef ThenType Type; // true
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename ThenType, typename ElseType>
struct If<false, ThenType, ElseType>
{
typedef ElseType Type; // false
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Type equality
******************************************************************************/
/**
* \brief Type equality test
*/
template <typename A, typename B>
struct Equals
{
enum {
VALUE = 0,
NEGATE = 1
};
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename A>
struct Equals <A, A>
{
enum {
VALUE = 1,
NEGATE = 0
};
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Static math
******************************************************************************/
/**
* \brief Statically determine log2(N), rounded up.
*
* For example:
* Log2<8>::VALUE // 3
* Log2<3>::VALUE // 2
*/
template <int N, int CURRENT_VAL = N, int COUNT = 0>
struct Log2
{
/// Static logarithm value
enum { VALUE = Log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE }; // Inductive case
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <int N, int COUNT>
struct Log2<N, 0, COUNT>
{
enum {VALUE = (1 << (COUNT - 1) < N) ? // Base case
COUNT :
COUNT - 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Statically determine if N is a power-of-two
*/
template <int N>
struct PowerOfTwo
{
enum { VALUE = ((N & (N - 1)) == 0) };
};
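/*
 * Added illustration (not part of the original header): both metafunctions
 * are evaluated entirely at compile time, e.g.
 *
 *   static_assert(Log2<8>::VALUE == 3, "log2(8), rounded up");
 *   static_assert(Log2<3>::VALUE == 2, "log2(3), rounded up");
 *   static_assert(PowerOfTwo<64>::VALUE == 1, "64 is a power of two");
 *   static_assert(PowerOfTwo<48>::VALUE == 0, "48 is not a power of two");
 */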
/******************************************************************************
* Pointer vs. iterator detection
******************************************************************************/
/**
* \brief Pointer vs. iterator
*/
template <typename Tp>
struct IsPointer
{
enum { VALUE = 0 };
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp>
struct IsPointer<Tp*>
{
enum { VALUE = 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Qualifier detection
******************************************************************************/
/**
* \brief Volatile modifier test
*/
template <typename Tp>
struct IsVolatile
{
enum { VALUE = 0 };
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp>
struct IsVolatile<Tp volatile>
{
enum { VALUE = 1 };
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Qualifier removal
******************************************************************************/
/**
* \brief Removes \p const and \p volatile qualifiers from type \p Tp.
*
* For example:
* <tt>typename RemoveQualifiers<volatile int>::Type // int;</tt>
*/
template <typename Tp, typename Up = Tp>
struct RemoveQualifiers
{
/// Type without \p const and \p volatile qualifiers
typedef Up Type;
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, volatile Up>
{
typedef Up Type;
};
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, const Up>
{
typedef Up Type;
};
template <typename Tp, typename Up>
struct RemoveQualifiers<Tp, const volatile Up>
{
typedef Up Type;
};
/******************************************************************************
* Marker types
******************************************************************************/
/**
* \brief A simple "NULL" marker type
*/
struct NullType
{
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
template <typename T>
__host__ __device__ __forceinline__ NullType& operator =(const T&) { return *this; }
__host__ __device__ __forceinline__ bool operator ==(const NullType&) { return true; }
__host__ __device__ __forceinline__ bool operator !=(const NullType&) { return false; }
#endif // DOXYGEN_SHOULD_SKIP_THIS
};
/**
* \brief Allows for the treatment of an integral constant as a type at compile-time (e.g., to achieve static call dispatch based on constant integral values)
*/
template <int A>
struct Int2Type
{
enum {VALUE = A};
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/******************************************************************************
* Size and alignment
******************************************************************************/
/// Structure alignment
template <typename T>
struct AlignBytes
{
struct Pad
{
T val;
char byte;
};
enum
{
/// The "true CUDA" alignment of T in bytes
ALIGN_BYTES = sizeof(Pad) - sizeof(T)
};
/// The "truly aligned" type
typedef T Type;
};
// Specializations where host C++ compilers (e.g., 32-bit Windows) may disagree
// with device C++ compilers (EDG) on types passed as template parameters through
// kernel functions
#define __CUB_ALIGN_BYTES(t, b) \
template <> struct AlignBytes<t> \
{ enum { ALIGN_BYTES = b }; typedef __align__(b) t Type; };
__CUB_ALIGN_BYTES(short4, 8)
__CUB_ALIGN_BYTES(ushort4, 8)
__CUB_ALIGN_BYTES(int2, 8)
__CUB_ALIGN_BYTES(uint2, 8)
__CUB_ALIGN_BYTES(long long, 8)
__CUB_ALIGN_BYTES(unsigned long long, 8)
__CUB_ALIGN_BYTES(float2, 8)
__CUB_ALIGN_BYTES(double, 8)
#ifdef _WIN32
__CUB_ALIGN_BYTES(long2, 8)
__CUB_ALIGN_BYTES(ulong2, 8)
#else
__CUB_ALIGN_BYTES(long2, 16)
__CUB_ALIGN_BYTES(ulong2, 16)
#endif
__CUB_ALIGN_BYTES(int4, 16)
__CUB_ALIGN_BYTES(uint4, 16)
__CUB_ALIGN_BYTES(float4, 16)
__CUB_ALIGN_BYTES(long4, 16)
__CUB_ALIGN_BYTES(ulong4, 16)
__CUB_ALIGN_BYTES(longlong2, 16)
__CUB_ALIGN_BYTES(ulonglong2, 16)
__CUB_ALIGN_BYTES(double2, 16)
__CUB_ALIGN_BYTES(longlong4, 16)
__CUB_ALIGN_BYTES(ulonglong4, 16)
__CUB_ALIGN_BYTES(double4, 16)
template <typename T> struct AlignBytes<volatile T> : AlignBytes<T> {};
template <typename T> struct AlignBytes<const T> : AlignBytes<T> {};
template <typename T> struct AlignBytes<const volatile T> : AlignBytes<T> {};
/// Unit-words of data movement
template <typename T>
struct UnitWord
{
enum {
ALIGN_BYTES = AlignBytes<T>::ALIGN_BYTES
};
template <typename Unit>
struct IsMultiple
{
enum {
UNIT_ALIGN_BYTES = AlignBytes<Unit>::ALIGN_BYTES,
IS_MULTIPLE = (sizeof(T) % sizeof(Unit) == 0) && (ALIGN_BYTES % UNIT_ALIGN_BYTES == 0)
};
};
/// Biggest shuffle word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<int>::IS_MULTIPLE,
unsigned int,
typename If<IsMultiple<short>::IS_MULTIPLE,
unsigned short,
unsigned char>::Type>::Type ShuffleWord;
/// Biggest volatile word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<long long>::IS_MULTIPLE,
unsigned long long,
ShuffleWord>::Type VolatileWord;
/// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<longlong2>::IS_MULTIPLE,
ulonglong2,
VolatileWord>::Type DeviceWord;
/// Biggest texture reference word that T is a whole multiple of and is not larger than the alignment of T
typedef typename If<IsMultiple<int4>::IS_MULTIPLE,
uint4,
typename If<IsMultiple<int2>::IS_MULTIPLE,
uint2,
ShuffleWord>::Type>::Type TextureWord;
};
// float2 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <float2>
{
typedef int ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef float VolatileWord;
typedef uint2 DeviceWord;
#else
typedef unsigned long long VolatileWord;
typedef unsigned long long DeviceWord;
#endif
typedef float2 TextureWord;
};
// float4 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <float4>
{
typedef int ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef float VolatileWord;
typedef uint4 DeviceWord;
#else
typedef unsigned long long VolatileWord;
typedef ulonglong2 DeviceWord;
#endif
typedef float4 TextureWord;
};
// char2 specialization workaround (for SM10-SM13)
template <>
struct UnitWord <char2>
{
typedef unsigned short ShuffleWord;
#if (CUB_PTX_ARCH > 0) && (CUB_PTX_ARCH <= 130)
typedef unsigned short VolatileWord;
typedef short DeviceWord;
#else
typedef unsigned short VolatileWord;
typedef unsigned short DeviceWord;
#endif
typedef unsigned short TextureWord;
};
template <typename T> struct UnitWord<volatile T> : UnitWord<T> {};
template <typename T> struct UnitWord<const T> : UnitWord<T> {};
template <typename T> struct UnitWord<const volatile T> : UnitWord<T> {};
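// Illustrative resolution of UnitWord for a 16-byte, 16-byte-aligned type
// (not part of the original header; assumes C++11 for static_assert): double2
// can be moved as a single ulonglong2 and shuffled as unsigned int lanes.
static_assert(Equals<UnitWord<double2>::DeviceWord,  ulonglong2  >::VALUE == 1, "16B-aligned type moves as ulonglong2");
static_assert(Equals<UnitWord<double2>::ShuffleWord, unsigned int>::VALUE == 1, "shuffled four bytes at a time");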
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Vector type inference utilities.
******************************************************************************/
/**
* \brief Exposes a member typedef \p Type that names the corresponding CUDA vector type if one exists. Otherwise \p Type refers to the CubVector structure itself, which will wrap the corresponding \p x, \p y, etc. vector fields.
*/
template <typename T, int vec_elements> struct CubVector;
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
enum
{
/// The maximum number of elements in CUDA vector types
MAX_VEC_ELEMENTS = 4,
};
/**
* Generic vector-1 type
*/
template <typename T>
struct CubVector<T, 1>
{
T x;
typedef T BaseType;
typedef CubVector<T, 1> Type;
};
/**
* Generic vector-2 type
*/
template <typename T>
struct CubVector<T, 2>
{
T x;
T y;
typedef T BaseType;
typedef CubVector<T, 2> Type;
};
/**
* Generic vector-3 type
*/
template <typename T>
struct CubVector<T, 3>
{
T x;
T y;
T z;
typedef T BaseType;
typedef CubVector<T, 3> Type;
};
/**
* Generic vector-4 type
*/
template <typename T>
struct CubVector<T, 4>
{
T x;
T y;
T z;
T w;
typedef T BaseType;
typedef CubVector<T, 4> Type;
};
/**
* Macro for expanding partially-specialized built-in vector types
*/
#define CUB_DEFINE_VECTOR_TYPE(base_type,short_type) \
\
template<> struct CubVector<base_type, 1> : short_type##1 \
{ \
typedef base_type BaseType; \
typedef short_type##1 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 2> : short_type##2 \
{ \
typedef base_type BaseType; \
typedef short_type##2 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 3> : short_type##3 \
{ \
typedef base_type BaseType; \
typedef short_type##3 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
retval.z = z + other.z; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
retval.z = z - other.z; \
return retval; \
} \
}; \
\
template<> struct CubVector<base_type, 4> : short_type##4 \
{ \
typedef base_type BaseType; \
typedef short_type##4 Type; \
__host__ __device__ __forceinline__ CubVector operator+(const CubVector &other) const { \
CubVector retval; \
retval.x = x + other.x; \
retval.y = y + other.y; \
retval.z = z + other.z; \
retval.w = w + other.w; \
return retval; \
} \
__host__ __device__ __forceinline__ CubVector operator-(const CubVector &other) const { \
CubVector retval; \
retval.x = x - other.x; \
retval.y = y - other.y; \
retval.z = z - other.z; \
retval.w = w - other.w; \
return retval; \
} \
};
// Expand CUDA vector types for built-in primitives
CUB_DEFINE_VECTOR_TYPE(char, char)
CUB_DEFINE_VECTOR_TYPE(signed char, char)
CUB_DEFINE_VECTOR_TYPE(short, short)
CUB_DEFINE_VECTOR_TYPE(int, int)
CUB_DEFINE_VECTOR_TYPE(long, long)
CUB_DEFINE_VECTOR_TYPE(long long, longlong)
CUB_DEFINE_VECTOR_TYPE(unsigned char, uchar)
CUB_DEFINE_VECTOR_TYPE(unsigned short, ushort)
CUB_DEFINE_VECTOR_TYPE(unsigned int, uint)
CUB_DEFINE_VECTOR_TYPE(unsigned long, ulong)
CUB_DEFINE_VECTOR_TYPE(unsigned long long, ulonglong)
CUB_DEFINE_VECTOR_TYPE(float, float)
CUB_DEFINE_VECTOR_TYPE(double, double)
CUB_DEFINE_VECTOR_TYPE(bool, uchar)
// Undefine macros
#undef CUB_DEFINE_VECTOR_TYPE
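// Illustrative use of the expanded wrappers (not in the original source):
// CubVector<int, 4> derives from int4, so the component-wise operators defined
// by the macro above are directly available on the wrapper.
__host__ __device__ __forceinline__ CubVector<int, 4> CubVectorAddExample(
    CubVector<int, 4> a,
    CubVector<int, 4> b)
{
    return a + b;   // adds x, y, z and w independently
}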
#endif // DOXYGEN_SHOULD_SKIP_THIS
/******************************************************************************
* Wrapper types
******************************************************************************/
/**
* \brief A storage-backing wrapper that allows types with non-trivial constructors to be aliased in unions
*/
template <typename T>
struct Uninitialized
{
/// Biggest memory-access word that T is a whole multiple of and is not larger than the alignment of T
typedef typename UnitWord<T>::DeviceWord DeviceWord;
enum
{
WORDS = sizeof(T) / sizeof(DeviceWord)
};
/// Backing storage
DeviceWord storage[WORDS];
/// Alias
__host__ __device__ __forceinline__ T& Alias()
{
return reinterpret_cast<T&>(*this);
}
};
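// Sketch of the intended use (illustrative; ExampleStorage is a placeholder
// type, not part of CUB): Uninitialized<T> reserves raw __shared__ words for T
// without running T's constructor, and Alias() reinterprets them as a T.
struct ExampleStorage { int histogram[32]; };
__device__ __forceinline__ void UninitializedUsageExample()
{
    __shared__ Uninitialized<ExampleStorage> temp_storage_words;
    ExampleStorage &storage = temp_storage_words.Alias();
    storage.histogram[threadIdx.x & 31] = 0;
}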
/**
* \brief A key identifier paired with a corresponding value
*/
template <
typename _Key,
typename _Value
#if defined(_WIN32) && !defined(_WIN64)
, bool KeyIsLT = (AlignBytes<_Key>::ALIGN_BYTES < AlignBytes<_Value>::ALIGN_BYTES)
, bool ValIsLT = (AlignBytes<_Value>::ALIGN_BYTES < AlignBytes<_Key>::ALIGN_BYTES)
#endif // #if defined(_WIN32) && !defined(_WIN64)
>
struct KeyValuePair
{
typedef _Key Key; ///< Key data type
typedef _Value Value; ///< Value data type
Key key; ///< Item key
Value value; ///< Item value
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
#if defined(_WIN32) && !defined(_WIN64)
/**
* Win32 won't do 16B alignment. This can present two problems for
* should-be-16B-aligned (but actually 8B aligned) built-in and intrinsics members:
* 1) If a smaller-aligned item were to be listed first, the host compiler places the
* should-be-16B item at too early an offset (and disagrees with device compiler)
* 2) Or, if a smaller-aligned item lists second, the host compiler gets the size
* of the struct wrong (and disagrees with device compiler)
*
* So we put the larger-should-be-aligned item first, and explicitly pad the
* end of the struct
*/
/// Smaller key specialization
template <typename K, typename V>
struct KeyValuePair<K, V, true, false>
{
typedef K Key;
typedef V Value;
typedef char Pad[AlignBytes<V>::ALIGN_BYTES - AlignBytes<K>::ALIGN_BYTES];
Value value; // Value has larger would-be alignment and goes first
Key key;
Pad pad;
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
/// Smaller value specialization
template <typename K, typename V>
struct KeyValuePair<K, V, false, true>
{
typedef K Key;
typedef V Value;
typedef char Pad[AlignBytes<K>::ALIGN_BYTES - AlignBytes<V>::ALIGN_BYTES];
Key key; // Key has larger would-be alignment and goes first
Value value;
Pad pad;
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair() {}
/// Constructor
__host__ __device__ __forceinline__
KeyValuePair(Key const& key, Value const& value) : key(key), value(value) {}
/// Inequality operator
__host__ __device__ __forceinline__ bool operator !=(const KeyValuePair &b)
{
return (value != b.value) || (key != b.key);
}
};
#endif // #if defined(_WIN32) && !defined(_WIN64)
#ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document
/**
* \brief A wrapper for passing simple static arrays as kernel parameters
*/
template <typename T, int COUNT>
struct ArrayWrapper
{
/// Statically-sized array of type \p T
T array[COUNT];
/// Constructor
__host__ __device__ __forceinline__ ArrayWrapper() {}
};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/**
* \brief Double-buffer storage wrapper for multi-pass stream transformations that require more than one storage array for streaming intermediate results back and forth.
*
* Many multi-pass computations require a pair of "ping-pong" storage
* buffers (e.g., one for reading from and the other for writing to, and then
* vice-versa for the subsequent pass). This structure wraps a set of device
* buffers and a "selector" member to track which is "current".
*/
template <typename T>
struct DoubleBuffer
{
/// Pair of device buffer pointers
T *d_buffers[2];
/// Selector into \p d_buffers (i.e., the active/valid buffer)
int selector;
/// \brief Constructor
__host__ __device__ __forceinline__ DoubleBuffer()
{
selector = 0;
d_buffers[0] = NULL;
d_buffers[1] = NULL;
}
/// \brief Constructor
__host__ __device__ __forceinline__ DoubleBuffer(
T *d_current, ///< The currently valid buffer
T *d_alternate) ///< Alternate storage buffer of the same size as \p d_current
{
selector = 0;
d_buffers[0] = d_current;
d_buffers[1] = d_alternate;
}
/// \brief Return pointer to the currently valid buffer
__host__ __device__ __forceinline__ T* Current() { return d_buffers[selector]; }
/// \brief Return pointer to the currently invalid buffer
__host__ __device__ __forceinline__ T* Alternate() { return d_buffers[selector ^ 1]; }
};
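// Illustrative ping-pong pattern (d_keys_in / d_keys_out are hypothetical
// caller-owned device allocations of equal size): each pass reads from
// Current(), writes to Alternate(), then flips the selector.
__host__ __forceinline__ void DoubleBufferUsageExample(int *d_keys_in, int *d_keys_out)
{
    DoubleBuffer<int> d_keys(d_keys_in, d_keys_out);
    for (int pass = 0; pass < 2; ++pass)
    {
        int *read_ptr  = d_keys.Current();      // valid data for this pass
        int *write_ptr = d_keys.Alternate();    // scratch output for this pass
        (void) read_ptr; (void) write_ptr;      // a real pass would launch a kernel here
        d_keys.selector ^= 1;                   // swap roles for the next pass
    }
}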
/******************************************************************************
* Typedef-detection
******************************************************************************/
/**
* \brief Defines a structure \p detector_name that is templated on type \p T. The \p detector_name struct exposes a constant member \p VALUE indicating whether or not parameter \p T exposes a nested type \p nested_type_name
*/
#define CUB_DEFINE_DETECT_NESTED_TYPE(detector_name, nested_type_name) \
template <typename T> \
struct detector_name \
{ \
template <typename C> \
static char& test(typename C::nested_type_name*); \
template <typename> \
static int& test(...); \
enum \
{ \
VALUE = sizeof(test<T>(0)) < sizeof(int) \
}; \
};
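// Example expansion of the detector macro (illustrative; the struct names are
// placeholders and the checks assume C++11): HasValueType<T>::VALUE is 1 only
// when T exposes a nested ValueType typedef.
CUB_DEFINE_DETECT_NESTED_TYPE(HasValueType, ValueType)
struct WithValueType    { typedef int ValueType; };
struct WithoutValueType {};
static_assert(HasValueType<WithValueType>::VALUE    == 1, "nested typedef detected");
static_assert(HasValueType<WithoutValueType>::VALUE == 0, "no nested typedef");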
/******************************************************************************
* Simple enable-if (similar to Boost)
******************************************************************************/
/**
* \brief Simple enable-if (similar to Boost)
*/
template <bool Condition, class T = void>
struct EnableIf
{
/// Enable-if type for SFINAE dummy variables
typedef T Type;
};
template <class T>
struct EnableIf<false, T> {};
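// Minimal SFINAE sketch using EnableIf (illustrative; the function name is a
// placeholder): the overload only exists when N is a power of two, so
// PowerOfTwoOnlyExample<8>() compiles while PowerOfTwoOnlyExample<6>() does not.
template <int N>
__host__ __device__ __forceinline__
typename EnableIf<PowerOfTwo<N>::VALUE != 0, int>::Type PowerOfTwoOnlyExample()
{
    return N;
}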
/******************************************************************************
 * Functor signature detection
 ******************************************************************************/
/**
* \brief Determine whether or not BinaryOp's functor is of the form <tt>bool operator()(const T& a, const T&b)</tt> or <tt>bool operator()(const T& a, const T&b, unsigned int idx)</tt>
*/
template <typename T, typename BinaryOp>
struct BinaryOpHasIdxParam
{
private:
/*
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, unsigned int idx) const> struct SFINAE1 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, unsigned int idx)> struct SFINAE2 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, unsigned int idx) const> struct SFINAE3 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, unsigned int idx)> struct SFINAE4 {};
*/
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, int idx) const> struct SFINAE5 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(const T &a, const T &b, int idx)> struct SFINAE6 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, int idx) const> struct SFINAE7 {};
template <typename BinaryOpT, bool (BinaryOpT::*)(T a, T b, int idx)> struct SFINAE8 {};
/*
template <typename BinaryOpT> static char Test(SFINAE1<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE2<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE3<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static char Test(SFINAE4<BinaryOpT, &BinaryOpT::operator()> *);
*/
template <typename BinaryOpT> __host__ __device__ static char Test(SFINAE5<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> __host__ __device__ static char Test(SFINAE6<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> __host__ __device__ static char Test(SFINAE7<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> __host__ __device__ static char Test(SFINAE8<BinaryOpT, &BinaryOpT::operator()> *);
template <typename BinaryOpT> static int Test(...);
public:
/// Whether the functor BinaryOp has a third <tt>unsigned int</tt> index param
static const bool HAS_PARAM = sizeof(Test<BinaryOp>(NULL)) == sizeof(char);
};
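// Illustrative functors for the detector above (assumes C++11 for
// static_assert): only IndexedLess exposes the (a, b, idx) call signature that
// HAS_PARAM looks for.
struct PlainLess   { __host__ __device__ bool operator()(const int &a, const int &b) const { return a < b; } };
struct IndexedLess { __host__ __device__ bool operator()(const int &a, const int &b, int idx) const { return (idx >= 0) && (a < b); } };
static_assert(!BinaryOpHasIdxParam<int, PlainLess>::HAS_PARAM,   "two-argument functor has no index param");
static_assert( BinaryOpHasIdxParam<int, IndexedLess>::HAS_PARAM, "index-aware functor is detected");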
/******************************************************************************
* Simple type traits utilities.
*
* For example:
* Traits<int>::CATEGORY // SIGNED_INTEGER
* Traits<NullType>::NULL_TYPE // true
* Traits<uint4>::CATEGORY // NOT_A_NUMBER
* Traits<uint4>::PRIMITIVE; // false
*
******************************************************************************/
/**
* \brief Basic type traits categories
*/
enum Category
{
NOT_A_NUMBER,
SIGNED_INTEGER,
UNSIGNED_INTEGER,
FLOATING_POINT
};
/**
* \brief Basic type traits
*/
template <Category _CATEGORY, bool _PRIMITIVE, bool _NULL_TYPE, typename _UnsignedBits, typename T>
struct BaseTraits
{
/// Category
static const Category CATEGORY = _CATEGORY;
enum
{
PRIMITIVE = _PRIMITIVE,
NULL_TYPE = _NULL_TYPE,
};
};
/**
* Basic type traits (unsigned primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<UNSIGNED_INTEGER, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = UNSIGNED_INTEGER;
static const UnsignedBits LOWEST_KEY = UnsignedBits(0);
static const UnsignedBits MAX_KEY = UnsignedBits(-1);
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
return key;
}
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
return key;
}
static __host__ __device__ __forceinline__ T Max()
{
UnsignedBits retval = MAX_KEY;
return reinterpret_cast<T&>(retval);
}
static __host__ __device__ __forceinline__ T Lowest()
{
UnsignedBits retval = LOWEST_KEY;
return reinterpret_cast<T&>(retval);
}
};
/**
* Basic type traits (signed primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<SIGNED_INTEGER, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = SIGNED_INTEGER;
static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1);
static const UnsignedBits LOWEST_KEY = HIGH_BIT;
static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT;
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
return key ^ HIGH_BIT;
};
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
return key ^ HIGH_BIT;
};
static __host__ __device__ __forceinline__ T Max()
{
UnsignedBits retval = MAX_KEY;
return reinterpret_cast<T&>(retval);
}
static __host__ __device__ __forceinline__ T Lowest()
{
UnsignedBits retval = LOWEST_KEY;
return reinterpret_cast<T&>(retval);
}
};
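// Host-side sketch of the signed-key bit twiddle above (illustrative, not part
// of the original header): XOR-ing the sign bit maps two's-complement ordering
// onto unsigned ordering, e.g. INT_MIN -> 0x00000000 and INT_MAX -> 0xFFFFFFFF,
// so radix-sorting the twiddled keys as unsigned preserves signed order.
__host__ __device__ __forceinline__ unsigned int TwiddleSignedExample(int key)
{
    const unsigned int SIGN_BIT = 1u << 31;
    return static_cast<unsigned int>(key) ^ SIGN_BIT;
}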
template <typename _T>
struct FpLimits;
template <>
struct FpLimits<float>
{
static __host__ __device__ __forceinline__ float Max() {
return FLT_MAX;
}
static __host__ __device__ __forceinline__ float Lowest() {
return FLT_MAX * float(-1);
}
};
template <>
struct FpLimits<double>
{
static __host__ __device__ __forceinline__ double Max() {
return DBL_MAX;
}
static __host__ __device__ __forceinline__ double Lowest() {
return DBL_MAX * double(-1);
}
};
#if (__CUDACC_VER_MAJOR__ >= 9)
template <>
struct FpLimits<__half>
{
static __host__ __device__ __forceinline__ __half Max() {
unsigned short max_word = 0x7BFF;
return reinterpret_cast<__half&>(max_word);
}
static __host__ __device__ __forceinline__ __half Lowest() {
unsigned short lowest_word = 0xFBFF;
return reinterpret_cast<__half&>(lowest_word);
}
};
#endif
/**
* Basic type traits (fp primitive specialization)
*/
template <typename _UnsignedBits, typename T>
struct BaseTraits<FLOATING_POINT, true, false, _UnsignedBits, T>
{
typedef _UnsignedBits UnsignedBits;
static const Category CATEGORY = FLOATING_POINT;
static const UnsignedBits HIGH_BIT = UnsignedBits(1) << ((sizeof(UnsignedBits) * 8) - 1);
static const UnsignedBits LOWEST_KEY = UnsignedBits(-1);
static const UnsignedBits MAX_KEY = UnsignedBits(-1) ^ HIGH_BIT;
enum
{
PRIMITIVE = true,
NULL_TYPE = false,
};
static __device__ __forceinline__ UnsignedBits TwiddleIn(UnsignedBits key)
{
UnsignedBits mask = (key & HIGH_BIT) ? UnsignedBits(-1) : HIGH_BIT;
return key ^ mask;
};
static __device__ __forceinline__ UnsignedBits TwiddleOut(UnsignedBits key)
{
UnsignedBits mask = (key & HIGH_BIT) ? HIGH_BIT : UnsignedBits(-1);
return key ^ mask;
};
static __host__ __device__ __forceinline__ T Max() {
return FpLimits<T>::Max();
}
static __host__ __device__ __forceinline__ T Lowest() {
return FpLimits<T>::Lowest();
}
};
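// Host-side sketch of the floating-point key twiddle above (illustrative;
// assumes IEEE-754 binary32 and reuses the reinterpret_cast idiom already used
// in this file): positive values get the sign bit set, negative values are
// fully inverted, so comparing the twiddled bits as unsigned integers
// reproduces the floating-point ordering.
__host__ __forceinline__ unsigned int TwiddleFloatExample(float key)
{
    unsigned int bits = reinterpret_cast<unsigned int&>(key);
    unsigned int mask = (bits & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u;
    return bits ^ mask;
}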
/**
* \brief Numeric type traits
*/
template <typename T> struct NumericTraits : BaseTraits<NOT_A_NUMBER, false, false, T, T> {};
template <> struct NumericTraits<NullType> : BaseTraits<NOT_A_NUMBER, false, true, NullType, NullType> {};
template <> struct NumericTraits<char> : BaseTraits<(std::numeric_limits<char>::is_signed) ? SIGNED_INTEGER : UNSIGNED_INTEGER, true, false, unsigned char, char> {};
template <> struct NumericTraits<signed char> : BaseTraits<SIGNED_INTEGER, true, false, unsigned char, signed char> {};
template <> struct NumericTraits<short> : BaseTraits<SIGNED_INTEGER, true, false, unsigned short, short> {};
template <> struct NumericTraits<int> : BaseTraits<SIGNED_INTEGER, true, false, unsigned int, int> {};
template <> struct NumericTraits<long> : BaseTraits<SIGNED_INTEGER, true, false, unsigned long, long> {};
template <> struct NumericTraits<long long> : BaseTraits<SIGNED_INTEGER, true, false, unsigned long long, long long> {};
template <> struct NumericTraits<unsigned char> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned char, unsigned char> {};
template <> struct NumericTraits<unsigned short> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned short, unsigned short> {};
template <> struct NumericTraits<unsigned int> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned int, unsigned int> {};
template <> struct NumericTraits<unsigned long> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned long, unsigned long> {};
template <> struct NumericTraits<unsigned long long> : BaseTraits<UNSIGNED_INTEGER, true, false, unsigned long long, unsigned long long> {};
template <> struct NumericTraits<float> : BaseTraits<FLOATING_POINT, true, false, unsigned int, float> {};
template <> struct NumericTraits<double> : BaseTraits<FLOATING_POINT, true, false, unsigned long long, double> {};
#if (__CUDACC_VER_MAJOR__ >= 9)
template <> struct NumericTraits<__half> : BaseTraits<FLOATING_POINT, true, false, unsigned short, __half> {};
#endif
template <> struct NumericTraits<bool> : BaseTraits<UNSIGNED_INTEGER, true, false, typename UnitWord<bool>::VolatileWord, bool> {};
/**
* \brief Type traits
*/
template <typename T>
struct Traits : NumericTraits<typename RemoveQualifiers<T>::Type> {};
#endif // DOXYGEN_SHOULD_SKIP_THIS
/** @} */ // end group UtilModule
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
#include "sph/sph_fugue.h"
#include "cuda_helper.h"
#include <host_defines.h>
#define USE_SHARED 1
uint32_t *d_fugue256_hashoutput[8];
uint32_t *d_resultNonce[8];
__constant__ uint32_t GPUstate[30]; // Single GPU
__constant__ uint32_t pTarget[8]; // Single GPU
texture<unsigned int, 1, cudaReadModeElementType> mixTab0Tex;
texture<unsigned int, 1, cudaReadModeElementType> mixTab1Tex;
texture<unsigned int, 1, cudaReadModeElementType> mixTab2Tex;
texture<unsigned int, 1, cudaReadModeElementType> mixTab3Tex;
#if USE_SHARED
#define mixtab0(x) (*((uint32_t*)mixtabs + ( (x))))
#define mixtab1(x) (*((uint32_t*)mixtabs + (256+(x))))
#define mixtab2(x) (*((uint32_t*)mixtabs + (512+(x))))
#define mixtab3(x) (*((uint32_t*)mixtabs + (768+(x))))
#else
#define mixtab0(x) tex1Dfetch(mixTab0Tex, x)
#define mixtab1(x) tex1Dfetch(mixTab1Tex, x)
#define mixtab2(x) tex1Dfetch(mixTab2Tex, x)
#define mixtab3(x) tex1Dfetch(mixTab3Tex, x)
#endif
/* TABLES */
static const uint32_t mixtab0_cpu[] = {
SPH_C32(0x63633297), SPH_C32(0x7c7c6feb), SPH_C32(0x77775ec7),
SPH_C32(0x7b7b7af7), SPH_C32(0xf2f2e8e5), SPH_C32(0x6b6b0ab7),
SPH_C32(0x6f6f16a7), SPH_C32(0xc5c56d39), SPH_C32(0x303090c0),
SPH_C32(0x01010704), SPH_C32(0x67672e87), SPH_C32(0x2b2bd1ac),
SPH_C32(0xfefeccd5), SPH_C32(0xd7d71371), SPH_C32(0xabab7c9a),
SPH_C32(0x767659c3), SPH_C32(0xcaca4005), SPH_C32(0x8282a33e),
SPH_C32(0xc9c94909), SPH_C32(0x7d7d68ef), SPH_C32(0xfafad0c5),
SPH_C32(0x5959947f), SPH_C32(0x4747ce07), SPH_C32(0xf0f0e6ed),
SPH_C32(0xadad6e82), SPH_C32(0xd4d41a7d), SPH_C32(0xa2a243be),
SPH_C32(0xafaf608a), SPH_C32(0x9c9cf946), SPH_C32(0xa4a451a6),
SPH_C32(0x727245d3), SPH_C32(0xc0c0762d), SPH_C32(0xb7b728ea),
SPH_C32(0xfdfdc5d9), SPH_C32(0x9393d47a), SPH_C32(0x2626f298),
SPH_C32(0x363682d8), SPH_C32(0x3f3fbdfc), SPH_C32(0xf7f7f3f1),
SPH_C32(0xcccc521d), SPH_C32(0x34348cd0), SPH_C32(0xa5a556a2),
SPH_C32(0xe5e58db9), SPH_C32(0xf1f1e1e9), SPH_C32(0x71714cdf),
SPH_C32(0xd8d83e4d), SPH_C32(0x313197c4), SPH_C32(0x15156b54),
SPH_C32(0x04041c10), SPH_C32(0xc7c76331), SPH_C32(0x2323e98c),
SPH_C32(0xc3c37f21), SPH_C32(0x18184860), SPH_C32(0x9696cf6e),
SPH_C32(0x05051b14), SPH_C32(0x9a9aeb5e), SPH_C32(0x0707151c),
SPH_C32(0x12127e48), SPH_C32(0x8080ad36), SPH_C32(0xe2e298a5),
SPH_C32(0xebeba781), SPH_C32(0x2727f59c), SPH_C32(0xb2b233fe),
SPH_C32(0x757550cf), SPH_C32(0x09093f24), SPH_C32(0x8383a43a),
SPH_C32(0x2c2cc4b0), SPH_C32(0x1a1a4668), SPH_C32(0x1b1b416c),
SPH_C32(0x6e6e11a3), SPH_C32(0x5a5a9d73), SPH_C32(0xa0a04db6),
SPH_C32(0x5252a553), SPH_C32(0x3b3ba1ec), SPH_C32(0xd6d61475),
SPH_C32(0xb3b334fa), SPH_C32(0x2929dfa4), SPH_C32(0xe3e39fa1),
SPH_C32(0x2f2fcdbc), SPH_C32(0x8484b126), SPH_C32(0x5353a257),
SPH_C32(0xd1d10169), SPH_C32(0x00000000), SPH_C32(0xededb599),
SPH_C32(0x2020e080), SPH_C32(0xfcfcc2dd), SPH_C32(0xb1b13af2),
SPH_C32(0x5b5b9a77), SPH_C32(0x6a6a0db3), SPH_C32(0xcbcb4701),
SPH_C32(0xbebe17ce), SPH_C32(0x3939afe4), SPH_C32(0x4a4aed33),
SPH_C32(0x4c4cff2b), SPH_C32(0x5858937b), SPH_C32(0xcfcf5b11),
SPH_C32(0xd0d0066d), SPH_C32(0xefefbb91), SPH_C32(0xaaaa7b9e),
SPH_C32(0xfbfbd7c1), SPH_C32(0x4343d217), SPH_C32(0x4d4df82f),
SPH_C32(0x333399cc), SPH_C32(0x8585b622), SPH_C32(0x4545c00f),
SPH_C32(0xf9f9d9c9), SPH_C32(0x02020e08), SPH_C32(0x7f7f66e7),
SPH_C32(0x5050ab5b), SPH_C32(0x3c3cb4f0), SPH_C32(0x9f9ff04a),
SPH_C32(0xa8a87596), SPH_C32(0x5151ac5f), SPH_C32(0xa3a344ba),
SPH_C32(0x4040db1b), SPH_C32(0x8f8f800a), SPH_C32(0x9292d37e),
SPH_C32(0x9d9dfe42), SPH_C32(0x3838a8e0), SPH_C32(0xf5f5fdf9),
SPH_C32(0xbcbc19c6), SPH_C32(0xb6b62fee), SPH_C32(0xdada3045),
SPH_C32(0x2121e784), SPH_C32(0x10107040), SPH_C32(0xffffcbd1),
SPH_C32(0xf3f3efe1), SPH_C32(0xd2d20865), SPH_C32(0xcdcd5519),
SPH_C32(0x0c0c2430), SPH_C32(0x1313794c), SPH_C32(0xececb29d),
SPH_C32(0x5f5f8667), SPH_C32(0x9797c86a), SPH_C32(0x4444c70b),
SPH_C32(0x1717655c), SPH_C32(0xc4c46a3d), SPH_C32(0xa7a758aa),
SPH_C32(0x7e7e61e3), SPH_C32(0x3d3db3f4), SPH_C32(0x6464278b),
SPH_C32(0x5d5d886f), SPH_C32(0x19194f64), SPH_C32(0x737342d7),
SPH_C32(0x60603b9b), SPH_C32(0x8181aa32), SPH_C32(0x4f4ff627),
SPH_C32(0xdcdc225d), SPH_C32(0x2222ee88), SPH_C32(0x2a2ad6a8),
SPH_C32(0x9090dd76), SPH_C32(0x88889516), SPH_C32(0x4646c903),
SPH_C32(0xeeeebc95), SPH_C32(0xb8b805d6), SPH_C32(0x14146c50),
SPH_C32(0xdede2c55), SPH_C32(0x5e5e8163), SPH_C32(0x0b0b312c),
SPH_C32(0xdbdb3741), SPH_C32(0xe0e096ad), SPH_C32(0x32329ec8),
SPH_C32(0x3a3aa6e8), SPH_C32(0x0a0a3628), SPH_C32(0x4949e43f),
SPH_C32(0x06061218), SPH_C32(0x2424fc90), SPH_C32(0x5c5c8f6b),
SPH_C32(0xc2c27825), SPH_C32(0xd3d30f61), SPH_C32(0xacac6986),
SPH_C32(0x62623593), SPH_C32(0x9191da72), SPH_C32(0x9595c662),
SPH_C32(0xe4e48abd), SPH_C32(0x797974ff), SPH_C32(0xe7e783b1),
SPH_C32(0xc8c84e0d), SPH_C32(0x373785dc), SPH_C32(0x6d6d18af),
SPH_C32(0x8d8d8e02), SPH_C32(0xd5d51d79), SPH_C32(0x4e4ef123),
SPH_C32(0xa9a97292), SPH_C32(0x6c6c1fab), SPH_C32(0x5656b943),
SPH_C32(0xf4f4fafd), SPH_C32(0xeaeaa085), SPH_C32(0x6565208f),
SPH_C32(0x7a7a7df3), SPH_C32(0xaeae678e), SPH_C32(0x08083820),
SPH_C32(0xbaba0bde), SPH_C32(0x787873fb), SPH_C32(0x2525fb94),
SPH_C32(0x2e2ecab8), SPH_C32(0x1c1c5470), SPH_C32(0xa6a65fae),
SPH_C32(0xb4b421e6), SPH_C32(0xc6c66435), SPH_C32(0xe8e8ae8d),
SPH_C32(0xdddd2559), SPH_C32(0x747457cb), SPH_C32(0x1f1f5d7c),
SPH_C32(0x4b4bea37), SPH_C32(0xbdbd1ec2), SPH_C32(0x8b8b9c1a),
SPH_C32(0x8a8a9b1e), SPH_C32(0x70704bdb), SPH_C32(0x3e3ebaf8),
SPH_C32(0xb5b526e2), SPH_C32(0x66662983), SPH_C32(0x4848e33b),
SPH_C32(0x0303090c), SPH_C32(0xf6f6f4f5), SPH_C32(0x0e0e2a38),
SPH_C32(0x61613c9f), SPH_C32(0x35358bd4), SPH_C32(0x5757be47),
SPH_C32(0xb9b902d2), SPH_C32(0x8686bf2e), SPH_C32(0xc1c17129),
SPH_C32(0x1d1d5374), SPH_C32(0x9e9ef74e), SPH_C32(0xe1e191a9),
SPH_C32(0xf8f8decd), SPH_C32(0x9898e556), SPH_C32(0x11117744),
SPH_C32(0x696904bf), SPH_C32(0xd9d93949), SPH_C32(0x8e8e870e),
SPH_C32(0x9494c166), SPH_C32(0x9b9bec5a), SPH_C32(0x1e1e5a78),
SPH_C32(0x8787b82a), SPH_C32(0xe9e9a989), SPH_C32(0xcece5c15),
SPH_C32(0x5555b04f), SPH_C32(0x2828d8a0), SPH_C32(0xdfdf2b51),
SPH_C32(0x8c8c8906), SPH_C32(0xa1a14ab2), SPH_C32(0x89899212),
SPH_C32(0x0d0d2334), SPH_C32(0xbfbf10ca), SPH_C32(0xe6e684b5),
SPH_C32(0x4242d513), SPH_C32(0x686803bb), SPH_C32(0x4141dc1f),
SPH_C32(0x9999e252), SPH_C32(0x2d2dc3b4), SPH_C32(0x0f0f2d3c),
SPH_C32(0xb0b03df6), SPH_C32(0x5454b74b), SPH_C32(0xbbbb0cda),
SPH_C32(0x16166258)
};
static const uint32_t mixtab1_cpu[] = {
SPH_C32(0x97636332), SPH_C32(0xeb7c7c6f), SPH_C32(0xc777775e),
SPH_C32(0xf77b7b7a), SPH_C32(0xe5f2f2e8), SPH_C32(0xb76b6b0a),
SPH_C32(0xa76f6f16), SPH_C32(0x39c5c56d), SPH_C32(0xc0303090),
SPH_C32(0x04010107), SPH_C32(0x8767672e), SPH_C32(0xac2b2bd1),
SPH_C32(0xd5fefecc), SPH_C32(0x71d7d713), SPH_C32(0x9aabab7c),
SPH_C32(0xc3767659), SPH_C32(0x05caca40), SPH_C32(0x3e8282a3),
SPH_C32(0x09c9c949), SPH_C32(0xef7d7d68), SPH_C32(0xc5fafad0),
SPH_C32(0x7f595994), SPH_C32(0x074747ce), SPH_C32(0xedf0f0e6),
SPH_C32(0x82adad6e), SPH_C32(0x7dd4d41a), SPH_C32(0xbea2a243),
SPH_C32(0x8aafaf60), SPH_C32(0x469c9cf9), SPH_C32(0xa6a4a451),
SPH_C32(0xd3727245), SPH_C32(0x2dc0c076), SPH_C32(0xeab7b728),
SPH_C32(0xd9fdfdc5), SPH_C32(0x7a9393d4), SPH_C32(0x982626f2),
SPH_C32(0xd8363682), SPH_C32(0xfc3f3fbd), SPH_C32(0xf1f7f7f3),
SPH_C32(0x1dcccc52), SPH_C32(0xd034348c), SPH_C32(0xa2a5a556),
SPH_C32(0xb9e5e58d), SPH_C32(0xe9f1f1e1), SPH_C32(0xdf71714c),
SPH_C32(0x4dd8d83e), SPH_C32(0xc4313197), SPH_C32(0x5415156b),
SPH_C32(0x1004041c), SPH_C32(0x31c7c763), SPH_C32(0x8c2323e9),
SPH_C32(0x21c3c37f), SPH_C32(0x60181848), SPH_C32(0x6e9696cf),
SPH_C32(0x1405051b), SPH_C32(0x5e9a9aeb), SPH_C32(0x1c070715),
SPH_C32(0x4812127e), SPH_C32(0x368080ad), SPH_C32(0xa5e2e298),
SPH_C32(0x81ebeba7), SPH_C32(0x9c2727f5), SPH_C32(0xfeb2b233),
SPH_C32(0xcf757550), SPH_C32(0x2409093f), SPH_C32(0x3a8383a4),
SPH_C32(0xb02c2cc4), SPH_C32(0x681a1a46), SPH_C32(0x6c1b1b41),
SPH_C32(0xa36e6e11), SPH_C32(0x735a5a9d), SPH_C32(0xb6a0a04d),
SPH_C32(0x535252a5), SPH_C32(0xec3b3ba1), SPH_C32(0x75d6d614),
SPH_C32(0xfab3b334), SPH_C32(0xa42929df), SPH_C32(0xa1e3e39f),
SPH_C32(0xbc2f2fcd), SPH_C32(0x268484b1), SPH_C32(0x575353a2),
SPH_C32(0x69d1d101), SPH_C32(0x00000000), SPH_C32(0x99ededb5),
SPH_C32(0x802020e0), SPH_C32(0xddfcfcc2), SPH_C32(0xf2b1b13a),
SPH_C32(0x775b5b9a), SPH_C32(0xb36a6a0d), SPH_C32(0x01cbcb47),
SPH_C32(0xcebebe17), SPH_C32(0xe43939af), SPH_C32(0x334a4aed),
SPH_C32(0x2b4c4cff), SPH_C32(0x7b585893), SPH_C32(0x11cfcf5b),
SPH_C32(0x6dd0d006), SPH_C32(0x91efefbb), SPH_C32(0x9eaaaa7b),
SPH_C32(0xc1fbfbd7), SPH_C32(0x174343d2), SPH_C32(0x2f4d4df8),
SPH_C32(0xcc333399), SPH_C32(0x228585b6), SPH_C32(0x0f4545c0),
SPH_C32(0xc9f9f9d9), SPH_C32(0x0802020e), SPH_C32(0xe77f7f66),
SPH_C32(0x5b5050ab), SPH_C32(0xf03c3cb4), SPH_C32(0x4a9f9ff0),
SPH_C32(0x96a8a875), SPH_C32(0x5f5151ac), SPH_C32(0xbaa3a344),
SPH_C32(0x1b4040db), SPH_C32(0x0a8f8f80), SPH_C32(0x7e9292d3),
SPH_C32(0x429d9dfe), SPH_C32(0xe03838a8), SPH_C32(0xf9f5f5fd),
SPH_C32(0xc6bcbc19), SPH_C32(0xeeb6b62f), SPH_C32(0x45dada30),
SPH_C32(0x842121e7), SPH_C32(0x40101070), SPH_C32(0xd1ffffcb),
SPH_C32(0xe1f3f3ef), SPH_C32(0x65d2d208), SPH_C32(0x19cdcd55),
SPH_C32(0x300c0c24), SPH_C32(0x4c131379), SPH_C32(0x9dececb2),
SPH_C32(0x675f5f86), SPH_C32(0x6a9797c8), SPH_C32(0x0b4444c7),
SPH_C32(0x5c171765), SPH_C32(0x3dc4c46a), SPH_C32(0xaaa7a758),
SPH_C32(0xe37e7e61), SPH_C32(0xf43d3db3), SPH_C32(0x8b646427),
SPH_C32(0x6f5d5d88), SPH_C32(0x6419194f), SPH_C32(0xd7737342),
SPH_C32(0x9b60603b), SPH_C32(0x328181aa), SPH_C32(0x274f4ff6),
SPH_C32(0x5ddcdc22), SPH_C32(0x882222ee), SPH_C32(0xa82a2ad6),
SPH_C32(0x769090dd), SPH_C32(0x16888895), SPH_C32(0x034646c9),
SPH_C32(0x95eeeebc), SPH_C32(0xd6b8b805), SPH_C32(0x5014146c),
SPH_C32(0x55dede2c), SPH_C32(0x635e5e81), SPH_C32(0x2c0b0b31),
SPH_C32(0x41dbdb37), SPH_C32(0xade0e096), SPH_C32(0xc832329e),
SPH_C32(0xe83a3aa6), SPH_C32(0x280a0a36), SPH_C32(0x3f4949e4),
SPH_C32(0x18060612), SPH_C32(0x902424fc), SPH_C32(0x6b5c5c8f),
SPH_C32(0x25c2c278), SPH_C32(0x61d3d30f), SPH_C32(0x86acac69),
SPH_C32(0x93626235), SPH_C32(0x729191da), SPH_C32(0x629595c6),
SPH_C32(0xbde4e48a), SPH_C32(0xff797974), SPH_C32(0xb1e7e783),
SPH_C32(0x0dc8c84e), SPH_C32(0xdc373785), SPH_C32(0xaf6d6d18),
SPH_C32(0x028d8d8e), SPH_C32(0x79d5d51d), SPH_C32(0x234e4ef1),
SPH_C32(0x92a9a972), SPH_C32(0xab6c6c1f), SPH_C32(0x435656b9),
SPH_C32(0xfdf4f4fa), SPH_C32(0x85eaeaa0), SPH_C32(0x8f656520),
SPH_C32(0xf37a7a7d), SPH_C32(0x8eaeae67), SPH_C32(0x20080838),
SPH_C32(0xdebaba0b), SPH_C32(0xfb787873), SPH_C32(0x942525fb),
SPH_C32(0xb82e2eca), SPH_C32(0x701c1c54), SPH_C32(0xaea6a65f),
SPH_C32(0xe6b4b421), SPH_C32(0x35c6c664), SPH_C32(0x8de8e8ae),
SPH_C32(0x59dddd25), SPH_C32(0xcb747457), SPH_C32(0x7c1f1f5d),
SPH_C32(0x374b4bea), SPH_C32(0xc2bdbd1e), SPH_C32(0x1a8b8b9c),
SPH_C32(0x1e8a8a9b), SPH_C32(0xdb70704b), SPH_C32(0xf83e3eba),
SPH_C32(0xe2b5b526), SPH_C32(0x83666629), SPH_C32(0x3b4848e3),
SPH_C32(0x0c030309), SPH_C32(0xf5f6f6f4), SPH_C32(0x380e0e2a),
SPH_C32(0x9f61613c), SPH_C32(0xd435358b), SPH_C32(0x475757be),
SPH_C32(0xd2b9b902), SPH_C32(0x2e8686bf), SPH_C32(0x29c1c171),
SPH_C32(0x741d1d53), SPH_C32(0x4e9e9ef7), SPH_C32(0xa9e1e191),
SPH_C32(0xcdf8f8de), SPH_C32(0x569898e5), SPH_C32(0x44111177),
SPH_C32(0xbf696904), SPH_C32(0x49d9d939), SPH_C32(0x0e8e8e87),
SPH_C32(0x669494c1), SPH_C32(0x5a9b9bec), SPH_C32(0x781e1e5a),
SPH_C32(0x2a8787b8), SPH_C32(0x89e9e9a9), SPH_C32(0x15cece5c),
SPH_C32(0x4f5555b0), SPH_C32(0xa02828d8), SPH_C32(0x51dfdf2b),
SPH_C32(0x068c8c89), SPH_C32(0xb2a1a14a), SPH_C32(0x12898992),
SPH_C32(0x340d0d23), SPH_C32(0xcabfbf10), SPH_C32(0xb5e6e684),
SPH_C32(0x134242d5), SPH_C32(0xbb686803), SPH_C32(0x1f4141dc),
SPH_C32(0x529999e2), SPH_C32(0xb42d2dc3), SPH_C32(0x3c0f0f2d),
SPH_C32(0xf6b0b03d), SPH_C32(0x4b5454b7), SPH_C32(0xdabbbb0c),
SPH_C32(0x58161662)
};
static const uint32_t mixtab2_cpu[] = {
SPH_C32(0x32976363), SPH_C32(0x6feb7c7c), SPH_C32(0x5ec77777),
SPH_C32(0x7af77b7b), SPH_C32(0xe8e5f2f2), SPH_C32(0x0ab76b6b),
SPH_C32(0x16a76f6f), SPH_C32(0x6d39c5c5), SPH_C32(0x90c03030),
SPH_C32(0x07040101), SPH_C32(0x2e876767), SPH_C32(0xd1ac2b2b),
SPH_C32(0xccd5fefe), SPH_C32(0x1371d7d7), SPH_C32(0x7c9aabab),
SPH_C32(0x59c37676), SPH_C32(0x4005caca), SPH_C32(0xa33e8282),
SPH_C32(0x4909c9c9), SPH_C32(0x68ef7d7d), SPH_C32(0xd0c5fafa),
SPH_C32(0x947f5959), SPH_C32(0xce074747), SPH_C32(0xe6edf0f0),
SPH_C32(0x6e82adad), SPH_C32(0x1a7dd4d4), SPH_C32(0x43bea2a2),
SPH_C32(0x608aafaf), SPH_C32(0xf9469c9c), SPH_C32(0x51a6a4a4),
SPH_C32(0x45d37272), SPH_C32(0x762dc0c0), SPH_C32(0x28eab7b7),
SPH_C32(0xc5d9fdfd), SPH_C32(0xd47a9393), SPH_C32(0xf2982626),
SPH_C32(0x82d83636), SPH_C32(0xbdfc3f3f), SPH_C32(0xf3f1f7f7),
SPH_C32(0x521dcccc), SPH_C32(0x8cd03434), SPH_C32(0x56a2a5a5),
SPH_C32(0x8db9e5e5), SPH_C32(0xe1e9f1f1), SPH_C32(0x4cdf7171),
SPH_C32(0x3e4dd8d8), SPH_C32(0x97c43131), SPH_C32(0x6b541515),
SPH_C32(0x1c100404), SPH_C32(0x6331c7c7), SPH_C32(0xe98c2323),
SPH_C32(0x7f21c3c3), SPH_C32(0x48601818), SPH_C32(0xcf6e9696),
SPH_C32(0x1b140505), SPH_C32(0xeb5e9a9a), SPH_C32(0x151c0707),
SPH_C32(0x7e481212), SPH_C32(0xad368080), SPH_C32(0x98a5e2e2),
SPH_C32(0xa781ebeb), SPH_C32(0xf59c2727), SPH_C32(0x33feb2b2),
SPH_C32(0x50cf7575), SPH_C32(0x3f240909), SPH_C32(0xa43a8383),
SPH_C32(0xc4b02c2c), SPH_C32(0x46681a1a), SPH_C32(0x416c1b1b),
SPH_C32(0x11a36e6e), SPH_C32(0x9d735a5a), SPH_C32(0x4db6a0a0),
SPH_C32(0xa5535252), SPH_C32(0xa1ec3b3b), SPH_C32(0x1475d6d6),
SPH_C32(0x34fab3b3), SPH_C32(0xdfa42929), SPH_C32(0x9fa1e3e3),
SPH_C32(0xcdbc2f2f), SPH_C32(0xb1268484), SPH_C32(0xa2575353),
SPH_C32(0x0169d1d1), SPH_C32(0x00000000), SPH_C32(0xb599eded),
SPH_C32(0xe0802020), SPH_C32(0xc2ddfcfc), SPH_C32(0x3af2b1b1),
SPH_C32(0x9a775b5b), SPH_C32(0x0db36a6a), SPH_C32(0x4701cbcb),
SPH_C32(0x17cebebe), SPH_C32(0xafe43939), SPH_C32(0xed334a4a),
SPH_C32(0xff2b4c4c), SPH_C32(0x937b5858), SPH_C32(0x5b11cfcf),
SPH_C32(0x066dd0d0), SPH_C32(0xbb91efef), SPH_C32(0x7b9eaaaa),
SPH_C32(0xd7c1fbfb), SPH_C32(0xd2174343), SPH_C32(0xf82f4d4d),
SPH_C32(0x99cc3333), SPH_C32(0xb6228585), SPH_C32(0xc00f4545),
SPH_C32(0xd9c9f9f9), SPH_C32(0x0e080202), SPH_C32(0x66e77f7f),
SPH_C32(0xab5b5050), SPH_C32(0xb4f03c3c), SPH_C32(0xf04a9f9f),
SPH_C32(0x7596a8a8), SPH_C32(0xac5f5151), SPH_C32(0x44baa3a3),
SPH_C32(0xdb1b4040), SPH_C32(0x800a8f8f), SPH_C32(0xd37e9292),
SPH_C32(0xfe429d9d), SPH_C32(0xa8e03838), SPH_C32(0xfdf9f5f5),
SPH_C32(0x19c6bcbc), SPH_C32(0x2feeb6b6), SPH_C32(0x3045dada),
SPH_C32(0xe7842121), SPH_C32(0x70401010), SPH_C32(0xcbd1ffff),
SPH_C32(0xefe1f3f3), SPH_C32(0x0865d2d2), SPH_C32(0x5519cdcd),
SPH_C32(0x24300c0c), SPH_C32(0x794c1313), SPH_C32(0xb29decec),
SPH_C32(0x86675f5f), SPH_C32(0xc86a9797), SPH_C32(0xc70b4444),
SPH_C32(0x655c1717), SPH_C32(0x6a3dc4c4), SPH_C32(0x58aaa7a7),
SPH_C32(0x61e37e7e), SPH_C32(0xb3f43d3d), SPH_C32(0x278b6464),
SPH_C32(0x886f5d5d), SPH_C32(0x4f641919), SPH_C32(0x42d77373),
SPH_C32(0x3b9b6060), SPH_C32(0xaa328181), SPH_C32(0xf6274f4f),
SPH_C32(0x225ddcdc), SPH_C32(0xee882222), SPH_C32(0xd6a82a2a),
SPH_C32(0xdd769090), SPH_C32(0x95168888), SPH_C32(0xc9034646),
SPH_C32(0xbc95eeee), SPH_C32(0x05d6b8b8), SPH_C32(0x6c501414),
SPH_C32(0x2c55dede), SPH_C32(0x81635e5e), SPH_C32(0x312c0b0b),
SPH_C32(0x3741dbdb), SPH_C32(0x96ade0e0), SPH_C32(0x9ec83232),
SPH_C32(0xa6e83a3a), SPH_C32(0x36280a0a), SPH_C32(0xe43f4949),
SPH_C32(0x12180606), SPH_C32(0xfc902424), SPH_C32(0x8f6b5c5c),
SPH_C32(0x7825c2c2), SPH_C32(0x0f61d3d3), SPH_C32(0x6986acac),
SPH_C32(0x35936262), SPH_C32(0xda729191), SPH_C32(0xc6629595),
SPH_C32(0x8abde4e4), SPH_C32(0x74ff7979), SPH_C32(0x83b1e7e7),
SPH_C32(0x4e0dc8c8), SPH_C32(0x85dc3737), SPH_C32(0x18af6d6d),
SPH_C32(0x8e028d8d), SPH_C32(0x1d79d5d5), SPH_C32(0xf1234e4e),
SPH_C32(0x7292a9a9), SPH_C32(0x1fab6c6c), SPH_C32(0xb9435656),
SPH_C32(0xfafdf4f4), SPH_C32(0xa085eaea), SPH_C32(0x208f6565),
SPH_C32(0x7df37a7a), SPH_C32(0x678eaeae), SPH_C32(0x38200808),
SPH_C32(0x0bdebaba), SPH_C32(0x73fb7878), SPH_C32(0xfb942525),
SPH_C32(0xcab82e2e), SPH_C32(0x54701c1c), SPH_C32(0x5faea6a6),
SPH_C32(0x21e6b4b4), SPH_C32(0x6435c6c6), SPH_C32(0xae8de8e8),
SPH_C32(0x2559dddd), SPH_C32(0x57cb7474), SPH_C32(0x5d7c1f1f),
SPH_C32(0xea374b4b), SPH_C32(0x1ec2bdbd), SPH_C32(0x9c1a8b8b),
SPH_C32(0x9b1e8a8a), SPH_C32(0x4bdb7070), SPH_C32(0xbaf83e3e),
SPH_C32(0x26e2b5b5), SPH_C32(0x29836666), SPH_C32(0xe33b4848),
SPH_C32(0x090c0303), SPH_C32(0xf4f5f6f6), SPH_C32(0x2a380e0e),
SPH_C32(0x3c9f6161), SPH_C32(0x8bd43535), SPH_C32(0xbe475757),
SPH_C32(0x02d2b9b9), SPH_C32(0xbf2e8686), SPH_C32(0x7129c1c1),
SPH_C32(0x53741d1d), SPH_C32(0xf74e9e9e), SPH_C32(0x91a9e1e1),
SPH_C32(0xdecdf8f8), SPH_C32(0xe5569898), SPH_C32(0x77441111),
SPH_C32(0x04bf6969), SPH_C32(0x3949d9d9), SPH_C32(0x870e8e8e),
SPH_C32(0xc1669494), SPH_C32(0xec5a9b9b), SPH_C32(0x5a781e1e),
SPH_C32(0xb82a8787), SPH_C32(0xa989e9e9), SPH_C32(0x5c15cece),
SPH_C32(0xb04f5555), SPH_C32(0xd8a02828), SPH_C32(0x2b51dfdf),
SPH_C32(0x89068c8c), SPH_C32(0x4ab2a1a1), SPH_C32(0x92128989),
SPH_C32(0x23340d0d), SPH_C32(0x10cabfbf), SPH_C32(0x84b5e6e6),
SPH_C32(0xd5134242), SPH_C32(0x03bb6868), SPH_C32(0xdc1f4141),
SPH_C32(0xe2529999), SPH_C32(0xc3b42d2d), SPH_C32(0x2d3c0f0f),
SPH_C32(0x3df6b0b0), SPH_C32(0xb74b5454), SPH_C32(0x0cdabbbb),
SPH_C32(0x62581616)
};
static const uint32_t mixtab3_cpu[] = {
SPH_C32(0x63329763), SPH_C32(0x7c6feb7c), SPH_C32(0x775ec777),
SPH_C32(0x7b7af77b), SPH_C32(0xf2e8e5f2), SPH_C32(0x6b0ab76b),
SPH_C32(0x6f16a76f), SPH_C32(0xc56d39c5), SPH_C32(0x3090c030),
SPH_C32(0x01070401), SPH_C32(0x672e8767), SPH_C32(0x2bd1ac2b),
SPH_C32(0xfeccd5fe), SPH_C32(0xd71371d7), SPH_C32(0xab7c9aab),
SPH_C32(0x7659c376), SPH_C32(0xca4005ca), SPH_C32(0x82a33e82),
SPH_C32(0xc94909c9), SPH_C32(0x7d68ef7d), SPH_C32(0xfad0c5fa),
SPH_C32(0x59947f59), SPH_C32(0x47ce0747), SPH_C32(0xf0e6edf0),
SPH_C32(0xad6e82ad), SPH_C32(0xd41a7dd4), SPH_C32(0xa243bea2),
SPH_C32(0xaf608aaf), SPH_C32(0x9cf9469c), SPH_C32(0xa451a6a4),
SPH_C32(0x7245d372), SPH_C32(0xc0762dc0), SPH_C32(0xb728eab7),
SPH_C32(0xfdc5d9fd), SPH_C32(0x93d47a93), SPH_C32(0x26f29826),
SPH_C32(0x3682d836), SPH_C32(0x3fbdfc3f), SPH_C32(0xf7f3f1f7),
SPH_C32(0xcc521dcc), SPH_C32(0x348cd034), SPH_C32(0xa556a2a5),
SPH_C32(0xe58db9e5), SPH_C32(0xf1e1e9f1), SPH_C32(0x714cdf71),
SPH_C32(0xd83e4dd8), SPH_C32(0x3197c431), SPH_C32(0x156b5415),
SPH_C32(0x041c1004), SPH_C32(0xc76331c7), SPH_C32(0x23e98c23),
SPH_C32(0xc37f21c3), SPH_C32(0x18486018), SPH_C32(0x96cf6e96),
SPH_C32(0x051b1405), SPH_C32(0x9aeb5e9a), SPH_C32(0x07151c07),
SPH_C32(0x127e4812), SPH_C32(0x80ad3680), SPH_C32(0xe298a5e2),
SPH_C32(0xeba781eb), SPH_C32(0x27f59c27), SPH_C32(0xb233feb2),
SPH_C32(0x7550cf75), SPH_C32(0x093f2409), SPH_C32(0x83a43a83),
SPH_C32(0x2cc4b02c), SPH_C32(0x1a46681a), SPH_C32(0x1b416c1b),
SPH_C32(0x6e11a36e), SPH_C32(0x5a9d735a), SPH_C32(0xa04db6a0),
SPH_C32(0x52a55352), SPH_C32(0x3ba1ec3b), SPH_C32(0xd61475d6),
SPH_C32(0xb334fab3), SPH_C32(0x29dfa429), SPH_C32(0xe39fa1e3),
SPH_C32(0x2fcdbc2f), SPH_C32(0x84b12684), SPH_C32(0x53a25753),
SPH_C32(0xd10169d1), SPH_C32(0x00000000), SPH_C32(0xedb599ed),
SPH_C32(0x20e08020), SPH_C32(0xfcc2ddfc), SPH_C32(0xb13af2b1),
SPH_C32(0x5b9a775b), SPH_C32(0x6a0db36a), SPH_C32(0xcb4701cb),
SPH_C32(0xbe17cebe), SPH_C32(0x39afe439), SPH_C32(0x4aed334a),
SPH_C32(0x4cff2b4c), SPH_C32(0x58937b58), SPH_C32(0xcf5b11cf),
SPH_C32(0xd0066dd0), SPH_C32(0xefbb91ef), SPH_C32(0xaa7b9eaa),
SPH_C32(0xfbd7c1fb), SPH_C32(0x43d21743), SPH_C32(0x4df82f4d),
SPH_C32(0x3399cc33), SPH_C32(0x85b62285), SPH_C32(0x45c00f45),
SPH_C32(0xf9d9c9f9), SPH_C32(0x020e0802), SPH_C32(0x7f66e77f),
SPH_C32(0x50ab5b50), SPH_C32(0x3cb4f03c), SPH_C32(0x9ff04a9f),
SPH_C32(0xa87596a8), SPH_C32(0x51ac5f51), SPH_C32(0xa344baa3),
SPH_C32(0x40db1b40), SPH_C32(0x8f800a8f), SPH_C32(0x92d37e92),
SPH_C32(0x9dfe429d), SPH_C32(0x38a8e038), SPH_C32(0xf5fdf9f5),
SPH_C32(0xbc19c6bc), SPH_C32(0xb62feeb6), SPH_C32(0xda3045da),
SPH_C32(0x21e78421), SPH_C32(0x10704010), SPH_C32(0xffcbd1ff),
SPH_C32(0xf3efe1f3), SPH_C32(0xd20865d2), SPH_C32(0xcd5519cd),
SPH_C32(0x0c24300c), SPH_C32(0x13794c13), SPH_C32(0xecb29dec),
SPH_C32(0x5f86675f), SPH_C32(0x97c86a97), SPH_C32(0x44c70b44),
SPH_C32(0x17655c17), SPH_C32(0xc46a3dc4), SPH_C32(0xa758aaa7),
SPH_C32(0x7e61e37e), SPH_C32(0x3db3f43d), SPH_C32(0x64278b64),
SPH_C32(0x5d886f5d), SPH_C32(0x194f6419), SPH_C32(0x7342d773),
SPH_C32(0x603b9b60), SPH_C32(0x81aa3281), SPH_C32(0x4ff6274f),
SPH_C32(0xdc225ddc), SPH_C32(0x22ee8822), SPH_C32(0x2ad6a82a),
SPH_C32(0x90dd7690), SPH_C32(0x88951688), SPH_C32(0x46c90346),
SPH_C32(0xeebc95ee), SPH_C32(0xb805d6b8), SPH_C32(0x146c5014),
SPH_C32(0xde2c55de), SPH_C32(0x5e81635e), SPH_C32(0x0b312c0b),
SPH_C32(0xdb3741db), SPH_C32(0xe096ade0), SPH_C32(0x329ec832),
SPH_C32(0x3aa6e83a), SPH_C32(0x0a36280a), SPH_C32(0x49e43f49),
SPH_C32(0x06121806), SPH_C32(0x24fc9024), SPH_C32(0x5c8f6b5c),
SPH_C32(0xc27825c2), SPH_C32(0xd30f61d3), SPH_C32(0xac6986ac),
SPH_C32(0x62359362), SPH_C32(0x91da7291), SPH_C32(0x95c66295),
SPH_C32(0xe48abde4), SPH_C32(0x7974ff79), SPH_C32(0xe783b1e7),
SPH_C32(0xc84e0dc8), SPH_C32(0x3785dc37), SPH_C32(0x6d18af6d),
SPH_C32(0x8d8e028d), SPH_C32(0xd51d79d5), SPH_C32(0x4ef1234e),
SPH_C32(0xa97292a9), SPH_C32(0x6c1fab6c), SPH_C32(0x56b94356),
SPH_C32(0xf4fafdf4), SPH_C32(0xeaa085ea), SPH_C32(0x65208f65),
SPH_C32(0x7a7df37a), SPH_C32(0xae678eae), SPH_C32(0x08382008),
SPH_C32(0xba0bdeba), SPH_C32(0x7873fb78), SPH_C32(0x25fb9425),
SPH_C32(0x2ecab82e), SPH_C32(0x1c54701c), SPH_C32(0xa65faea6),
SPH_C32(0xb421e6b4), SPH_C32(0xc66435c6), SPH_C32(0xe8ae8de8),
SPH_C32(0xdd2559dd), SPH_C32(0x7457cb74), SPH_C32(0x1f5d7c1f),
SPH_C32(0x4bea374b), SPH_C32(0xbd1ec2bd), SPH_C32(0x8b9c1a8b),
SPH_C32(0x8a9b1e8a), SPH_C32(0x704bdb70), SPH_C32(0x3ebaf83e),
SPH_C32(0xb526e2b5), SPH_C32(0x66298366), SPH_C32(0x48e33b48),
SPH_C32(0x03090c03), SPH_C32(0xf6f4f5f6), SPH_C32(0x0e2a380e),
SPH_C32(0x613c9f61), SPH_C32(0x358bd435), SPH_C32(0x57be4757),
SPH_C32(0xb902d2b9), SPH_C32(0x86bf2e86), SPH_C32(0xc17129c1),
SPH_C32(0x1d53741d), SPH_C32(0x9ef74e9e), SPH_C32(0xe191a9e1),
SPH_C32(0xf8decdf8), SPH_C32(0x98e55698), SPH_C32(0x11774411),
SPH_C32(0x6904bf69), SPH_C32(0xd93949d9), SPH_C32(0x8e870e8e),
SPH_C32(0x94c16694), SPH_C32(0x9bec5a9b), SPH_C32(0x1e5a781e),
SPH_C32(0x87b82a87), SPH_C32(0xe9a989e9), SPH_C32(0xce5c15ce),
SPH_C32(0x55b04f55), SPH_C32(0x28d8a028), SPH_C32(0xdf2b51df),
SPH_C32(0x8c89068c), SPH_C32(0xa14ab2a1), SPH_C32(0x89921289),
SPH_C32(0x0d23340d), SPH_C32(0xbf10cabf), SPH_C32(0xe684b5e6),
SPH_C32(0x42d51342), SPH_C32(0x6803bb68), SPH_C32(0x41dc1f41),
SPH_C32(0x99e25299), SPH_C32(0x2dc3b42d), SPH_C32(0x0f2d3c0f),
SPH_C32(0xb03df6b0), SPH_C32(0x54b74b54), SPH_C32(0xbb0cdabb),
SPH_C32(0x16625816)
};
#define TIX2(q, x00, x01, x08, x10, x24) { \
x10 ^= x00; \
x00 = (q); \
x08 ^= x00; \
x01 ^= x24; \
}
#define TIX3(q, x00, x01, x04, x08, x16, x27, x30) { \
x16 ^= x00; \
x00 = (q); \
x08 ^= x00; \
x01 ^= x27; \
x04 ^= x30; \
}
#define TIX4(q, x00, x01, x04, x07, x08, x22, x24, x27, x30) { \
x22 ^= x00; \
x00 = (q); \
x08 ^= x00; \
x01 ^= x24; \
x04 ^= x27; \
x07 ^= x30; \
}
#define CMIX30(x00, x01, x02, x04, x05, x06, x15, x16, x17) { \
x00 ^= x04; \
x01 ^= x05; \
x02 ^= x06; \
x15 ^= x04; \
x16 ^= x05; \
x17 ^= x06; \
}
#define CMIX36(x00, x01, x02, x04, x05, x06, x18, x19, x20) { \
x00 ^= x04; \
x01 ^= x05; \
x02 ^= x06; \
x18 ^= x04; \
x19 ^= x05; \
x20 ^= x06; \
}
#define SMIX(x0, x1, x2, x3) { \
uint32_t c0 = 0; \
uint32_t c1 = 0; \
uint32_t c2 = 0; \
uint32_t c3 = 0; \
uint32_t r0 = 0; \
uint32_t r1 = 0; \
uint32_t r2 = 0; \
uint32_t r3 = 0; \
uint32_t tmp; \
tmp = mixtab0(x0 >> 24); \
c0 ^= tmp; \
tmp = mixtab1((x0 >> 16) & 0xFF); \
c0 ^= tmp; \
r1 ^= tmp; \
tmp = mixtab2((x0 >> 8) & 0xFF); \
c0 ^= tmp; \
r2 ^= tmp; \
tmp = mixtab3(x0 & 0xFF); \
c0 ^= tmp; \
r3 ^= tmp; \
tmp = mixtab0(x1 >> 24); \
c1 ^= tmp; \
r0 ^= tmp; \
tmp = mixtab1((x1 >> 16) & 0xFF); \
c1 ^= tmp; \
tmp = mixtab2((x1 >> 8) & 0xFF); \
c1 ^= tmp; \
r2 ^= tmp; \
tmp = mixtab3(x1 & 0xFF); \
c1 ^= tmp; \
r3 ^= tmp; \
tmp = mixtab0(x2 >> 24); \
c2 ^= tmp; \
r0 ^= tmp; \
tmp = mixtab1((x2 >> 16) & 0xFF); \
c2 ^= tmp; \
r1 ^= tmp; \
tmp = mixtab2((x2 >> 8) & 0xFF); \
c2 ^= tmp; \
tmp = mixtab3(x2 & 0xFF); \
c2 ^= tmp; \
r3 ^= tmp; \
tmp = mixtab0(x3 >> 24); \
c3 ^= tmp; \
r0 ^= tmp; \
tmp = mixtab1((x3 >> 16) & 0xFF); \
c3 ^= tmp; \
r1 ^= tmp; \
tmp = mixtab2((x3 >> 8) & 0xFF); \
c3 ^= tmp; \
r2 ^= tmp; \
tmp = mixtab3(x3 & 0xFF); \
c3 ^= tmp; \
x0 = ((c0 ^ r0) & SPH_C32(0xFF000000)) \
| ((c1 ^ r1) & SPH_C32(0x00FF0000)) \
| ((c2 ^ r2) & SPH_C32(0x0000FF00)) \
| ((c3 ^ r3) & SPH_C32(0x000000FF)); \
x1 = ((c1 ^ (r0 << 8)) & SPH_C32(0xFF000000)) \
| ((c2 ^ (r1 << 8)) & SPH_C32(0x00FF0000)) \
| ((c3 ^ (r2 << 8)) & SPH_C32(0x0000FF00)) \
| ((c0 ^ (r3 >> 24)) & SPH_C32(0x000000FF)); \
x2 = ((c2 ^ (r0 << 16)) & SPH_C32(0xFF000000)) \
| ((c3 ^ (r1 << 16)) & SPH_C32(0x00FF0000)) \
| ((c0 ^ (r2 >> 16)) & SPH_C32(0x0000FF00)) \
| ((c1 ^ (r3 >> 16)) & SPH_C32(0x000000FF)); \
x3 = ((c3 ^ (r0 << 24)) & SPH_C32(0xFF000000)) \
| ((c0 ^ (r1 >> 8)) & SPH_C32(0x00FF0000)) \
| ((c1 ^ (r2 >> 8)) & SPH_C32(0x0000FF00)) \
| ((c2 ^ (r3 >> 8)) & SPH_C32(0x000000FF)); \
/* */ \
}
#define S00 (sc[ 0])
#define S01 (sc[ 1])
#define S02 (sc[ 2])
#define S03 (sc[ 3])
#define S04 (sc[ 4])
#define S05 (sc[ 5])
#define S06 (sc[ 6])
#define S07 (sc[ 7])
#define S08 (sc[ 8])
#define S09 (sc[ 9])
#define S10 (sc[10])
#define S11 (sc[11])
#define S12 (sc[12])
#define S13 (sc[13])
#define S14 (sc[14])
#define S15 (sc[15])
#define S16 (sc[16])
#define S17 (sc[17])
#define S18 (sc[18])
#define S19 (sc[19])
#define S20 (sc[20])
#define S21 (sc[21])
#define S22 (sc[22])
#define S23 (sc[23])
#define S24 (sc[24])
#define S25 (sc[25])
#define S26 (sc[26])
#define S27 (sc[27])
#define S28 (sc[28])
#define S29 (sc[29])
#define S30 (sc[30])
#define S31 (sc[31])
#define S32 (sc[32])
#define S33 (sc[33])
#define S34 (sc[34])
#define S35 (sc[35])
#define SWAB32(x) ( ((x & 0x000000FF) << 24) | ((x & 0x0000FF00) << 8) | ((x & 0x00FF0000) >> 8) | ((x & 0xFF000000) >> 24) )
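// Quick illustration of the byte swap above (not in the original source;
// assumes a C++11 compiler for static_assert): it reverses the four bytes of a
// 32-bit word, converting between little- and big-endian representations.
static_assert(SWAB32(0x11223344u) == 0x44332211u, "SWAB32 reverses byte order");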
/* GPU FUNCTIONS */
#if USE_SHARED
__global__ void __launch_bounds__(256)
#else
__global__ void
#endif
fugue256_gpu_hash(int thr_id, int threads, uint32_t startNounce, void *outputHash, uint32_t *resNounce)
{
#if USE_SHARED
extern __shared__ char mixtabs[];
*((uint32_t*)mixtabs + ( threadIdx.x)) = tex1Dfetch(mixTab0Tex, threadIdx.x);
*((uint32_t*)mixtabs + (256+threadIdx.x)) = tex1Dfetch(mixTab1Tex, threadIdx.x);
*((uint32_t*)mixtabs + (512+threadIdx.x)) = tex1Dfetch(mixTab2Tex, threadIdx.x);
*((uint32_t*)mixtabs + (768+threadIdx.x)) = tex1Dfetch(mixTab3Tex, threadIdx.x);
__syncthreads();
#endif
int thread = (blockDim.x * blockIdx.x + threadIdx.x);
if (thread < threads)
{
/* Take the state and process the last bytes (the nonce) */
uint32_t sc[30];
#pragma unroll 30
for(int i=0;i<30;i++)
sc[i] = GPUstate[i];
uint32_t nounce = startNounce + thread; // still to be determined
uint32_t q;
// At byte 80 the partial rounds run in the order 4-0-1 (fixed here)
// Partial round 4
q = nounce;
TIX2(q, S06, S07, S14, S16, S00);
CMIX30(S03, S04, S05, S07, S08, S09, S18, S19, S20);
SMIX(S03, S04, S05, S06);
CMIX30(S00, S01, S02, S04, S05, S06, S15, S16, S17);
SMIX(S00, S01, S02, S03);
// Partial round 0
q = 0;
TIX2(q, S00, S01, S08, S10, S24);
CMIX30(S27, S28, S29, S01, S02, S03, S12, S13, S14);
SMIX(S27, S28, S29, S00);
CMIX30(S24, S25, S26, S28, S29, S00, S09, S10, S11);
SMIX(S24, S25, S26, S27);
// Partial round 1
q = 0x280; // hopefully in the right byte order...
TIX2(q, S24, S25, S02, S04, S18);
CMIX30(S21, S22, S23, S25, S26, S27, S06, S07, S08);
SMIX(S21, S22, S23, S24);
CMIX30(S18, S19, S20, S22, S23, S24, S03, S04, S05);
SMIX(S18, S19, S20, S21);
// End of round
// rms = 12, i.e. 30 - 12 = 18
#pragma unroll 10
for(int i=0;i<10;i++)
{
//ROR(3, 30);
uint32_t tmp[3];
#pragma unroll 3
for(int k=0;k<3;k++)
tmp[k] = sc[27+k];
#pragma unroll 27
for(int k=26;k>=0;k--)
sc[k+3] = sc[k];
#pragma unroll 3
for(int k=0;k<3;k++)
sc[k] = tmp[k];
CMIX30(sc[18], sc[19], sc[20], sc[22], sc[23], sc[24], sc[3], sc[4], sc[5]);
SMIX(sc[18], sc[19], sc[20], sc[21]);
}
#pragma unroll 13
for(int i=0;i<13;i++)
{
sc[22] ^= sc[18];
sc[3] ^= sc[18];
// ROR(15, 30); BEGIN
uint32_t tmp1[15];
#pragma unroll 15
for(int k=0;k<15;k++)
tmp1[k] = sc[15+k];
#pragma unroll 15
for(int k=14;k>=0;k--)
sc[k+15] = sc[k];
#pragma unroll 15
for(int k=0;k<15;k++)
sc[k] = tmp1[k];
// ROR(15, 30); END
SMIX(sc[18], sc[19], sc[20], sc[21]);
sc[22] ^= sc[18];
sc[4] ^= sc[18];
// ROR(14, 30); BEGIN
uint32_t tmp2[14];
#pragma unroll 14
for(int k=0;k<14;k++)
tmp2[k] = sc[16+k];
#pragma unroll 16
for(int k=15;k>=0;k--)
sc[k+14] = sc[k];
#pragma unroll 14
for(int k=0;k<14;k++)
sc[k] = tmp2[k];
// ROR(14, 30); END
SMIX(sc[18], sc[19], sc[20], sc[21]);
}
sc[22] ^= sc[18];
sc[3] ^= sc[18];
/*
// SWAP32 and write out the data
#pragma unroll 4
for(int i=0;i<4;i++)
((uint32_t*)outputHash)[8*thread+i] = SWAB32(sc[19+i]);
#pragma unroll 4
for(int i=0;i<4;i++)
((uint32_t*)outputHash)[8*thread+i+4] = SWAB32(sc[3+i]);
*/
uint32_t hash[8];
#pragma unroll 4
for(int i=0;i<4;i++)
((uint32_t*)hash)[i] = SWAB32(sc[19+i]);
#pragma unroll 4
for(int i=0;i<4;i++)
((uint32_t*)hash)[i+4] = SWAB32(sc[3+i]);
int i;
bool rc = true;
for (i = 7; i >= 0; i--) {
if (hash[i] > pTarget[i]) {
rc = false;
break;
}
if (hash[i] < pTarget[i]) {
rc = true;
break;
}
}
if(rc == true)
{
if(resNounce[0] > nounce)
resNounce[0] = nounce;
}
}
}
#define texDef(texname, texmem, texsource, texsize) \
unsigned int *texmem; \
cudaMalloc(&texmem, texsize); \
cudaMemcpy(texmem, texsource, texsize, cudaMemcpyHostToDevice); \
texname.normalized = 0; \
texname.filterMode = cudaFilterModePoint; \
texname.addressMode[0] = cudaAddressModeClamp; \
{ cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned int>(); \
cudaBindTexture(NULL, &texname, texmem, &channelDesc, texsize ); }
void fugue256_cpu_init(int thr_id, int threads)
{
cudaSetDevice(device_map[thr_id]);
// Copy the hash tables into GPU memory
texDef(mixTab0Tex, mixTab0m, mixtab0_cpu, sizeof(uint32_t)*256);
texDef(mixTab1Tex, mixTab1m, mixtab1_cpu, sizeof(uint32_t)*256);
texDef(mixTab2Tex, mixTab2m, mixtab2_cpu, sizeof(uint32_t)*256);
texDef(mixTab3Tex, mixTab3m, mixtab3_cpu, sizeof(uint32_t)*256);
// Allocate memory for all results
cudaMalloc(&d_fugue256_hashoutput[thr_id], 8 * sizeof(uint32_t) * threads);
cudaMalloc(&d_resultNonce[thr_id], sizeof(uint32_t));
}
__host__ void fugue256_cpu_setBlock(int thr_id, void *data, void *pTargetIn)
{
// CPU-side preparation
sph_fugue256_context ctx_fugue_const;
sph_fugue256_init(&ctx_fugue_const);
sph_fugue256 (&ctx_fugue_const, data, 80); // save the state
cudaMemcpyToSymbol( GPUstate,
ctx_fugue_const.S,
sizeof(uint32_t) * 30 );
cudaMemcpyToSymbol( pTarget,
pTargetIn,
sizeof(uint32_t) * 8 );
cudaMemset(d_resultNonce[thr_id], 0xFF, sizeof(uint32_t));
}
__host__ void fugue256_cpu_hash(int thr_id, int threads, int startNounce, void *outputHashes, uint32_t *nounce)
{
#if USE_SHARED
const int threadsperblock = 256; // must match the mixtab size in shared memory. DO NOT CHANGE
#else
const int threadsperblock = 512; // tune as desired ;-)
#endif
// compute how many thread blocks we need
dim3 grid((threads + threadsperblock-1)/threadsperblock);
dim3 block(threadsperblock);
// size of the dynamic shared memory region
#if USE_SHARED
size_t shared_size = 4 * 256 * sizeof(uint32_t);
#else
size_t shared_size = 0;
#endif
fugue256_gpu_hash<<<grid, block, shared_size>>>(thr_id, threads, startNounce, d_fugue256_hashoutput[thr_id], d_resultNonce[thr_id]);
// Strategic sleep command to reduce CPU load
MyStreamSynchronize(NULL, 0, thr_id);
//cudaMemcpy(outputHashes, d_fugue256_hashoutput[thr_id], 8 * sizeof(uint32_t), cudaMemcpyDeviceToHost);
cudaMemcpy(nounce, d_resultNonce[thr_id], sizeof(uint32_t), cudaMemcpyDeviceToHost);
}
/*
  \brief CUTLASS Library handle.
*/
#include <iostream>
#include <stdexcept>
#include <cstdint>
#include "cutlass/library/handle.h"
#include "cutlass/library/singleton.h"
#include "cutlass/library/util.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Constructor
Handle::Handle(
cudaStream_t stream,
size_t workspace_size
):
provider_(Provider::kCUTLASS),
stream_(stream),
workspace_(nullptr),
workspace_size_(0),
scalar_pointer_mode_(ScalarPointerMode::kHost),
last_operation_(nullptr) {
int device_idx = -1;
cudaError_t error = cudaGetDevice(&device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDevice() failed");
}
error = cudaGetDeviceProperties(&device_, device_idx);
if (error != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed");
}
set_workspace_size(workspace_size);
Singleton::get();
}
/// Destructor
Handle::~Handle() {
if (workspace_) {
cudaFree(workspace_);
workspace_ = nullptr;
workspace_size_ = 0;
}
}
/// Move constructor
Handle::Handle(Handle && handle) {
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
}
/// Move assignment operator
Handle & Handle::operator=(Handle && handle) {
provider_ = handle.provider_;
device_ = handle.device_;
workspace_size_ = handle.workspace_size_;
workspace_ = handle.workspace_;
stream_ = handle.stream_;
scalar_pointer_mode_ = handle.scalar_pointer_mode_;
handle.workspace_ = nullptr;
handle.workspace_size_ = 0;
return *this;
}
int Handle::compute_capability() const {
return device_.major * 10 + device_.minor;
}
/// Sets the current CUDA stream
void Handle::set_stream(cudaStream_t stream) {
stream_ = stream;
}
/// Gets the current CUDA stream
cudaStream_t Handle::get_stream() const {
return stream_;
}
/// Gets the current provider
Provider Handle::get_provider() const {
return provider_;
}
/// Sets the provider of operations
void Handle::set_provider(Provider provider) {
provider_ = provider;
}
/// Gets the device workspace size
size_t Handle::get_workspace_size() const {
return workspace_size_;
}
/// Gets a pointer to the device workspace allocation in Global Memory
void *Handle::get_workspace() const {
return workspace_;
}
/// Sets the size of device workspace, invalidating previous calls to get_device_workspace()
void Handle::set_workspace_size(size_t bytes) {
if (bytes != workspace_size_) {
if (workspace_) {
cudaFree(workspace_);
}
workspace_ = nullptr;
workspace_size_ = bytes;
if (workspace_size_) {
cudaError_t error = cudaMalloc((void **)&workspace_, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to allocate workspace");
}
}
}
if (workspace_) {
cudaError_t error = cudaMemset(workspace_, 0, workspace_size_);
if (error != cudaSuccess) {
throw std::runtime_error("Failed to clear workspace");
}
}
}
/// Gets the scalar pointer mode
ScalarPointerMode Handle::get_scalar_pointer_mode() const {
return scalar_pointer_mode_;
}
/// Sets the scalar pointer mode
void Handle::set_scalar_pointer_mode(ScalarPointerMode mode) {
scalar_pointer_mode_ = mode;
}
/// Gets the last operation
Operation const *Handle::get_last_operation() const {
return last_operation_;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Returns the maximum required alignment for each operator
static int maximum_alignment_requirement(GemmDescription const &desc) {
return std::max(
std::max(desc.A.alignment, desc.B.alignment), desc.C.alignment);
}
/// Returns the largest alignment (in units of elements) the problem satisfies, starting from a
/// given upper limit.
static int gemm_problem_alignment(
int M,
int N,
int K,
NumericTypeID element_A,
void const *ptr_A,
int64_t lda,
int64_t batch_stride_A,
NumericTypeID element_B,
void const *ptr_B,
int64_t ldb,
int64_t batch_stride_B,
NumericTypeID element_C,
void const * ptr_C,
int64_t ldc,
int64_t batch_stride_C,
void const * ptr_D,
int64_t ldd,
int64_t batch_stride_D,
int max_alignment_in_bytes = 16
) {
void const *pointers[] = {
ptr_A, ptr_B, ptr_C, ptr_D
};
int64_t extents[] = {
M, N, K, lda, ldb, ldc, ldd, batch_stride_A, batch_stride_B, batch_stride_C, batch_stride_D
};
NumericTypeID elements[] = {
element_A, element_B, element_C
};
for (; max_alignment_in_bytes > 0; max_alignment_in_bytes /= 2) {
bool satisfied = true;
// Can pointers satisfy this?
for (void const *ptr : pointers) {
std::uintptr_t int_ptr = reinterpret_cast<std::uintptr_t>(ptr);
if (int_ptr % max_alignment_in_bytes) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Compute the maximum alignment based on element data types
int max_element_alignment = 0;
for (NumericTypeID type_id : elements) {
int element_alignment = max_alignment_in_bytes * 8 / library::sizeof_bits(type_id);
max_element_alignment = std::max(max_element_alignment, element_alignment);
}
// Can the problem size and leading dimensions satisfy this?
for (int64_t extent : extents) {
if (extent % max_element_alignment) {
satisfied = false;
break;
}
}
if (!satisfied) {
continue;
}
// Yes
return max_element_alignment;
}
// No alignment satisfies this problem
return 0;
}
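// Worked example: with 16-bit elements (e.g. F16) and max_alignment_in_bytes = 16, the candidate
// element alignment is 16 * 8 / 16 = 8 elements. If every pointer is 16-byte aligned and M, N, K
// and all leading dimensions are multiples of 8, the function returns 8; otherwise the byte limit
// is halved (8 bytes -> 4 elements, and so on) until some alignment is satisfied or 0 is returned.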
/// Find the best kernel in descending order of preference.
static Operation const * find_gemm_operation(
GemmOperationFunctionalMap::const_iterator operators_it,
GemmPreferenceKey const preference_key) {
auto cc_it = operators_it->second.upper_bound(preference_key);
if (cc_it == operators_it->second.begin()) {
return nullptr;
}
Operation const *operation = nullptr;
// Search in descending order of compute capability
do {
--cc_it;
// Search tile sizes in order, for now.
for (auto const * op : cc_it->second) {
GemmDescription const &desc = static_cast<GemmDescription const &>(op->description());
int min_cc = desc.tile_description.minimum_compute_capability;
int max_cc = desc.tile_description.maximum_compute_capability;
int op_alignment = maximum_alignment_requirement(desc);
if ((min_cc <= preference_key.compute_capability) &&
(preference_key.compute_capability <= max_cc) &&
(op_alignment <= preference_key.alignment)) {
operation = op;
break;
}
}
} while (!operation && cc_it != operators_it->second.begin());
return operation;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C
Status Handle::gemm(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd /// Leading dimension of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kGemm,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A, lda, 0,
element_B, ptr_B, ldb, 0,
element_C, ptr_C, ldc, 0,
ptr_D, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmConfiguration configuration{
{M, N, K},
lda,
ldb,
ldc,
ldd,
1
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
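// Illustrative sketch (not part of the original handle implementation): how a caller might route a
// single-precision, column-major GEMM through Handle::gemm. The device pointers and leading
// dimensions (d_A, d_B, d_C, d_D, lda, ldb, ldc, ldd) are assumed to be prepared by the caller.
static Status example_sgemm(
  Handle &handle,
  int M, int N, int K,
  float alpha, float const *d_A, int64_t lda,
  float const *d_B, int64_t ldb,
  float beta, float const *d_C, int64_t ldc,
  float *d_D, int64_t ldd) {

  return handle.gemm(
    M, N, K,
    NumericTypeID::kF32,   // element_compute
    NumericTypeID::kF32,   // element_scalar
    &alpha,
    NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_A, lda,
    NumericTypeID::kF32, LayoutTypeID::kColumnMajor, ComplexTransform::kNone, d_B, ldb,
    &beta,
    NumericTypeID::kF32, d_C, ldc,
    d_D, ldd);
}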
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Executes a GEMM computation: D <= alpha * A*B + beta * C.
//
// Supports batched-strided, batched array or split-K serial or split-K parallel.
//
Status Handle::gemm_universal(
GemmUniversalMode mode, /// indicates the mode in which the kUniversal GEMM is launched
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix - ignored for real-valued matrices
void const * ptr_A, /// Pointer to A matrix in Global Memory
int64_t lda, /// Leading dimension of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix - ignored for real-valued matrices
void const * ptr_B, /// Pointer to B matrix in Global Memory
int64_t ldb, /// Leading dimension of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrices
void const * ptr_C, /// Pointer to C matrix
int64_t ldc, /// Leading dimension of C matrix
void * ptr_D, /// Pointer to D matrix
int64_t ldd, /// Leading dimension of D matrix
int batch_count, /// Batch count or number of split-K slices
int64_t batch_stride_A, /// Batch stride of A operand
int64_t batch_stride_B, /// Batch stride of B operand
int64_t batch_stride_C, /// Batch stride of C operand
int64_t batch_stride_D /// Batch stride of D operand
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kUniversal,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
void const *ptr_A_check = ptr_A;
void const *ptr_B_check = ptr_B;
void const *ptr_C_check = ptr_C;
void * ptr_D_check = ptr_D;
// Ignore alignment of pointers to pointers. We can't check this from the host,
// as each batch index has its own pointer in device memory.
if (mode == GemmUniversalMode::kArray) {
ptr_A_check = nullptr;
ptr_B_check = nullptr;
ptr_C_check = nullptr;
ptr_D_check = nullptr;
}
int alignment = gemm_problem_alignment(
M, N, K,
element_A, ptr_A_check, lda, 0,
element_B, ptr_B_check, ldb, 0,
element_C, ptr_C_check, ldc, 0,
ptr_D_check, ldd, 0, kMaximumAlignmentSize
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmUniversalConfiguration configuration{
mode,
{M, N, K},
batch_count,
lda,
ldb,
ldc,
ldd
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmUniversalArguments arguments{
ptr_A,
ptr_B,
ptr_C,
ptr_D,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A,
batch_stride_B,
batch_stride_C,
batch_stride_D
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex GEMM
Status Handle::gemm_planar_complex(
int M, /// GEMM M dimension
int N, /// GEMM N dimension
int K, /// GEMM K dimension
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * ptr_A_real, /// Pointer to real part of A matrix
void const * ptr_A_imag, /// Pointer to imaginary part of A matrix
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * ptr_B_real, /// Pointer to real part of B matrix
void const * ptr_B_imag, /// Pointer to imaginary part of B matrix
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * ptr_C_real, /// Pointer to real part of C matrix
void const * ptr_C_imag, /// Pointer to imaginary part of C matrix
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * ptr_D_real, /// Pointer to real part of D matrix
void * ptr_D_imag, /// Pointer to imaginary part of D matrix
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag, /// Leading dimension of imaginary part of D matrix
int batch_count, /// Number of batched GEMMs to execute
int64_t batch_stride_A_real,
int64_t batch_stride_A_imag,
int64_t batch_stride_B_real,
int64_t batch_stride_B_imag,
int64_t batch_stride_C_real,
int64_t batch_stride_C_imag,
int64_t batch_stride_D_real,
int64_t batch_stride_D_imag
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplex,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_real, lda_real, batch_stride_A_real,
element_B, ptr_B_real, ldb_real, batch_stride_B_real,
element_C, ptr_C_real, ldc_real, batch_stride_C_real,
ptr_D_real, ldd_real, batch_stride_D_real, kMaximumAlignmentSize
),
gemm_problem_alignment(
M, N, K,
element_A, ptr_A_imag, lda_imag, batch_stride_A_imag,
element_B, ptr_B_imag, ldb_imag, batch_stride_B_imag,
element_C, ptr_C_imag, ldc_imag, batch_stride_C_imag,
ptr_D_imag, ldd_imag, batch_stride_D_imag, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexConfiguration configuration{
GemmUniversalMode::kBatched,
{M, N, K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArguments arguments{
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_,
batch_stride_A_real,
batch_stride_A_imag,
batch_stride_B_real,
batch_stride_B_imag,
batch_stride_C_real,
batch_stride_C_imag,
batch_stride_D_real,
batch_stride_D_imag
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Planar complex batched GEMM loading pointers from arrays in global memory
Status Handle::gemm_planar_complex_array(
int expected_M, /// Expected GEMM M dimension (used for sizing CUDA grid)
int expected_N, /// Expected GEMM N dimension (used for sizing CUDA grid)
int expected_K, /// Expected GEMM K dimension
int batch_count, /// Number of independent GEMM computations to execute
int const *M, /// Array containing the GEMM M dimension for each batch index
int const *N, /// Array containing the GEMM N dimension for each batch index
int const *K, /// Array containing the GEMM K dimension for each batch index
NumericTypeID element_compute, /// Data type of internal accumulation
NumericTypeID element_scalar, /// Data type of alpha/beta scalars
void const *alpha, /// Pointer to alpha scalar
NumericTypeID element_A, /// Data type of A matrix elements
LayoutTypeID layout_A, /// Layout of A matrix
ComplexTransform transform_A, /// Complex transformation applied to A matrix
void const * const * ptr_A_real, /// Pointer to array containing pointers to real part of A matrices
void const * const * ptr_A_imag, /// Pointer to array containing pointers to imaginary part of A matrices
int64_t lda_real, /// Leading dimension of real part of A matrix
int64_t lda_imag, /// Leading dimension of imaginary part of A matrix
NumericTypeID element_B, /// Data type of B matrix elements
LayoutTypeID layout_B, /// Layout of B matrix
ComplexTransform transform_B, /// Complex transformation applied to B matrix
void const * const * ptr_B_real, /// Pointer to array containing pointers to real part of B matrices
void const * const * ptr_B_imag, /// Pointer to array containing pointers to imaginary part of B matrices
int64_t ldb_real, /// Leading dimension of real part of B matrix
int64_t ldb_imag, /// Leading dimension of imaginary part of B matrix
void const * beta, /// Pointer to beta scalar
NumericTypeID element_C, /// Data type of C and D matrix
void const * const * ptr_C_real, /// Pointer to array containing pointers to real part of C matrices
void const * const * ptr_C_imag, /// Pointer to array containing pointers to imaginary part of C matrices
int64_t ldc_real, /// Leading dimension of real part of C matrix
int64_t ldc_imag, /// Leading dimension of imaginary part of C matrix
void * const * ptr_D_real, /// Pointer to array containing pointers to real part of D matrices
void * const * ptr_D_imag, /// Pointer to array containing pointers to imaginary part of D matrices
int64_t ldd_real, /// Leading dimension of real part of D matrix
int64_t ldd_imag /// Leading dimension of imaginary part of D matrix
) {
//
// Find the operation
//
GemmFunctionalKey key(
provider_,
GemmKind::kPlanarComplexArray,
element_compute,
element_scalar,
element_A,
layout_A,
transform_A,
element_B,
layout_B,
transform_B,
element_C
);
auto operators_it = Singleton::get().operation_table.gemm_operations.find(key);
if (operators_it == Singleton::get().operation_table.gemm_operations.end()) {
return cutlass::Status::kErrorNotSupported;
}
if (operators_it->second.empty()) {
return cutlass::Status::kErrorNotSupported;
}
//
// Compute the largest alignment restriction the kernel can satisfy.
//
// Maximum alignment expectation among all kernels (in units of bytes)
int const kMaximumAlignmentSize = 16;
int alignment = std::max(
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_real, 0,
element_B, nullptr, ldb_real, 0,
element_C, nullptr, ldc_real, 0,
nullptr, ldd_real, 0, kMaximumAlignmentSize
),
gemm_problem_alignment(
expected_M, expected_N, expected_K,
element_A, nullptr, lda_imag, 0,
element_B, nullptr, ldb_imag, 0,
element_C, nullptr, ldc_imag, 0,
nullptr, ldd_imag, 0, kMaximumAlignmentSize
)
);
//
// Find the best kernel in descending order of preference.
//
GemmPreferenceKey preference_key(compute_capability(), alignment);
Operation const *operation = find_gemm_operation(operators_it, preference_key);
if (!operation) {
return cutlass::Status::kErrorNotSupported;
}
last_operation_ = operation;
//
// Configure operation
//
GemmPlanarComplexArrayConfiguration configuration{
{expected_M, expected_N, expected_K},
batch_count,
lda_real,
lda_imag,
ldb_real,
ldb_imag,
ldc_real,
ldc_imag,
ldd_real,
ldd_imag
};
// Query host work space size
uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration);
if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
char host_workspace[kHostWorkspaceSize];
// Query device workspace size
uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration);
if (uint64_t(workspace_size_) < device_workspace_size_needed) {
return cutlass::Status::kErrorNotSupported;
}
// Initialize host and device workspaces
Status status = operation->initialize(
&configuration,
host_workspace,
workspace_,
stream_);
if (status != cutlass::Status::kSuccess) {
return status;
}
// Run the operator
GemmPlanarComplexArrayArguments arguments{
M, N, K,
ptr_A_real,
ptr_A_imag,
ptr_B_real,
ptr_B_imag,
ptr_C_real,
ptr_C_imag,
ptr_D_real,
ptr_D_imag,
alpha,
beta,
scalar_pointer_mode_
};
return operation->run(&arguments, host_workspace, workspace_, stream_);
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Finds conv operation instances with Conv::ElementC = Reduction::ElementWorkspace
Operation const* find_conv_operation_for_parallel_reduction(Operation const *operation) {
ConvDescription const &conv_desc =
static_cast<ConvDescription const &>(operation->description());
// if the current conv operation accumulator and output data type match, return the operation
if(conv_desc.tile_description.math_instruction.element_accumulator == conv_desc.C.element) {
return operation;
}
// find conv operation to match conv output and reduction workspace data type
ConvFunctionalKey key(
library::Provider::kCUTLASS,
conv_desc.conv_kind,
conv_desc.A.element,
conv_desc.A.layout,
conv_desc.B.element,
conv_desc.B.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.C.layout,
conv_desc.tile_description.math_instruction.element_accumulator,
conv_desc.element_epilogue);
// conv operation table for conv2d or conv3d
auto conv_operations = (conv_desc.kind == OperationKind::kConv2d) ?
Singleton::get().operation_table.conv2d_operations :
Singleton::get().operation_table.conv3d_operations;
// find ConvFunctionalKey in convolution operation table
auto operators_it = conv_operations.find(key);
if (operators_it == conv_operations.end()) {
return nullptr;
}
if (operators_it->second.empty()) {
return nullptr;
}
// conv operation for same compute capability and iterator algorithm
ConvPreferenceKey preference_key(
conv_desc.tile_description.minimum_compute_capability,
conv_desc.iterator_algorithm);
auto it = operators_it->second.find(preference_key);
if(it == operators_it->second.end()) {
return nullptr;
}
// return matching conv operation (same tile sizes and instruction)
for (auto op : it->second) {
if (op->description().tile_description == operation->description().tile_description) {
return op;
}
}
return nullptr;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
__device__ void getTopLeft(float x, int xOut, int width, int& point, float& weight)
{
/* for interpolation :
stores in point and weight :
- the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel)
- the weight for interpolating
*/
//float xcoord = (x + 1) * (width - 1) / 2;
float xcoord = x + xOut;
if (xcoord < 0) { xcoord = 0; }
if (xcoord > (width-1) ) { xcoord = width -1; }
point = floor(xcoord);
weight = 1 - (xcoord - point);
}
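// Worked example for getTopLeft: if the clamped coordinate is xcoord = 3.3, then point = 3 and
// weight = 1 - 0.3 = 0.7; the neighbouring pixel at point + 1 receives the remaining 1 - weight = 0.3.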
__device__ bool between(int value, int lowerBound, int upperBound)
{
return (value >= lowerBound && value <= upperBound);
}
__device__ void sumReduceShMem(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
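// Illustrative alternative (not used by the kernels below, which target older GPUs): on sm_30+ the
// same 32-lane sum can be computed without shared memory using warp shuffles.
__device__ inline float warpSum32(float v)
{
  // tree reduction over the warp: after the loop, lane 0 holds the sum of all 32 lanes
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_down_sync(0xffffffff, v, offset);
  return v;
}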
__global__ void bilinearScaling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block handles 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*blockDim.y + threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, xOut, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, yOut, inputImages_height, yInTopLeft, yWeightTopLeft);
//getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
//getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
//v = inBottomRight;
output_data[outAddress + t] = v;
}
}
static int cunn_ScaleBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
dim3 blocks((output->size[2]+15)/16, output->size[1], output->size[0]);
dim3 threads(32,16);
/* assume BHWD */
bilinearScaling <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 3),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, output, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
template<bool onlyGrid> __global__ void backwardScaling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_width)
{
// each (32,16) block handles 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, xOut, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, yOut, inputImages_height, yInTopLeft, yWeightTopLeft);
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
float topLeftDotProduct = 0;
float topRightDotProduct = 0;
float bottomLeftDotProduct = 0;
float bottomRightDotProduct = 0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftIsIn)
{
float inTopLeft = inputImages_data[inTopLeftAddress + t];
topLeftDotProduct += inTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress + t], xWeightTopLeft * yWeightTopLeft * gradOutValue);
}
if(topRightIsIn)
{
float inTopRight = inputImages_data[inTopRightAddress + t];
topRightDotProduct += inTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress + t], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue);
}
if(bottomLeftIsIn)
{
float inBottomLeft = inputImages_data[inBottomLeftAddress + t];
bottomLeftDotProduct += inBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress + t], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue);
}
if(bottomRightIsIn)
{
float inBottomRight = inputImages_data[inBottomRightAddress + t];
bottomRightDotProduct += inBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress + t], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;
if(threadIdx.x==0)
{
gridData[threadIdx.y*2] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*2+1] = xf * (inputImages_width-1) / 2;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + threadIdx.x] = gridData[threadIdx.x];
}
static int cunn_ScaleBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
backwardScaling <false> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static int cunn_ScaleBHWD_updateGradInputOnlyGrid(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
backwardScaling <true> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
0,
0,
0,
0,
0,
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
static const struct luaL_Reg cunn_ScaleBHWD__ [] = {
{"ScaleBHWD_updateOutput", cunn_ScaleBHWD_updateOutput},
{"ScaleBHWD_updateGradInput", cunn_ScaleBHWD_updateGradInput},
{"ScaleBHWD_updateGradInputOnlyGrid", cunn_ScaleBHWD_updateGradInputOnlyGrid},
{NULL, NULL}
};
static void cunn_ScaleBHWD_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_ScaleBHWD__, "nn");
lua_pop(L,1);
}
#include <cugraph/algorithms.hpp>
#include <cugraph/graph_view.hpp>
#include <cugraph/prims/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/prims/count_if_e.cuh>
#include <cugraph/prims/reduce_op.cuh>
#include <cugraph/prims/row_col_properties.cuh>
#include <cugraph/prims/transform_reduce_e.cuh>
#include <cugraph/prims/update_frontier_v_push_if_out_nbr.cuh>
#include <cugraph/prims/vertex_frontier.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/vertex_partition_device_view.cuh>
#include <raft/cudart_utils.h>
#include <thrust/fill.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/optional.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
#include <limits>
namespace cugraph {
namespace detail {
template <typename GraphViewType, typename PredecessorIterator>
void sssp(raft::handle_t const& handle,
GraphViewType const& push_graph_view,
typename GraphViewType::weight_type* distances,
PredecessorIterator predecessor_first,
typename GraphViewType::vertex_type source_vertex,
typename GraphViewType::weight_type cutoff,
bool do_expensive_check)
{
using vertex_t = typename GraphViewType::vertex_type;
using weight_t = typename GraphViewType::weight_type;
static_assert(std::is_integral<vertex_t>::value,
"GraphViewType::vertex_type should be integral.");
static_assert(!GraphViewType::is_adj_matrix_transposed,
"GraphViewType should support the push model.");
auto const num_vertices = push_graph_view.get_number_of_vertices();
auto const num_edges = push_graph_view.get_number_of_edges();
if (num_vertices == 0) { return; }
// implements the Near-Far Pile method in
// A. Davidson, S. Baxter, M. Garland, and J. D. Owens, "Work-efficient parallel GPU methods for
// single-source shortest paths," 2014.
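// The frontier lives in three buckets: vertices whose tentative distance is below the current
// near/far threshold are relaxed in the current pass (cur_near / next_near), the rest are parked
// in the far bucket. Once the near buckets drain, the threshold is advanced by delta and the far
// bucket is split again (see step 5 below).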
// 1. check input arguments
CUGRAPH_EXPECTS(push_graph_view.is_valid_vertex(source_vertex),
"Invalid input argument: source vertex out-of-range.");
CUGRAPH_EXPECTS(push_graph_view.is_weighted(),
"Invalid input argument: an unweighted graph is passed to SSSP, BFS is more "
"efficient for unweighted graphs.");
if (do_expensive_check) {
auto num_negative_edge_weights =
count_if_e(handle,
push_graph_view,
dummy_properties_t<vertex_t>{}.device_view(),
dummy_properties_t<vertex_t>{}.device_view(),
[] __device__(vertex_t, vertex_t, weight_t w, auto, auto) { return w < 0.0; });
CUGRAPH_EXPECTS(num_negative_edge_weights == 0,
"Invalid input argument: input graph should have non-negative edge weights.");
}
// 2. initialize distances and predecessors
auto constexpr invalid_distance = std::numeric_limits<weight_t>::max();
auto constexpr invalid_vertex = invalid_vertex_id<vertex_t>::value;
auto val_first = thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first));
thrust::transform(handle.get_thrust_policy(),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_first()),
thrust::make_counting_iterator(push_graph_view.get_local_vertex_last()),
val_first,
[source_vertex] __device__(auto val) {
auto distance = invalid_distance;
if (val == source_vertex) { distance = weight_t{0.0}; }
return thrust::make_tuple(distance, invalid_vertex);
});
if (num_edges == 0) { return; }
// 3. update delta
weight_t average_vertex_degree{0.0};
weight_t average_edge_weight{0.0};
thrust::tie(average_vertex_degree, average_edge_weight) = transform_reduce_e(
handle,
push_graph_view,
dummy_properties_t<vertex_t>{}.device_view(),
dummy_properties_t<vertex_t>{}.device_view(),
[] __device__(vertex_t, vertex_t, weight_t w, auto, auto) {
return thrust::make_tuple(weight_t{1.0}, w);
},
thrust::make_tuple(weight_t{0.0}, weight_t{0.0}));
average_vertex_degree /= static_cast<weight_t>(num_vertices);
average_edge_weight /= static_cast<weight_t>(num_edges);
auto delta =
(static_cast<weight_t>(raft::warp_size()) * average_edge_weight) / average_vertex_degree;
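// delta is the heuristic near/far bucket width: a larger delta admits more vertices per near pass
// (more parallelism but more redundant relaxations), while a very small delta degenerates towards
// plain Dijkstra.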
// 4. initialize SSSP frontier
enum class Bucket { cur_near, next_near, far, num_buckets };
VertexFrontier<vertex_t,
void,
GraphViewType::is_multi_gpu,
static_cast<size_t>(Bucket::num_buckets)>
vertex_frontier(handle);
// 5. SSSP iteration
auto adj_matrix_row_distances =
GraphViewType::is_multi_gpu ? row_properties_t<GraphViewType, weight_t>(handle, push_graph_view)
: row_properties_t<GraphViewType, weight_t>{};
if (GraphViewType::is_multi_gpu) {
adj_matrix_row_distances.fill(std::numeric_limits<weight_t>::max(), handle.get_stream());
}
if (push_graph_view.is_local_vertex_nocheck(source_vertex)) {
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).insert(source_vertex);
}
auto near_far_threshold = delta;
while (true) {
if (GraphViewType::is_multi_gpu) {
copy_to_adj_matrix_row(
handle,
push_graph_view,
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).begin(),
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).end(),
distances,
adj_matrix_row_distances);
}
auto vertex_partition = vertex_partition_device_view_t<vertex_t, GraphViewType::is_multi_gpu>(
push_graph_view.get_vertex_partition_view());
update_frontier_v_push_if_out_nbr(
handle,
push_graph_view,
vertex_frontier,
static_cast<size_t>(Bucket::cur_near),
std::vector<size_t>{static_cast<size_t>(Bucket::next_near), static_cast<size_t>(Bucket::far)},
GraphViewType::is_multi_gpu
? adj_matrix_row_distances.device_view()
: detail::major_properties_device_view_t<vertex_t, weight_t const*>(distances),
dummy_properties_t<vertex_t>{}.device_view(),
[vertex_partition, distances, cutoff] __device__(
vertex_t src, vertex_t dst, weight_t w, auto src_val, auto) {
auto push = true;
auto new_distance = src_val + w;
auto threshold = cutoff;
if (vertex_partition.is_local_vertex_nocheck(dst)) {
auto local_vertex_offset =
vertex_partition.get_local_vertex_offset_from_vertex_nocheck(dst);
auto old_distance = *(distances + local_vertex_offset);
threshold = old_distance < threshold ? old_distance : threshold;
}
if (new_distance >= threshold) { push = false; }
return push ? thrust::optional<thrust::tuple<weight_t, vertex_t>>{thrust::make_tuple(
new_distance, src)}
: thrust::nullopt;
},
reduce_op::min<thrust::tuple<weight_t, vertex_t>>(),
distances,
thrust::make_zip_iterator(thrust::make_tuple(distances, predecessor_first)),
[near_far_threshold] __device__(auto v, auto v_val, auto pushed_val) {
auto new_dist = thrust::get<0>(pushed_val);
auto idx = new_dist < v_val
? (new_dist < near_far_threshold ? static_cast<size_t>(Bucket::next_near)
: static_cast<size_t>(Bucket::far))
: VertexFrontier<vertex_t>::kInvalidBucketIdx;
return new_dist < v_val
? thrust::optional<thrust::tuple<size_t, decltype(pushed_val)>>{thrust::make_tuple(
static_cast<size_t>(new_dist < near_far_threshold ? Bucket::next_near
: Bucket::far),
pushed_val)}
: thrust::nullopt;
});
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).clear();
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).shrink_to_fit();
if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::next_near)).aggregate_size() > 0) {
vertex_frontier.swap_buckets(static_cast<size_t>(Bucket::cur_near),
static_cast<size_t>(Bucket::next_near));
} else if (vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size() >
0) { // near queue is empty, split the far queue
auto old_near_far_threshold = near_far_threshold;
near_far_threshold += delta;
size_t near_size{0};
size_t far_size{0};
while (true) {
vertex_frontier.split_bucket(
static_cast<size_t>(Bucket::far),
std::vector<size_t>{static_cast<size_t>(Bucket::cur_near)},
[vertex_partition, distances, old_near_far_threshold, near_far_threshold] __device__(
auto v) {
auto dist =
*(distances + vertex_partition.get_local_vertex_offset_from_vertex_nocheck(v));
return dist >= old_near_far_threshold
? thrust::optional<size_t>{static_cast<size_t>(
dist < near_far_threshold ? Bucket::cur_near : Bucket::far)}
: thrust::nullopt;
});
near_size =
vertex_frontier.get_bucket(static_cast<size_t>(Bucket::cur_near)).aggregate_size();
far_size = vertex_frontier.get_bucket(static_cast<size_t>(Bucket::far)).aggregate_size();
if ((near_size > 0) || (far_size == 0)) {
break;
} else {
near_far_threshold += delta;
}
}
if ((near_size == 0) && (far_size == 0)) { break; }
} else {
break;
}
}
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // this is necessary as vertex_frontier will go out of scope once
// this function returns (FIXME: should I stream sync in VertexFrontier
// destructor?)
}
} // namespace detail
template <typename vertex_t, typename edge_t, typename weight_t, bool multi_gpu>
void sssp(raft::handle_t const& handle,
graph_view_t<vertex_t, edge_t, weight_t, false, multi_gpu> const& graph_view,
weight_t* distances,
vertex_t* predecessors,
vertex_t source_vertex,
weight_t cutoff,
bool do_expensive_check)
{
if (predecessors != nullptr) {
detail::sssp(
handle, graph_view, distances, predecessors, source_vertex, cutoff, do_expensive_check);
} else {
detail::sssp(handle,
graph_view,
distances,
thrust::make_discard_iterator(),
source_vertex,
cutoff,
do_expensive_check);
}
}
} // namespace cugraph
#include "common.h"
#include <math.h>
#include <stdio.h>
#define RELERROR 1.0e-12 /* smallest relative error we want */
#define MAXPOW 32 /* max power of 10 we wish to search to */
#define MAXIT 800 /* max number of iterations */
#define MAX_RECURSE_DEPTH 10 /* maximum recursion depth for sbisect */
#define SMALL_ENOUGH 1.0e-12 /* a coefficient smaller than SMALL_ENOUGH
* is considered to be zero (0.0). */
/* structure type for representing a polynomial */
typedef struct p {
int ord;
double coef[Maxdegree+1];
} poly;
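/* Convention used throughout: coef[i] is the coefficient of x^i, so the polynomial is
 * coef[0] + coef[1]*x + ... + coef[ord]*x^ord with ord its degree. */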
/*---------------------------------------------------------------------------
* evalpoly
*
* evaluate polynomial defined in coef returning its value.
*--------------------------------------------------------------------------*/
__host__ __device__ double evalpoly (int ord, double *coef, double x)
{
double *fp = &coef[ord];
double f = *fp;
for (fp--; fp >= coef; fp--)
f = x * f + *fp;
return(f);
}
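/* evalpoly uses Horner's rule. Worked example for p(x) = 2x^2 + 3x + 1 (coef = {1, 3, 2}) at x = 2:
 * f = 2, then f = 2*2 + 3 = 7, then f = 7*2 + 1 = 15 = p(2). */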
__host__ __device__ int modrf_pos( int ord, double *coef,
double a, double b, double *val, int invert)
{
int its;
double fx, lfx;
double *fp;
double *scoef = coef;
double *ecoef = &coef[ord];
double fa, fb;
// Invert the interval if required
if (invert)
{
double temp = a;
a = 1.0 / b;
b = 1.0 / temp;
}
// Evaluate the polynomial at the end points
if (invert)
{
fb = fa = *scoef;
for (fp = scoef + 1; fp <= ecoef; fp++)
{
fa = a * fa + *fp;
fb = b * fb + *fp;
}
}
else
{
fb = fa = *ecoef;
for (fp = ecoef - 1; fp >= scoef; fp--)
{
fa = a * fa + *fp;
fb = b * fb + *fp;
}
}
// if there is no sign difference the method won't work
if (fa * fb > 0.0)
return(0);
// Return if the values are close to zero already
if (fabs(fa) < RELERROR)
{
*val = invert ? 1.0/a : a;
return(1);
}
if (fabs(fb) < RELERROR)
{
*val = invert ? 1.0/b : b;
return(1);
}
lfx = fa;
for (its = 0; its < MAXIT; its++)
{
// Assuming straight line from a to b, find zero
double x = (fb * a - fa * b) / (fb - fa);
// Evaluate the polynomial at x
if (invert)
{
fx = *scoef;
for (fp = scoef + 1; fp <= ecoef; fp++)
fx = x * fx + *fp;
}
else
{
fx = *ecoef;
for (fp = ecoef - 1; fp >= scoef; fp--)
fx = x * fx + *fp;
}
// Evaluate two stopping conditions
if (fabs(x) > RELERROR && fabs(fx/x) < RELERROR)
{
*val = invert ? 1.0/x : x;
return(1);
}
else if (fabs(fx) < RELERROR)
{
*val = invert ? 1.0/x : x;
return(1);
}
// Subdivide region, depending on whether fx has same sign as fa or fb
if ((fa * fx) < 0)
{
b = x;
fb = fx;
if ((lfx * fx) > 0) fa /= 2;
}
else
{
a = x;
fa = fx;
if ((lfx * fx) > 0) fb /= 2;
}
// Return if the difference between a and b is very small
if (fabs(b-a) < fabs(RELERROR * a))
{
*val = invert ? 1.0/a : a;
return(1);
}
lfx = fx;
}
//==================================================================
// This is debugging in case something goes wrong.
// If we reach here, we have not converged -- give some diagnostics
//==================================================================
#ifdef RH_DEBUG
printf("modrf overflow on interval %f %f\n", a, b);
printf("\t b-a = %12.5e\n", b-a);
printf("\t fa = %12.5e\n", fa);
printf("\t fb = %12.5e\n", fb);
printf("\t fx = %12.5e\n", fx);
// Evaluate the true values at a and b
if (invert)
{
fb = fa = *scoef;
for (fp = scoef + 1; fp <= ecoef; fp++)
{
fa = a * fa + *fp;
fb = b * fb + *fp;
}
}
else
{
fb = fa = *ecoef;
for (fp = ecoef - 1; fp >= scoef; fp--)
{
fa = a * fa + *fp;
fb = b * fb + *fp;
}
}
printf("\t true fa = %12.5e\n", fa);
printf("\t true fb = %12.5e\n", fb);
printf("\t gradient= %12.5e\n", (fb-fa)/(b-a));
// Print out the polynomial
printf("Polynomial coefficients\n");
for (fp = ecoef; fp >= scoef; fp--)
printf ("\t%12.5e\n", *fp);
#endif
return(0);
}
/*---------------------------------------------------------------------------
* modrf
*
* uses the modified regula-falsi method to evaluate the root
* in interval [a,b] of the polynomial described in coef. The
* root is returned in *val. The routine returns zero
* if it can't converge.
*--------------------------------------------------------------------------*/
__host__ __device__ int modrf (int ord, double *coef, double a, double b, double *val)
{
// This is an interface to modrf that takes account of different cases
// The idea is that the basic routine works badly for polynomials on
// intervals that extend well beyond [-1, 1], because numbers get too large
double *fp;
double *scoef = coef;
double *ecoef = &coef[ord];
const int invert = 1;
double fp1= 0.0, fm1 = 0.0; // Values of function at 1 and -1
double fa = 0.0, fb = 0.0; // Values at end points
// We assume that a < b
if (a > b)
{
double temp = a;
a = b;
b = temp;
}
// The normal case, interval is inside [-1, 1]
if (b <= 1.0 && a >= -1.0) return modrf_pos (ord, coef, a, b, val, !invert);
// The case where the interval is outside [-1, 1]
if (a >= 1.0 || b <= -1.0)
return modrf_pos (ord, coef, a, b, val, invert);
// If we have got here, then the interval includes the points 1 or -1.
// In this case, we need to evaluate at these points
// Evaluate the polynomial at the end points
for (fp = ecoef - 1; fp >= scoef; fp--)
{
fp1 = *fp + fp1;
fm1 = *fp - fm1;
fa = a * fa + *fp;
fb = b * fb + *fp;
}
// Then there is the case where the interval contains -1 or 1
if (a < -1.0 && b > 1.0)
{
// Interval spans both -1.0 and 1.0, so cut at those points
if (fa * fm1 < 0.0) // The solution is between a and -1
return modrf_pos (ord, coef, a, -1.0, val, invert);
else if (fb * fp1 < 0.0) // The solution is between 1 and b
return modrf_pos (ord, coef, 1.0, b, val, invert);
else // The solution is between -1 and 1
return modrf_pos(ord, coef, -1.0, 1.0, val, !invert);
}
else if (a < -1.0)
{
// Interval crosses over -1.0, so cut
if (fa * fm1 < 0.0) // The solution is between a and -1
return modrf_pos (ord, coef, a, -1.0, val, invert);
else // The solution is between -1 and b
return modrf_pos(ord, coef, -1.0, b, val, !invert);
}
else // b > 1.0
{
if (fb * fp1 < 0.0) // The solution is between 1 and b
return modrf_pos (ord, coef, 1.0, b, val, invert);
else // The solution is between a and 1
return modrf_pos(ord, coef, a, 1.0, val, !invert);
}
}
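/*---------------------------------------------------------------------------
 * Hedged usage sketch (illustrative only): bracketing a single root with
 * modrf. The bracket [1, 2] contains the root of x^2 - 2, so the modified
 * regula falsi converges to ~1.414214. RELERROR and MAXIT are assumed to be
 * defined earlier in this file; the helper name is hypothetical.
 *--------------------------------------------------------------------------*/
static void modrf_example_host()
{
  double coef[3] = {-2.0, 0.0, 1.0};   // x^2 - 2, ascending order
  double root = 0.0;
  if (modrf(2, coef, 1.0, 2.0, &root))
    printf("root in [1, 2]: %f\n", root);   // ~sqrt(2)
  else
    printf("modrf did not converge\n");
}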
/*---------------------------------------------------------------------------
* modp
*
* calculates the remainder of u(x) / v(x), leaving it in r; it
* returns 0 if r(x) is a constant.
* note: this function assumes the leading coefficient of v is 1 or -1
*--------------------------------------------------------------------------*/
__host__ __device__ static int modp(poly *u, poly *v, poly *r)
{
int j, k; /* Loop indices */
double *nr = r->coef;
double *end = &u->coef[u->ord];
double *uc = u->coef;
while (uc <= end)
*nr++ = *uc++;
if (v->coef[v->ord] < 0.0)
{
for (k = u->ord - v->ord - 1; k >= 0; k -= 2)
r->coef[k] = -r->coef[k];
for (k = u->ord - v->ord; k >= 0; k--)
for (j = v->ord + k - 1; j >= k; j--)
r->coef[j] = -r->coef[j] - r->coef[v->ord + k] * v->coef[j - k];
}
else
{
for (k = u->ord - v->ord; k >= 0; k--)
for (j = v->ord + k - 1; j >= k; j--)
r->coef[j] -= r->coef[v->ord + k] * v->coef[j - k];
}
k = v->ord - 1;
while (k >= 0 && fabs(r->coef[k]) < SMALL_ENOUGH)
{
r->coef[k] = 0.0;
k--;
}
r->ord = (k < 0) ? 0 : k;
return(r->ord);
}
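/*---------------------------------------------------------------------------
 * Hedged usage sketch (illustrative only): the remainder of (x^2 - 1) / x is
 * the constant -1, so modp writes {-1} into r and returns 0 (constant
 * remainder), which is exactly what terminates the Sturm-sequence
 * construction in buildsturm below. The helper name is hypothetical and it
 * assumes the poly struct declared earlier in this file.
 *--------------------------------------------------------------------------*/
static void modp_example_host()
{
  poly u, v, r;
  u.ord = 2; u.coef[0] = -1.0; u.coef[1] = 0.0; u.coef[2] = 1.0;  // x^2 - 1
  v.ord = 1; v.coef[0] =  0.0; v.coef[1] = 1.0;                   // x
  int ord = modp(&u, &v, &r);
  printf("r(x) = %f, ord = %d\n", r.coef[0], ord);   // -1.0, 0
}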
/*---------------------------------------------------------------------------
* buildsturm
*
* build up a Sturm sequence for the polynomial in sseq, returning
* the number of polynomials in the sequence
*--------------------------------------------------------------------------*/
__host__ __device__ int buildsturm(int ord, poly *sseq)
{
sseq[0].ord = ord;
sseq[1].ord = ord - 1;
/* calculate the derivative and normalise the leading coefficient */
{
int i; // Loop index
poly *sp;
double f = fabs(sseq[0].coef[ord] * ord);
double *fp = sseq[1].coef;
double *fc = sseq[0].coef + 1;
for (i=1; i<=ord; i++)
*fp++ = *fc++ * i / f;
/* construct the rest of the Sturm sequence */
for (sp = sseq + 2; modp(sp - 2, sp - 1, sp); sp++)
{
/* reverse the sign and normalise */
f = -fabs(sp->coef[sp->ord]);
for (fp = &sp->coef[sp->ord]; fp >= sp->coef; fp--)
*fp /= f;
}
sp->coef[0] = -sp->coef[0]; /* reverse the sign */
return(sp - sseq);
}
}
/*---------------------------------------------------------------------------
* numchanges
*
* return the number of sign changes in the Sturm sequence in
* sseq at the value a.
*--------------------------------------------------------------------------*/
__host__ __device__ int numchanges(int np, poly *sseq, double a)
{
int changes = 0;
double lf = evalpoly(sseq[0].ord, sseq[0].coef, a);
poly *s;
for (s = sseq + 1; s <= sseq + np; s++)
{
double f = evalpoly(s->ord, s->coef, a);
if (lf == 0.0 || lf * f < 0)
changes++;
lf = f;
}
return(changes);
}
/*---------------------------------------------------------------------------
* numroots
*
* return the number of distinct real roots of the polynomial described in sseq.
*--------------------------------------------------------------------------*/
__host__ __device__ int numroots (int np, poly *sseq, int *atneg, int *atpos, bool non_neg)
{
int atposinf = 0;
int atneginf = 0;
/* changes at positive infinity */
double f;
double lf = sseq[0].coef[sseq[0].ord];
poly *s;
for (s = sseq + 1; s <= sseq + np; s++)
{
f = s->coef[s->ord];
if (lf == 0.0 || lf * f < 0)
atposinf++;
lf = f;
}
// changes at negative infinity or zero
if (non_neg)
atneginf = numchanges(np, sseq, 0.0);
else
{
if (sseq[0].ord & 1)
lf = -sseq[0].coef[sseq[0].ord];
else
lf = sseq[0].coef[sseq[0].ord];
for (s = sseq + 1; s <= sseq + np; s++)
{
if (s->ord & 1)
f = -s->coef[s->ord];
else
f = s->coef[s->ord];
if (lf == 0.0 || lf * f < 0)
atneginf++;
lf = f;
}
}
*atneg = atneginf;
*atpos = atposinf;
return(atneginf - atposinf);
}
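/*---------------------------------------------------------------------------
 * Hedged worked example (illustrative only): counting the real roots of
 * p(x) = x^2 - 1 with the Sturm machinery above. The sequence built here is
 * {x^2 - 1, x, 1}, and numroots returns changes(-inf) - changes(+inf) = 2.
 * Maxdegree and the poly struct are assumed to be declared earlier in this
 * file; the helper name is hypothetical.
 *--------------------------------------------------------------------------*/
static void sturm_count_example_host()
{
  poly sseq[Maxdegree + 1];
  sseq[0].coef[0] = -1.0;   // p(x) = x^2 - 1, ascending order
  sseq[0].coef[1] = 0.0;
  sseq[0].coef[2] = 1.0;
  int np = buildsturm(2, sseq);
  int atneg = 0, atpos = 0;
  int nroots = numroots(np, sseq, &atneg, &atpos, false);
  printf("real roots of x^2 - 1: %d (expected 2)\n", nroots);
}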
/*---------------------------------------------------------------------------
* sbisect
*
* uses a bisection based on the Sturm sequence for the polynomial
* described in sseq to isolate intervals in which roots occur;
* the roots are returned in the roots array in increasing order.
*--------------------------------------------------------------------------*/
template <int recurse_depth>
__host__ __device__ int sbisect(int np, poly *sseq,
double min, double max,
int atmin, int atmax,
double *roots)
{
double mid;
int atmid;
int its;
int n1 = 0, n2 = 0;
int nroot = atmin - atmax;
if (nroot == 1)
{
/* first try a less expensive technique. */
if (modrf(sseq->ord, sseq->coef, min, max, &roots[0]))
return 1;
/*
* if we get here we have to evaluate the root the hard
* way by using the Sturm sequence.
*/
for (its = 0; its < MAXIT; its++)
{
mid = (double) ((min + max) / 2);
atmid = numchanges(np, sseq, mid);
if (fabs(mid) > RELERROR)
{
if (fabs((max - min) / mid) < RELERROR)
{
roots[0] = mid;
return 1;
}
}
else if (fabs(max - min) < RELERROR)
{
roots[0] = mid;
return 1;
}
if ((atmin - atmid) == 0)
min = mid;
else
max = mid;
}
if (its == MAXIT)
{
#ifdef RH_DEBUG
printf("sbisect: overflow min %f max %f\
diff %e nroot %d n1 %d n2 %d\n",
min, max, max - min, nroot, n1, n2);
#endif
roots[0] = mid;
}
return 1;
}
/* more than one root in the interval, we have to bisect */
for (its = 0; its < MAXIT; its++)
{
mid = (double) ((min + max) / 2);
atmid = numchanges(np, sseq, mid);
n1 = atmin - atmid;
n2 = atmid - atmax;
if (n1 != 0 && n2 != 0)
{
sbisect<recurse_depth + 1>(np, sseq, min, mid, atmin, atmid, roots);
sbisect<recurse_depth + 1>(np, sseq, mid, max, atmid, atmax, &roots[n1]);
break;
}
if (n1 == 0)
min = mid;
else
max = mid;
}
if (its == MAXIT)
{
#ifdef RH_DEBUG
printf("sbisect: roots too close together\n");
printf("sbisect: overflow min %f max %f diff %e\
nroot %d n1 %d n2 %d\n",
min, max, max - min, nroot, n1, n2);
#endif
for (n1 = atmax; n1 < atmin; n1++)
roots[n1 - atmax] = mid;
}
return 1;
}
template <>
__host__ __device__ int sbisect<MAX_RECURSE_DEPTH>(int np, poly *sseq,
double min, double max,
int atmin, int atmax,
double *roots)
{
return 1;
}
__host__ __device__ int find_real_roots_sturm(
double *p, int degree, double *roots, int *nroots, bool non_neg)
{
/*
* finds the roots of the input polynomial. They are returned in roots.
* It is assumed that roots is already allocated with space for the roots.
*/
poly sseq[Maxdegree+1];
double min, max;
int i, nchanges, np, atmin, atmax;
// Copy the coefficients from the input p. Normalize as we go
double norm = 1.0 / p[degree];
for (i=0; i<=degree; i++)
sseq[0].coef[i] = p[i] * norm;
// Now, also normalize the other terms
double val0 = fabs(sseq[0].coef[0]);
double fac = 1.0; // This will be a factor for the roots
if (val0 > 10.0) // Do this in case there are zero roots
{
fac = pow(val0, -1.0/degree);
double mult = fac;
for (int i=degree-1; i>=0; i--)
{
sseq[0].coef[i] *= mult;
mult = mult * fac;
}
}
/* build the Sturm sequence */
np = buildsturm(degree, sseq);
#ifdef RH_DEBUG
{
int i, j;
printf("Sturm sequence for:\n");
for (i=degree; i>=0; i--)
printf("%lf ", sseq[0].coef[i]);
printf("\n\n");
for (i = 0; i <= np; i++) {
for (j = sseq[i].ord; j >= 0; j--)
printf("%10f ", sseq[i].coef[j]);
printf("\n");
}
printf("\n");
}
#endif
// get the number of real roots
*nroots = numroots(np, sseq, &atmin, &atmax, non_neg);
if (*nroots == 0)
{
// printf("solve: no real roots\n");
return 0;
}
/* calculate the bracket that the roots live in */
if (non_neg) min = 0.0;
else
{
min = -1.0;
nchanges = numchanges(np, sseq, min);
for (i = 0; nchanges != atmin && i != MAXPOW; i++)
{
min *= 10.0;
nchanges = numchanges(np, sseq, min);
}
if (nchanges != atmin)
{
#ifdef RH_DEBUG
printf("solve: unable to bracket all negative roots\n");
#endif
atmin = nchanges;
}
}
max = 1.0;
nchanges = numchanges(np, sseq, max);
for (i = 0; nchanges != atmax && i != MAXPOW; i++)
{
max *= 10.0;
nchanges = numchanges(np, sseq, max);
}
if (nchanges != atmax)
{
#ifdef RH_DEBUG
printf("solve: unable to bracket all positive roots\n");
#endif
atmax = nchanges;
}
*nroots = atmin - atmax;
/* perform the bisection */
sbisect<0>(np, sseq, min, max, atmin, atmax, roots);
/* Finally, rescale the roots to undo the earlier normalization */
for (i=0; i<*nroots; i++)
roots[i] /= fac;
#ifdef RH_DEBUG
/* write out the roots */
printf("Number of roots = %d\n", *nroots);
for (i=0; i<*nroots; i++)
printf("%12.5e\n", roots[i]);
#endif
return 1;
}
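/*---------------------------------------------------------------------------
 * Hedged usage sketch (illustrative only): extracting the real roots of the
 * cubic (x + 1)(x - 1)(x - 2) = x^3 - 2x^2 - x + 2. The caller must
 * pre-allocate roots; Maxdegree entries are assumed to be enough, and the
 * helper name is hypothetical.
 *--------------------------------------------------------------------------*/
static void find_real_roots_example_host()
{
  double p[4] = {2.0, -1.0, -2.0, 1.0};   // ascending order
  double roots[Maxdegree];
  int nroots = 0;
  if (find_real_roots_sturm(p, 3, roots, &nroots, false /* non_neg */))
  {
    for (int i = 0; i < nroots; i++)
      printf("root[%d] = %f\n", i, roots[i]);   // expect -1, 1, 2 (up to RELERROR)
  }
}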
__host__ __device__ int find_num_real_roots_sturm ( double *p, int degree )
{
/*
* counts the number of distinct real roots of the input polynomial using a
* Sturm sequence; no roots are computed or returned.
*/
poly sseq[Maxdegree+1];
// Copy the coefficients from the input p. Normalize as we go
double norm = 1.0 / p[degree];
for (int i=0; i<=degree; i++)
sseq[0].coef[i] = p[i] * norm;
// Now, also normalize the other terms
double val0 = fabs(sseq[0].coef[0]);
double fac = 1.0; // This will be a factor for the roots
if (val0 > 10.0) // Do this in case there are zero roots
{
fac = pow(val0, -1.0/degree);
double mult = fac;
for (int i=degree-1; i>=0; i--)
{
sseq[0].coef[i] *= mult;
mult = mult * fac;
}
}
/* build the Sturm sequence */
int np = buildsturm(degree, sseq);
// get the number of real roots
int atmin, atmax;
int nroots = numroots(np, sseq, &atmin, &atmax, false);
return nroots;
}
#include "caffe/util/math_functions.hpp"
#include "caffe/util/gpu_util.cuh"
#include "caffe/layers/spixel_feature_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SpixelFeatureXYForwardGPU(const int nthreads,
const Dtype* index_data, const Dtype ignore_idx_value,
const int out_dim, const int height,
const int width, const int max_spixels,
const float xy_scale, Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int spatial_dim = height * width;
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
const int y = s / width;
const int x = s % width;
int count_offset = (n * max_spixels + idx);
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
if (k == 0) {
caffe_gpu_atomic_add((Dtype) xy_scale * y, top_data + top_offset);
} else if (k == 1) {
caffe_gpu_atomic_add((Dtype) xy_scale * x, top_data + top_offset);
}
}
caffe_gpu_atomic_add((Dtype) 1., count_data + count_offset);
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureRGBXYForwardGPU(const int nthreads,
const Dtype* bottom_data, const Dtype* index_data,
const Dtype ignore_idx_value,
const int out_dim, const int in_dim, const int height, const int width,
const int max_spixels, const float rgbxy_rgb_scale,
const float rgbxy_xy_scale, Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int spatial_dim = height * width;
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
const int y = s / width;
const int x = s % width;
int count_offset = (n * max_spixels + idx);
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
if (k < in_dim) {
int bottom_offset = ((n * in_dim + k) * spatial_dim + s);
caffe_gpu_atomic_add((Dtype) rgbxy_rgb_scale * bottom_data[bottom_offset],
top_data + top_offset);
} else if (k == in_dim) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * y, top_data + top_offset);
} else if (k == in_dim + 1) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * x, top_data + top_offset);
}
}
caffe_gpu_atomic_add((Dtype) 1., count_data + count_offset);
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureRGBForwardGPU(const int nthreads,
const Dtype* bottom_data, const Dtype* index_data,
const Dtype ignore_idx_value,
const int out_dim, const int in_dim, const int height, const int width,
const int max_spixels, const float rgb_scale,
Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int spatial_dim = height * width;
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
int count_offset = (n * max_spixels + idx);
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
if (k < in_dim) {
int bottom_offset = ((n * in_dim + k) * spatial_dim + s);
caffe_gpu_atomic_add((Dtype) rgb_scale * bottom_data[bottom_offset],
top_data + top_offset);
}
}
caffe_gpu_atomic_add((Dtype) 1., count_data + count_offset);
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureXYRGBXYForwardGPU(const int nthreads,
const Dtype* bottom_data, const Dtype* index_data,
const Dtype ignore_idx_value,
const int out_dim, const int in_dim, const int height, const int width,
const int max_spixels, const float xy_scale, const float rgbxy_rgb_scale,
const float rgbxy_xy_scale, Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int spatial_dim = height * width;
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
const int y = s / width;
const int x = s % width;
int count_offset = (n * max_spixels + idx);
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
if (k == 0) {
caffe_gpu_atomic_add((Dtype) xy_scale * y, top_data + top_offset);
} else if (k == 1) {
caffe_gpu_atomic_add((Dtype) xy_scale * x, top_data + top_offset);
} else if (k < in_dim + 2) {
int bottom_offset = ((n * in_dim + (k-2)) * spatial_dim + s);
caffe_gpu_atomic_add((Dtype) rgbxy_rgb_scale * bottom_data[bottom_offset],
top_data + top_offset);
} else if (k == in_dim + 2) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * y, top_data + top_offset);
} else if (k == in_dim + 3) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * x, top_data + top_offset);
}
}
caffe_gpu_atomic_add((Dtype) 1., count_data + count_offset);
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureRGBXYRGBXYForwardGPU(const int nthreads,
const Dtype* bottom_data, const Dtype* index_data,
const Dtype ignore_idx_value,
const int out_dim, const int in_dim, const int height, const int width,
const int max_spixels, const float rgb_scale,
const float xy_scale, const float rgbxy_rgb_scale,
const float rgbxy_xy_scale, Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int spatial_dim = height * width;
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
const int y = s / width;
const int x = s % width;
int count_offset = (n * max_spixels + idx);
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
if (k < in_dim) {
int bottom_offset = ((n * in_dim + k) * spatial_dim + s);
caffe_gpu_atomic_add((Dtype) rgb_scale * bottom_data[bottom_offset],
top_data + top_offset);
} else if (k == in_dim) {
caffe_gpu_atomic_add((Dtype) xy_scale * y, top_data + top_offset);
} else if (k == in_dim + 1) {
caffe_gpu_atomic_add((Dtype) xy_scale * x, top_data + top_offset);
} else if (k < 2 * in_dim + 2) {
int bottom_offset = ((n * in_dim + ((k-2) % in_dim)) * spatial_dim + s);
caffe_gpu_atomic_add((Dtype) rgbxy_rgb_scale * bottom_data[bottom_offset],
top_data + top_offset);
} else if (k == 2 * in_dim + 2) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * y, top_data + top_offset);
} else if (k == 2 * in_dim + 3) {
caffe_gpu_atomic_add((Dtype) rgbxy_xy_scale * x, top_data + top_offset);
}
}
caffe_gpu_atomic_add((Dtype) 1., count_data + count_offset);
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureAverageForwardGPU(const int nthreads,
const int max_spixels, const int out_dim, const float ignore_value,
Dtype* top_data, Dtype* count_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / max_spixels;
const int s = index % max_spixels;
const int count_offset = (n * max_spixels + s);
for (int k = 0; k < out_dim; k++) {
const int top_offset = ((n * out_dim + k) * max_spixels + s);
if (count_data[count_offset] == 0) {
top_data[top_offset] = ignore_value;
} else {
top_data[top_offset] /= count_data[count_offset];
}
}
}
}
template <typename Dtype>
__global__ void SpixelFeatureCopyToPixelsGPU(const int nthreads,
const Dtype* index_data, const Dtype ignore_idx_value,
const int spatial_dim, const int max_spixels,
const int out_dim, const float ignore_feature_value,
Dtype* top_data, Dtype* top_data_2) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int idx = static_cast<int>(index_data[n * spatial_dim + s]);
if (idx != ignore_idx_value) {
for (int k = 0; k < out_dim; k++) {
int top_offset = ((n * out_dim + k) * max_spixels + idx);
int top_offset_2 = ((n * out_dim + k) * spatial_dim + s);
top_data_2[top_offset_2] = top_data[top_offset];
}
}
else {
for (int k = 0; k < out_dim; k++) {
int top_offset_2 = ((n * out_dim + k) * spatial_dim + s);
top_data_2[top_offset_2] = ignore_feature_value;
}
}
}
}
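/*
Hedged note (illustrative, not used by the layer): every kernel above indexes
the superpixel-feature blob channel-major, ((n * out_dim + k) * max_spixels +
idx), and the optional per-pixel output as ((n * out_dim + k) * spatial_dim +
s). The hypothetical helper below just spells out that offset math.
*/
__host__ __device__ inline int spixel_top_offset(const int n, const int k,
    const int out_dim, const int max_spixels, const int idx) {
  return (n * out_dim + k) * max_spixels + idx;
}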
/*
Forward GPU function
*/
template <typename Dtype>
void SpixelFeatureLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
caffe_gpu_set(top[0]->count(), (Dtype)0., top[0]->mutable_gpu_data());
caffe_gpu_set(spixel_counts_.count(), (Dtype)0.,
spixel_counts_.mutable_gpu_data());
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* index_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* count_data = spixel_counts_.mutable_gpu_data();
switch (this->layer_param_.spixel_feature_param().type()) {
case SpixelFeatureParameter_Feature_AVGXY: {
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, index_data, ignore_idx_value_,
out_channels_, height_, width_,
max_spixels_, xy_scale_, top_data,
count_data);
const int nthreads2 = num_ * max_spixels_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureAverageForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads2),
CAFFE_CUDA_NUM_THREADS>>>(nthreads2, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, count_data);
break;
}
case SpixelFeatureParameter_Feature_AVGRGBXY: {
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureRGBXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, index_data,
ignore_idx_value_,
out_channels_, in_channels_, height_, width_,
max_spixels_, rgbxy_rgb_scale_, rgbxy_xy_scale_,
top_data, count_data);
const int nthreads2 = num_ * max_spixels_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureAverageForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads2),
CAFFE_CUDA_NUM_THREADS>>>(nthreads2, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, count_data);
break;
}
case SpixelFeatureParameter_Feature_AVGXYRGBXY: {
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureXYRGBXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, index_data,
ignore_idx_value_,
out_channels_, in_channels_, height_, width_,
max_spixels_, xy_scale_,
rgbxy_rgb_scale_, rgbxy_xy_scale_,
top_data, count_data);
const int nthreads2 = num_ * max_spixels_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureAverageForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads2),
CAFFE_CUDA_NUM_THREADS>>>(nthreads2, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, count_data);
break;
}
case SpixelFeatureParameter_Feature_AVGRGBXYRGBXY: {
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureRGBXYRGBXYForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, index_data,
ignore_idx_value_,
out_channels_, in_channels_, height_, width_,
max_spixels_, rgb_scale_, xy_scale_,
rgbxy_rgb_scale_, rgbxy_xy_scale_,
top_data, count_data);
const int nthreads2 = num_ * max_spixels_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureAverageForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads2),
CAFFE_CUDA_NUM_THREADS>>>(nthreads2, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, count_data);
break;
}
case SpixelFeatureParameter_Feature_AVGRGB: {
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureRGBForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, index_data,
ignore_idx_value_,
out_channels_, in_channels_, height_, width_,
max_spixels_, rgb_scale_,
top_data, count_data);
const int nthreads2 = num_ * max_spixels_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureAverageForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads2),
CAFFE_CUDA_NUM_THREADS>>>(nthreads2, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, count_data);
break;
}
default:
LOG(FATAL) << "Undefined feature type of superpixel feature";
}
if (top.size() > 1) {
caffe_gpu_set(top[1]->count(), (Dtype)0., top[1]->mutable_gpu_data());
Dtype* top_data_2 = top[1]->mutable_gpu_data();
const int nthreads = num_ * height_ * width_;
// NOLINT_NEXT_LINE(whitespace/operators)
SpixelFeatureCopyToPixelsGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, index_data, ignore_idx_value_,
height_ * width_, max_spixels_,
out_channels_, ignore_feature_value_,
top_data, top_data_2);
}
}
/*
Backward GPU function (NOT_IMPLEMENTED for now)
*/
template <typename Dtype>
void SpixelFeatureLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
}
INSTANTIATE_LAYER_GPU_FUNCS(SpixelFeatureLayer);
} // namespace caffe
#include "TensorCUDA.hpp"
#include <stdio.h>
#include "Macros.hpp"
#include "ensure.hpp"
#include <memory>
////////////////////////////////////////////////////////////
/// NAMESPACE AI
////////////////////////////////////////////////////////////
namespace ai
{
////////////////////////////////////////////////////////////
/// ERROR HANDLING
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void HandleError(cudaError_t err, const char* file, int line)
{
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__))
////////////////////////////////////////////////////////////
/// UTIL
////////////////////////////////////////////////////////////
//Get nearest lower power of two
unsigned int low_pow2 (unsigned int x)
{
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
return x - (x >> 1);
}
//Get nearest higher power of two
unsigned long high_pow2(unsigned long v)
{
v--;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
v++;
return v;
}
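////////////////////////////////////////////////////////////
//Hedged sketch (illustrative only, hypothetical helper): the two functions
//above round a value down/up to a power of two, e.g. low_pow2(300) == 256
//and high_pow2(300) == 512; they are used further below to size CUDA launches.
static void pow2_example()
{
	printf("low_pow2(300) = %u, high_pow2(300) = %lu\n",
		low_pow2(300), high_pow2(300));   // 256, 512
}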
////////////////////////////////////////////////////////////
/// KERNELS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill(float* t, float val, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] = val;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_fill_random(float* t, float mean, float dev, unsigned int seed, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int tseed = seed * (tid + 1);
while (tid < size) {
tseed ^= tseed << 13;
tseed ^= tseed >> 17;
tseed ^= tseed << 5;
t[tid] = mean - dev + ((float)tseed / UINT_MAX) * dev * 2.f;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_scale(float* t, float factor, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t[tid] *= factor;
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_diff(float* t1, float* t2, float* tout, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
tout[tid] = t1[tid] - t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_add(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] += t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
__global__ void knl_tensor_copy(float* t1, float* t2, int size)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
while (tid < size) {
t1[tid] = t2[tid];
tid += blockDim.x * gridDim.x;
}
}
////////////////////////////////////////////////////////////
/// TENSOR GPU
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA()
{
_data = NULL;
_size = 0;
_depth = _height = _width = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(const TensorCUDA<T>& t)
{
point(t);
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width)
{
_width = width;
_height = 1;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T) ));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height)
{
_width = width;
_height = height;
_depth = 1;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::TensorCUDA(int width, int height, int depth)
{
_width = width;
_height = height;
_depth = depth;
_size = _width * _depth * _height;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T>::~TensorCUDA()
{
clear();
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::load(ai::IOData& data, std::string dataname)
{
clear();
IOData* node_width = data.findNode(dataname + "_width");
IOData* node_height = data.findNode(dataname + "_height");
IOData* node_depth = data.findNode(dataname + "_depth");
IOData* node_data = data.findNode(dataname + "_data");
ensure(node_width != NULL);
ensure(node_height != NULL);
ensure(node_depth != NULL);
ensure(node_data != NULL);
node_width->get(_width);
node_height->get(_height);
node_depth->get(_depth);
_size = _width * _height * _depth;
std::unique_ptr<T[]> tmp(new T[_size]); //array deleter to match new[]
node_data->get(reinterpret_cast<char*>(&tmp.get()[0]));
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::load(std::ifstream& file)
{
clear();
file.read(reinterpret_cast<char*>(&_size), sizeof(_size));
file.read(reinterpret_cast<char*>(&_width), sizeof(_width));
file.read(reinterpret_cast<char*>(&_height), sizeof(_height));
file.read(reinterpret_cast<char*>(&_depth), sizeof(_depth));
_owner = true;
std::unique_ptr<T[]> tmp(new T[_size]);
file.read(reinterpret_cast<char*>(&tmp.get()[0]), sizeof(T) * _size);
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
copyToDevice(&tmp.get()[0], _size);
}
////////////////////////////////////////////////////////////
template <typename T>
void TensorCUDA<T>::save(ai::IOData& data, std::string dataname)
{
std::unique_ptr<T[]> tmp_safe(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
data.pushNode(dataname + "_width", _width);
data.pushNode(dataname + "_height", _height);
data.pushNode(dataname + "_depth", _depth);
data.pushNode(dataname + "_data", reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::save(std::ofstream& file)
{
std::unique_ptr<T[]> tmp_safe(new T[_size]);
T* tmp = tmp_safe.get();
copyToHost(&tmp[0], _size);
file.write(reinterpret_cast<char*>(&_size), sizeof(_size));
file.write(reinterpret_cast<char*>(&_width), sizeof(_width));
file.write(reinterpret_cast<char*>(&_height), sizeof(_height));
file.write(reinterpret_cast<char*>(&_depth), sizeof(_depth));
file.write(reinterpret_cast<char*>(&tmp[0]), sizeof(T) * _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width)
{
ensure(width > 0);
clear();
_width = width;
_height = 1;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height)
{
ensure(width > 0 && height > 0);
clear();
_width = width;
_height = height;
_depth = 1;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(const int width, const int height, const int depth)
{
ensure(width > 0 && height > 0 && depth > 0);
clear();
_width = width;
_height = height;
_depth = depth;
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::setshape(Tensor<T>& host_tensor)
{
clear();
_width = host_tensor.width();
_height = host_tensor.height();
_depth = host_tensor.depth();
_size = _width * _height * _depth;
_owner = true;
HANDLE_ERROR( cudaMalloc( &_data, _size * sizeof(T)) );
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t)
{
clear();
_data = t._data;
_size = t._width * t._height * t._depth;
_width = t._width;
_height = t._height;
_depth = t._depth;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d)
{
clear();
_data = &t._data[offset_d * t._width * t._height];
_size = t._width * t._height;
_width = t._width;
_height = t._height;
_depth = 1;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::point(const TensorCUDA& t, const unsigned int offset_d, const unsigned int offset_y)
{
clear();
_data = &t._data[offset_d * t._width * t._height + offset_y * t._width];
_size = t._width;
_width = t._width;
_height = 1;
_depth = 1;
_owner = false;
}
//////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::clear()
{
if (_data != NULL && _size != 0 && _owner == true)
HANDLE_ERROR( cudaFree(_data) );
_data = NULL;
_size = 0;
_width = 0;
_height = 0;
_depth = 0;
_owner = false;
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(T val)
{
std::unique_ptr<T[]> tmp_safe(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++) temp[i] = val;
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::fill(float mean, float dev)
{
std::unique_ptr<T[]> tmp_safe(new T[_size]);
T* temp = tmp_safe.get();
for (int i = 0; i < _size; i++)
temp[i] = (T)( mean - dev + ((double)rand() / RAND_MAX) * dev * 2.f);
copyToDevice(temp, _size);
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToHost(T *arr, int size) const
{
ensure(size <= _size);
HANDLE_ERROR( cudaMemcpy( arr, _data, size * sizeof(T), cudaMemcpyDeviceToHost));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copyToDevice(const T *arr, int size)
{
ensure(size <= _size);
HANDLE_ERROR( cudaMemcpy(_data, arr, size * sizeof(T), cudaMemcpyHostToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
void TensorCUDA<T>::copy(const TensorCUDA<T>& tensor)
{
if (width() != tensor.width() || height() != tensor.height() || depth() != tensor.depth())
setshape((int)tensor.width(), (int)tensor.height(), (int)tensor.depth());
HANDLE_ERROR( cudaMemcpy(_data, tensor.pointer(), tensor.size() * sizeof(T), cudaMemcpyDeviceToDevice));
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d)
{
TensorCUDA<T> t;
t.point(*this, d);
return t;
}
////////////////////////////////////////////////////////////
template < typename T >
TensorCUDA<T> TensorCUDA<T>::ptr(const int d, const int y)
{
TensorCUDA<T> t;
t.point(*this, d, y);
return t;
}
////////////////////////////////////////////////////////////
/// TYPE SPECIFIC FUNCTIONS
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float val)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(t.size() / _threads + 1, CUDA_MAX_CORES); //enough blocks to cover the tensor; the grid-stride loop handles any tail
knl_tensor_fill<<<_blocks, _threads>>>(t.pointer(), val, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_fill(TensorCUDA_float& t, float mean, float dev)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(t.size() / _threads + 1, CUDA_MAX_CORES);
knl_tensor_fill_random<<<_blocks, _threads>>>(t.pointer(), mean, dev, rand(), t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_scale(TensorCUDA_float& t, float factor)
{
ensure(t.pointer() != NULL && t.size() != 0);
int _threads = min(low_pow2(t.size()), CUDA_MAX_THREADS);
int _blocks = min(t.size() / _threads + 1, CUDA_MAX_CORES);
knl_tensor_scale<<<_blocks, _threads>>>(t.pointer(), factor, t.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_diff(TensorCUDA_float& t1, TensorCUDA_float& t2, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t1.pointer() != NULL && t2.pointer() != NULL &&
(tout.size() == t1.size() && tout.size() == t2.size()));
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(tout.size() / _threads + 1, CUDA_MAX_CORES);
knl_tensor_diff<<<_blocks, _threads>>>(t1.pointer(), t2.pointer(), tout.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_sum(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(tout.size() / _threads + 1, CUDA_MAX_CORES);
knl_tensor_add<<<_blocks, _threads>>>(tout.pointer(), t.pointer(), tout.size());
}
////////////////////////////////////////////////////////////
void TensorCUDA_float_copy(TensorCUDA_float& t, TensorCUDA_float& tout)
{
ensure(tout.pointer() != NULL && t.pointer() != NULL && tout.size() == t.size());
int _threads = min(low_pow2(tout.size()), CUDA_MAX_THREADS);
int _blocks = min(tout.size() / _threads + 1, CUDA_MAX_CORES);
knl_tensor_copy<<<_blocks, _threads>>>(tout.pointer(), t.pointer(), tout.size());
}
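////////////////////////////////////////////////////////////
//Hedged usage sketch (illustrative only, hypothetical helper): a typical
//round trip with the float helpers above, assuming the TensorCUDA_float
//typedef from the header -- allocate on the device, fill, scale, read back.
static void tensor_cuda_float_demo()
{
	TensorCUDA_float t(4, 4);          // 4x4 device tensor
	TensorCUDA_float_fill(t, 1.f);     // every element becomes 1
	TensorCUDA_float_scale(t, 0.5f);   // every element becomes 0.5
	float host[16];
	t.copyToHost(host, 16);
	printf("t[0] = %f\n", host[0]);    // expected: 0.5
}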
//Specialization
template < > void TensorCUDA<float*>::fill(float mean, float dev) { }
//Explicit instantiations
template class TensorCUDA<float>;
template class TensorCUDA<float*>;
template class TensorCUDA<int>;
////////////////////////////////////////////////////////////
} //namespace ai
#include <typeinfo>
// total number of triangles over all accepted topologies with up to __3__ triangles each
#define NumTri 220
#define NumTop 96
__constant__ float eps=1e-6;
__constant__ float thres=1e-4;
// up to __3__ triangles
__constant__ int acceptTopologyWithFlip[2][96]={ {1, 2, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 19, 25, 31, 32, 34, 35, 38, 47, 48, 49, 50, 51, 55, 59, 63, 64, 68, 70, 76, 79, 96, 98, 100, 102, 103, 110, 111, 112, 115, 118, 119, 127, 0, 255, 128, 136, 137, 140, 143, 144, 145, 152, 153, 155, 157, 159, 176, 179, 185, 187, 191, 192, 196, 200, 204, 205, 206, 207, 208, 217, 220, 221, 223, 224, 230, 236, 238, 239, 240, 241, 242, 243, 244, 246, 247, 248, 249, 251, 252, 253, 254},
{1, 1, 2, 1, 2, 3, 1, 2, 3, 2, 3, 3, 2, 1, 2, 3, 3, 3, 1, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 2, 1, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 2, 3, 3, 3, 2, 1, 0, 0, 1, 2, 3, 3, 3, 2, 3, 3, 2, 3, 3, 2, 3, 3, 3, 2, 1, 2, 3, 3, 2, 3, 3, 2, 3, 3, 3, 2, 1, 3, 3, 3, 2, 1, 2, 3, 3, 2, 3, 2, 1, 3, 2, 1, 2, 1, 1}};
// look-up-table in Marching Cubes Algorithm
__constant__ int triTable[256][16] =
{{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
__constant__ int vertices_to_offset[12][4]={ {0, 1, 1, 0}, // #0
{1, 1, 1, 0}, // #1
{0, 1, 0, 0}, // #2
{1, 0, 1, 0}, // #3
{0, 1, 1, 1}, // #4
{1, 1, 1, 1}, // #5
{0, 1, 0, 1}, // #6
{1, 0, 1, 1}, // #7
{2, 0, 1, 1}, // #8
{2, 1, 1, 1}, // #9
{2, 1, 0, 1}, // #10
{2, 0, 0, 1}}; // #11
namespace{
/**
* get the vertex locations from the vertex displacement field
*/
template <typename scalar_t>
__device__ void offset_to_vertices_cuda(const scalar_t *offset, const int W, const int H, const int D, const int x, const int y, const int z, scalar_t *vertices){
// #0
vertices[0 ] = 0.5-offset[0 + (x+1)*H*D + (y+1)*D + z ];
vertices[1 ] = 1.0;
vertices[2 ] = 0.0;
// #1
vertices[3 ] = 1.0;
vertices[4 ] = 0.5-offset[1*W*H*D + (x+1)*H*D + (y+1)*D + z ];
vertices[5 ] = 0.0;
// #2
vertices[6 ] = 0.5-offset[0 + (x+1)*H*D + (y )*D + z ];
vertices[7 ] = 0.0;
vertices[8 ] = 0.0;
// #3
vertices[9 ] = 0.0;
vertices[10] = 0.5-offset[1*W*H*D + (x )*H*D + (y+1)*D + z ];
vertices[11] = 0.0;
// #4
vertices[12] = 0.5-offset[0 + (x+1)*H*D + (y+1)*D + z+1 ];
vertices[13] = 1.0;
vertices[14] = 1.0;
// #5
vertices[15] = 1.0;
vertices[16] = 0.5-offset[1*W*H*D + (x+1)*H*D + (y+1)*D + z+1 ];
vertices[17] = 1.0;
// #6
vertices[18] = 0.5-offset[0 + (x+1)*H*D + (y )*D + z+1 ];
vertices[19] = 0.0;
vertices[20] = 1.0;
// #7
vertices[21] = 0.0;
vertices[22] = 0.5-offset[1*W*H*D + (x )*H*D + (y+1)*D + z+1 ];
vertices[23] = 1.0;
// #8
vertices[24] = 0.0;
vertices[25] = 1.0;
vertices[26] = 0.5-offset[2*W*H*D + (x )*H*D + (y+1)*D + z+1 ];
// #9
vertices[27] = 1.0;
vertices[28] = 1.0;
vertices[29] = 0.5-offset[2*W*H*D + (x+1)*H*D + (y+1)*D + z+1 ];
// #10
vertices[30] = 1.0;
vertices[31] = 0.0;
vertices[32] = 0.5-offset[2*W*H*D + (x+1)*H*D + (y )*D + z+1 ];
// #11
vertices[33] = 0.0;
vertices[34] = 0.0;
vertices[35] = 0.5-offset[2*W*H*D + (x )*H*D + (y )*D + z+1 ];
}
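/**
 * Hedged worked example (illustrative only): with an all-zero displacement
 * field, every vertex written by offset_to_vertices_cuda is the midpoint of
 * its cube edge, e.g. vertex #0 becomes (0.5, 1.0, 0.0) and vertex #9 becomes
 * (1.0, 1.0, 0.5) in the local cell frame. The hypothetical device helper
 * below just exercises that case for cell (x, y, z) = (0, 0, 0).
 */
template <typename scalar_t>
__device__ void offset_to_vertices_zero_example(const scalar_t *zero_offset,
        const int W, const int H, const int D, scalar_t *vertices){
    // zero_offset is assumed to point at 3*W*H*D zeros; vertices holds 12*3 values
    offset_to_vertices_cuda(zero_offset, W, H, D, 0, 0, 0, vertices);
    // vertices[0..2] == {0.5, 1.0, 0.0}, vertices[27..29] == {1.0, 1.0, 0.5}
}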
/**
* check the intersection between two integer lists
* param:
* array1 input, integer list denoting the vertex indices on a single face, length 4
* array2 input, integer list denoting the vertex indices of a triangle, length 3
* out output, intersected vertex indices, padded with -1 to a fixed length, length 3
*/
//template <typename scalar_t>
__device__ void intersection(const int *array1, const int *array2, int *out){
int count = 0;
// initialization
for (int i=0; i<3; i++){
out[i] = -1;
}
for (int i=0; i<4; i++){
for (int j=0; j<3; j++){
if (array2[j]==array1[i]){
out[count] = array1[i];
count ++;
}
}
}
}
/**
* return the vertex indices on a given surface of a cell
*/
//template <typename scalar_t>
__device__ void get_vertices_on_face(const int r, int *row){
int vertices_on_location[6][4] = { {5, 9, 1, 10},
{7, 8, 3, 11},
{4, 9, 0, 8},
{6, 10, 2, 11},
{4, 5, 6, 7},
{0, 1, 2, 3} };
for (int i=0; i<4; i++){
row[i]=vertices_on_location[r][i];
}
}
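/**
 * Hedged usage sketch (illustrative only, hypothetical helper): intersecting
 * the vertex list of face r=5 ({0, 1, 2, 3}, the four edge vertices with
 * z = 0) with the triangle {0, 8, 3} from triTable gives out = {0, 3, -1};
 * exactly two shared vertices means one triangle edge lies on that face,
 * which is the test offset_to_normals performs below.
 */
__device__ void intersection_example(int *out){
    int face[4];
    get_vertices_on_face(5, face);   // face r=5 -> vertices {0, 1, 2, 3}
    int tri[3] = {0, 8, 3};          // first triangle of topology 1
    intersection(face, tri, out);    // out = {0, 3, -1}
}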
/**
* calculate dn/dpb
* 0 c3-a3 -(c2-a2)
* -(c3-a3) 0 c1-a1
* c2-a2 -(c1-a1) 0
*/
template <typename scalar_t>
__device__ void dn_dpb( const scalar_t *vertices, const int a, const int c, const scalar_t *dn, scalar_t *db ){
scalar_t d3 = vertices[c*3+2] - vertices[a*3+2];
scalar_t d2 = vertices[c*3+1] - vertices[a*3+1];
scalar_t d1 = vertices[c*3+0] - vertices[a*3+0];
db[0] = d3*dn[1] - d2*dn[2];
db[1] = -d3*dn[0] + d1*dn[2];
db[2] = d2*dn[0] - d1*dn[1] ;
}
/**
* dn/dpc
* 0 -(b3-a3) b2-a2
* (b3-a3) 0 -(b1-a1)
* -(b2-a2) b1-a1 0
*/
template <typename scalar_t>
__device__ void dn_dpc( const scalar_t *vertices, const int a, const int b, const scalar_t *dn, scalar_t *dc ){
scalar_t d3 = vertices[b*3+2] - vertices[a*3+2];
scalar_t d2 = vertices[b*3+1] - vertices[a*3+1];
scalar_t d1 = vertices[b*3+0] - vertices[a*3+0];
dc[0] = - d3*dn[1] + d2*dn[2];
dc[1] = d3*dn[0] - d1*dn[2];
dc[2] = -d2*dn[0] + d1*dn[1] ;
}
/**
* calculate dn/dpa
* 0 b3-c3 -(b2-c2)
* -(b3-c3) 0 b1-c1
* b2-c2 -(b1-c1) 0
*/
template <typename scalar_t>
__device__ void dn_dpa( const scalar_t *vertices, const int b, const int c, const scalar_t *dn, scalar_t *da ){
scalar_t d3 = vertices[b*3+2] - vertices[c*3+2];
scalar_t d2 = vertices[b*3+1] - vertices[c*3+1];
scalar_t d1 = vertices[b*3+0] - vertices[c*3+0];
da[0] = d3*dn[1] - d2*dn[2];
da[1] = -d3*dn[0] + d1*dn[2];
da[2] = d2*dn[0] - d1*dn[1] ;
}
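/*
 * Added note tying the three routines above together: for the unnormalized face
 * normal n = (b - a) x (c - a), the Jacobians with respect to the three vertices are
 * the skew-symmetric cross-product matrices
 *     dn/da = [c - b]_x,   dn/db = [a - c]_x,   dn/dc = [b - a]_x,
 * where [w]_x v = w x v.  dn_dpa/dn_dpb/dn_dpc simply multiply the incoming gradient
 * dn by the matrices written out in their comments, which is why only the three
 * difference components d1, d2, d3 appear in each routine.
 */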
/**
* offset_to_normals, return normal vectors of all triangles (NOT topologies)
 * params:
 * 		  offset 			input
 * 		  W 				input, number of cells along one of the grid directions
 * 		  H 				input, number of cells along one of the grid directions
 * 		  D 				input, number of cells along one of the grid directions
 * 		  i_ 				input, index of the cell along one of the grid directions
 * 		  j_ 				input, index of the cell along one of the grid directions
 * 		  k_ 				input, index of the cell along one of the grid directions
* location input, indicating the relative location of the current cell in the pairwise loss
* 0: x1
* 1: x2
* 2: y1
* 3: y2
* 4: z1
* 5: z2
* 6: dummy case for inner cell loss
* 7: dummy case for inner cell loss
* normal output
* length output, return the length of the normal vector for computing the gradient
*/
template <typename scalar_t>
__device__ void offset_to_normals(const scalar_t *offset, const int W, const int H, const int D, const int i_, const int j_, const int k_, const int location, scalar_t *normal, scalar_t *length){
// offset_to_vertices
scalar_t vertices[12*3];
offset_to_vertices_cuda(offset, W, H, D, i_, j_, k_, vertices);
int vertices_on_face[4];
get_vertices_on_face(location, vertices_on_face);
int tri_cnt = 0;
for (int i = 0; i < NumTop; i++){
int top_ind = acceptTopologyWithFlip[0][i];
int num_triangle = acceptTopologyWithFlip[1][i];
for (int tri_ind = 0; tri_ind<num_triangle; tri_ind++){
// get the indices of the triangle vertices
int triangle[3] = {triTable[top_ind][tri_ind*3], triTable[top_ind][tri_ind*3+1], triTable[top_ind][tri_ind*3+2]};
// check if the triangle has a line on the face we care about
// simply assign a dummy normal vector if not
int inter_ind[3];
intersection(vertices_on_face, triangle, inter_ind);
// location > 5 means inner case instead of x, y, z direction
if (location>5 || (location <=5 && inter_ind[0]>-1 && inter_ind[1]>-1 && inter_ind[2]==-1) ){
// consider inside/outside, then the direction of the normal vector
// decided by the look-up-table
int a, b, c;
a = triangle[0];
b = triangle[1];
c = triangle[2];
// compute the normal
scalar_t vec1[3] = { vertices[b*3+0] - vertices[a*3+0],
vertices[b*3+1] - vertices[a*3+1],
vertices[b*3+2] - vertices[a*3+2] };
scalar_t vec2[3] = { vertices[c*3+0] - vertices[a*3+0],
vertices[c*3+1] - vertices[a*3+1],
vertices[c*3+2] - vertices[a*3+2] };
// cross product
scalar_t cross[3] = { vec1[1]*vec2[2] - vec1[2]*vec2[1],
vec1[2]*vec2[0] - vec1[0]*vec2[2],
vec1[0]*vec2[1] - vec1[1]*vec2[0] };
// normalized to unit vector
scalar_t l2 = sqrt(cross[0]*cross[0] + cross[1]*cross[1] + cross[2]*cross[2]);
if (l2<eps) { l2=eps; }
	  	 	// copy to the normal array, which stores the normals of all triangles
normal[tri_cnt*3 + 0] = cross[0]/l2;
normal[tri_cnt*3 + 1] = cross[1]/l2;
normal[tri_cnt*3 + 2] = cross[2]/l2;
length[tri_cnt] = l2;
}
else{
// set dummy normal vector
normal[tri_cnt*3 + 0] = 1.0;
normal[tri_cnt*3 + 1] = 1.0;
normal[tri_cnt*3 + 2] = 1.0;
}
tri_cnt ++;
}
}
}
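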
/**
* calculate the gradient back-propagated to the offset
*/
template <typename scalar_t>
__device__ void grad_normal_to_offset(scalar_t *grad_offset, const scalar_t *grad_normal, const scalar_t *offset, const int W, const int H, const int D, const int i_, const int j_, int k_, const int location){
// offset_to_vertices
scalar_t vertices[12*3];
offset_to_vertices_cuda(offset, W, H, D, i_, j_, k_, vertices);
int vertices_on_face[4];
get_vertices_on_face(location, vertices_on_face);
int tri_cnt = 0;
for (int i = 0; i < NumTop; i++){
int top_ind = acceptTopologyWithFlip[0][i];
int num_triangle = acceptTopologyWithFlip[1][i];
for (int tri_ind = 0; tri_ind<num_triangle; tri_ind++){
// get the gradient on the normal vector of the current triangle
scalar_t grad_tri[3] = {grad_normal[tri_cnt*3 + 0], grad_normal[tri_cnt*3 +1], grad_normal[tri_cnt*3 + 2]};
// get the indices of the triangle vertices
int triangle[3] = {triTable[top_ind][tri_ind*3], triTable[top_ind][tri_ind*3+1], triTable[top_ind][tri_ind*3+2]};
// check if the triangle has a line on the face we care about
	  	 	// skip the gradient contribution if not
int inter_ind[3];
intersection(vertices_on_face, triangle, inter_ind);
// location > 5 means inner case instead of x, y, z direction
if (location>5 || (location <=5 && inter_ind[0]>-1 && inter_ind[1]>-1 && inter_ind[2]==-1) ){
// consider inside/outside, then the direction of the normal vector
// decided by the look-up-table
int a, b, c;
a = triangle[0];
b = triangle[1];
c = triangle[2];
// dn_da
scalar_t da[3];
dn_dpa(vertices, b, c, grad_tri, da);
atomicAdd( &grad_offset[ vertices_to_offset[a][0]*W*H*D +
(vertices_to_offset[a][1]+i_)*H*D +
(vertices_to_offset[a][2]+j_)*D +
vertices_to_offset[a][3]+k_],
da[vertices_to_offset[a][0]] );
// dn_db
scalar_t db[3];
dn_dpb(vertices, a, c, grad_tri, db);
atomicAdd( &grad_offset[ vertices_to_offset[b][0]*W*H*D +
(vertices_to_offset[b][1]+i_)*H*D +
(vertices_to_offset[b][2]+j_)*D +
vertices_to_offset[b][3]+k_],
db[vertices_to_offset[b][0]] );
// dn_dc
scalar_t dc[3];
dn_dpc(vertices, a, b, grad_tri, dc);
atomicAdd( &grad_offset[ vertices_to_offset[c][0]*W*H*D +
(vertices_to_offset[c][1]+i_)*H*D +
(vertices_to_offset[c][2]+j_)*D +
vertices_to_offset[c][3]+k_],
dc[vertices_to_offset[c][0]] );
}
tri_cnt++;
}
}
}
/**
* calculate d(normalized normal vector)/d(normal vector)
*/
template <typename scalar_t>
__device__ void grad_normalized_to_normal(scalar_t *grad_normal, const scalar_t *normal, const scalar_t l){
scalar_t orig_normal[3] = {normal[0]*l, normal[1]*l, normal[2]*l};
scalar_t l3 = l*l*l;
scalar_t g00 = (orig_normal[1]*orig_normal[1] + orig_normal[2]*orig_normal[2])/l3;
scalar_t g01 = -orig_normal[0]*orig_normal[1]/l3;
scalar_t g02 = -orig_normal[0]*orig_normal[2]/l3;
scalar_t g10 = g01;
scalar_t g11 = (orig_normal[0]*orig_normal[0] + orig_normal[2]*orig_normal[2])/l3;
scalar_t g12 = -orig_normal[1]*orig_normal[2]/l3;
scalar_t g20 = g02;
scalar_t g21 = g12;
scalar_t g22 = (orig_normal[0]*orig_normal[0] + orig_normal[1]*orig_normal[1])/l3;
scalar_t g1 = grad_normal[0]*g00 + grad_normal[1]*g01 + grad_normal[2]*g02;
scalar_t g2 = grad_normal[0]*g10 + grad_normal[1]*g11 + grad_normal[2]*g12;
scalar_t g3 = grad_normal[0]*g20 + grad_normal[1]*g21 + grad_normal[2]*g22;
grad_normal[0] = g1;
grad_normal[1] = g2;
grad_normal[2] = g3;
}
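/*
 * For reference (added note): with n the unnormalized normal and l = |n|, the
 * Jacobian of the normalization n/l is
 *     d(n/l)/dn = (l^2 I - n n^T) / l^3,
 * which is exactly the symmetric matrix g assembled above
 * (e.g. g00 = (n1^2 + n2^2)/l^3 = (l^2 - n0^2)/l^3), applied to the incoming gradient.
 */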
/**
 * calculate the loss between two neighboring cells
 * params:
 * 		  offset 		input, the vertex displacement field of the full grid
 * 		  topology 		input, probability for each triangle
 * 		  mask 			input, mask denoting if two topologies have connected triangles or not
 * 		  loss 			output, curvature loss
 * 		  direction	 	input, an integer denoting the neighboring relationship between two cells
 *		  				0: two cells adjacent in x direction
 *		  				1: two cells adjacent in y direction
 *		  				2: two cells adjacent in z direction
 *		  				3: dummy label for inner cell loss
*/
template <typename scalar_t>
__global__ void pairwise_loss(const scalar_t *offset, const scalar_t *topology, const scalar_t *mask, scalar_t *loss, const int direction){
int i1 = blockIdx.x;
int j1 = blockIdx.y;
int k1 = threadIdx.x;
int W = gridDim.x;
int H = gridDim.y;
int D = blockDim.x;
int T = NumTri;
int i2=0, j2=0, k2=0, ind1=0, ind2=0;
// x direction
if (direction==0){
if (i1==W-1) return;
	    // cell index, assuming the (W, H, D) cells are flattened with D fastest: i*H*D + j*D + k
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1+H*D;
i2 = i1+1;
j2 = j1;
k2 = k1;
}
// y direction
else if (direction==1){
if (j1==H-1) return;
	    ind1 = i1*H*D + j1*D + k1;
	    ind2 = ind1+D;
i2 = i1;
j2 = j1+1;
k2 = k1;
}
// z direction
else if (direction==2){
if (k1==D-1) return;
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1+1;
i2 = i1;
j2 = j1;
k2 = k1+1;
}
// inner loss, within the same cell
else if (direction==3){
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1;
i2 = i1;
j2 = j1;
k2 = k1;
}
// get normal vector in both grids
scalar_t norm1[NumTri*3];
scalar_t norm2[NumTri*3];
scalar_t length1[NumTri];
scalar_t length2[NumTri];
offset_to_normals(offset, W+1, H+1, D+1, i1, j1, k1, direction*2, norm1, length1);
offset_to_normals(offset, W+1, H+1, D+1, i2, j2, k2, direction*2+1, norm2, length2);
scalar_t loss_=0;
for (int ti=0; ti<T; ti++){
for (int tj=0; tj<T; tj++){
// no loss if two topologies are not connected
scalar_t conn_ij = mask[ti*T + tj];
if (conn_ij<eps) continue;
// joint probability of two topology combinations
// corresponding to outer product
scalar_t pi = topology[ind1*T+ti];
scalar_t pj = topology[ind2*T+tj];
scalar_t p_ij = pi*pj;
// l2 loss
scalar_t diff_norm0 = norm1[ti*3 + 0]-norm2[tj*3 + 0];
scalar_t diff_norm1 = norm1[ti*3 + 1]-norm2[tj*3 + 1];
scalar_t diff_norm2 = norm1[ti*3 + 2]-norm2[tj*3 + 2];
scalar_t loss_tmp = conn_ij * p_ij * (diff_norm0*diff_norm0 + diff_norm1*diff_norm1 + diff_norm2*diff_norm2);
loss_ += loss_tmp;
}
}
	// each thread writes its own loss entry and nothing follows, so no barrier is needed here
	loss[ind1] = loss_;
}
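/*
 * In equation form, the value written to loss[ind1] above is
 *     loss(cell1, cell2) = sum_{ti,tj} mask[ti,tj] * p1(ti) * p2(tj) * || n1(ti) - n2(tj) ||^2,
 * i.e. the expected squared difference of unit normals over all pairs of connected
 * triangles, weighted by the outer product of the two cells' topology probabilities.
 */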
/**
 * calculate the gradient back-propagated to the offset
 * 		  offset 		input, the vertex displacement field of the full grid
 * 		  topology 		input, the topology probability
 * 		  grad_offset 	output, gradient on the offset
 * 		  mask 			input, mask denoting if two topologies have connected triangles or not
 * 		  direction	 	input, an integer denoting the neighboring relationship between two cells
 *		  				0: two cells adjacent in x direction
 *		  				1: two cells adjacent in y direction
 *		  				2: two cells adjacent in z direction
 *		  				3: dummy label for inner cell loss
*/
template <typename scalar_t>
__global__ void pairwise_grad(const scalar_t *offset, const scalar_t *topology, scalar_t *grad_offset, const scalar_t *mask, const int direction){
int i1 = blockIdx.x;
int j1 = blockIdx.y;
int k1 = threadIdx.x;
int W = gridDim.x;
int H = gridDim.y;
int D = blockDim.x;
// TODO: change below
int T = NumTri;
int i2=0, j2=0, k2=0, ind1=0, ind2=0;
// x direction
if (direction==0){
if (i1==W-1) return;
	    // cell index, assuming the (W, H, D) cells are flattened with D fastest: i*H*D + j*D + k
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1+H*D;
i2 = i1+1;
j2 = j1;
k2 = k1;
}
// y direction
else if (direction==1){
if (j1==H-1) return;
	    ind1 = i1*H*D + j1*D + k1;
	    ind2 = ind1+D;
i2 = i1;
j2 = j1+1;
k2 = k1;
}
// z direction
else if (direction==2){
if (k1==D-1) return;
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1+1;
i2 = i1;
j2 = j1;
k2 = k1+1;
}
// inner loss, within the same cell
else if (direction==3){
	    ind1 = i1*H*D + j1*D + k1;
ind2 = ind1;
i2 = i1;
j2 = j1;
k2 = k1;
}
// get normal vector in both grids
scalar_t norm1[NumTri*3];
scalar_t norm2[NumTri*3];
scalar_t length1[NumTri];
scalar_t length2[NumTri];
offset_to_normals(offset, W+1, H+1, D+1, i1, j1, k1, direction*2, norm1, length1);
offset_to_normals(offset, W+1, H+1, D+1, i2, j2, k2, direction*2+1, norm2, length2);
scalar_t grad_norm1[NumTri*3]={0};
scalar_t grad_norm2[NumTri*3]={0};
for (int ti=0; ti<T; ti++){
for (int tj=0; tj<T; tj++){
// no loss if two topologies are not connected
scalar_t conn_ij = mask[ti*T + tj];
if (conn_ij<eps) continue;
// joint probability of two topology combinations
// corresponding to outer product
scalar_t pi = topology[ind1*T+ti];
scalar_t pj = topology[ind2*T+tj];
scalar_t p_ij = pi*pj;
// l2 loss
scalar_t grad_norm1_[3] = {-2*conn_ij*p_ij*norm2[tj*3 + 0], -2*conn_ij*p_ij*norm2[tj*3 + 1], -2*conn_ij*p_ij*norm2[tj*3 + 2]};
scalar_t grad_norm2_[3] = {-2*conn_ij*p_ij*norm1[ti*3 + 0], -2*conn_ij*p_ij*norm1[ti*3 + 1], -2*conn_ij*p_ij*norm1[ti*3 + 2]};
scalar_t norm1_[3] = {norm1[ti*3 + 0], norm1[ti*3 + 1], norm1[ti*3 + 2]};
scalar_t norm2_[3] = {norm2[tj*3 + 0], norm2[tj*3 + 1], norm2[tj*3 + 2]};
grad_normalized_to_normal(grad_norm1_, norm1_, length1[ti]);
grad_normalized_to_normal(grad_norm2_, norm2_, length2[tj]);
grad_norm1[ti*3 + 0] += grad_norm1_[0];
grad_norm1[ti*3 + 1] += grad_norm1_[1];
grad_norm1[ti*3 + 2] += grad_norm1_[2];
grad_norm2[tj*3 + 0] += grad_norm2_[0];
grad_norm2[tj*3 + 1] += grad_norm2_[1];
grad_norm2[tj*3 + 2] += grad_norm2_[2];
}
}
grad_normal_to_offset(grad_offset, grad_norm1, offset, W+1, H+1, D+1, i1, j1, k1, direction*2);
grad_normal_to_offset(grad_offset, grad_norm2, offset, W+1, H+1, D+1, i2, j2, k2, direction*2 + 1);
}
} //namespace
/*
 * Forward function, calculating the curvature (smoothness) loss between neighboring cells
 * params:
 * 	  offset 	input, vertex displacement field, 3x(W+1)x(H+1)x(D+1)
 *  	  topology 	input, probability for each topology, (WxHxD)xT', T' is the number of triangles instead of topologies
 *  	  xTable 	input, connected triangles in x direction, T'xT'
 *  	  yTable 	input, connected triangles in y direction, T'xT'
 *  	  zTable 	input, connected triangles in z direction, T'xT'
 *  	  innerTable 	input, connected triangles within the same topology, T'xT'
 *  	  loss_x, loss_y, loss_z, loss_inner 	output, per-cell smoothness loss for each direction and for the inner case
*/
void curvature_constraint_kernel_forward(
at::Tensor offset,
at::Tensor topology,
at::Tensor xTable,
at::Tensor yTable,
at::Tensor zTable,
at::Tensor innerTable,
at::Tensor loss_x,
at::Tensor loss_y,
at::Tensor loss_z,
at::Tensor loss_inner){
int W = offset.size(1)-1;
int H = offset.size(2)-1;
int D = offset.size(3)-1;
dim3 dimGrid(W, H, 1);
dim3 dimBlock(D, 1, 1);
// Some checks
assert(offset.type().scalarType() == at::ScalarType::Float);
assert(topology.type().scalarType() == at::ScalarType::Float);
assert(xTable.type().scalarType() == at::ScalarType::Float);
assert(yTable.type().scalarType() == at::ScalarType::Float);
assert(zTable.type().scalarType() == at::ScalarType::Float);
assert(innerTable.type().scalarType() == at::ScalarType::Float);
assert(loss_x.type().scalarType() == at::ScalarType::Float);
assert(loss_y.type().scalarType() == at::ScalarType::Float);
assert(loss_z.type().scalarType() == at::ScalarType::Float);
assert(loss_inner.type().scalarType() == at::ScalarType::Float);
	// Launch the kernels
// x loss
pairwise_loss<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
xTable.data<float>(),
loss_x.data<float>(),
0);
// y loss
pairwise_loss<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
yTable.data<float>(),
loss_y.data<float>(),
1);
// z loss
pairwise_loss<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
zTable.data<float>(),
loss_z.data<float>(),
2);
// inner loss
pairwise_loss<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
innerTable.data<float>(),
loss_inner.data<float>(),
3);
}
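/*
 * A hypothetical binding sketch (the actual extension/binding file is not part of
 * this source, and the exported names below are assumptions):
 *
 *   // curvature.cpp
 *   #include <torch/extension.h>
 *   void curvature_constraint_kernel_forward(at::Tensor offset, at::Tensor topology,
 *                                            at::Tensor xTable, at::Tensor yTable,
 *                                            at::Tensor zTable, at::Tensor innerTable,
 *                                            at::Tensor loss_x, at::Tensor loss_y,
 *                                            at::Tensor loss_z, at::Tensor loss_inner);
 *   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
 *     m.def("curvature_forward", &curvature_constraint_kernel_forward,
 *           "curvature constraint forward (CUDA)");
 *   }
 */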
/*
 * Backward function, calculating the gradient of the loss with respect to the offset
 * params:
 * 	  grad_output 	input, gradient on the output loss, 1
 *  	  offset 	input, vertex displacement field, 3x(W+1)x(H+1)x(D+1)
 *  	  topology 	input, probability for each topology, (WxHxD)xT', T' is the number of triangles instead of topologies
 *  	  xTable 	input, connected triangles in x direction, T'xT'
 *  	  yTable 	input, connected triangles in y direction, T'xT'
 *  	  zTable 	input, connected triangles in z direction, T'xT'
 *  	  innerTable 	input, connected triangles within the same topology, T'xT'
 *  	  grad_offset 	output, gradient on the offset, 3x(W+1)x(H+1)x(D+1)
*
*/
void curvature_constraint_kernel_backward(
at::Tensor grad_output,
at::Tensor offset,
at::Tensor topology,
at::Tensor xTable,
at::Tensor yTable,
at::Tensor zTable,
at::Tensor innerTable,
at::Tensor grad_offset){
int W = offset.size(1) - 1;
int H = offset.size(2) - 1;
int D = offset.size(3) - 1;
dim3 dimGrid(W, H, 1);
dim3 dimBlock(D, 1, 1);
assert(offset.type().scalarType() == at::ScalarType::Float);
assert(topology.type().scalarType() == at::ScalarType::Float);
assert(xTable.type().scalarType() == at::ScalarType::Float);
assert(yTable.type().scalarType() == at::ScalarType::Float);
assert(zTable.type().scalarType() == at::ScalarType::Float);
assert(innerTable.type().scalarType() == at::ScalarType::Float);
assert(grad_output.type().scalarType() == at::ScalarType::Float);
assert(grad_offset.type().scalarType() == at::ScalarType::Float);
	// launch the kernels
pairwise_grad<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
grad_offset.data<float>(),
xTable.data<float>(),
0);
pairwise_grad<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
grad_offset.data<float>(),
yTable.data<float>(),
1);
pairwise_grad<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
grad_offset.data<float>(),
zTable.data<float>(),
2);
pairwise_grad<float><<< dimGrid, dimBlock>>>(
offset.data<float>(),
topology.data<float>(),
grad_offset.data<float>(),
innerTable.data<float>(),
3);
// Multiply with incoming gradient
// Do that in Python now
// grad_offset *= grad_output;
}
const int MAX_TEXTURES = 10;
__constant__ float Acuda[16];
void init_Acuda()
{
static bool initialized(false);
if (!initialized)
{
float A_h[16] = { -1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
-3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0
};
cudaMemcpyToSymbol(Acuda, A_h, 16*sizeof(float), 0, cudaMemcpyHostToDevice);
initialized = true;
}
}
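/*
 * Added note: A_h appears to be the uniform cubic B-spline basis matrix divided by 6.
 * Downstream kernels evaluate the four blending weights as
 *     a_k(t) = Acuda[4k]*t^3 + Acuda[4k+1]*t^2 + Acuda[4k+2]*t + Acuda[4k+3],  k = 0..3,
 * and the interpolated value is the sum over the 4x4x4 neighboring coefficients
 * weighted by a_i(tx)*a_j(ty)*a_k(tz), as done in MPC_LR_kernel below.
 */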
texture<float,1,cudaReadModeElementType> myTex;
texture<float,1,cudaReadModeElementType> tex00, tex01, tex02, tex03, tex04, tex05, tex06, tex07, tex08, tex09;
bool textureInUse[MAX_TEXTURES] = { false, false, false, false, false,
false, false, false, false, false
};
#define arraytexFetch(_u, _texnum, _return)\
switch(_texnum)\
{\
case 0:\
_return = tex1D(tex00, (_u)); \
break;\
case 1:\
_return = tex1D(tex01, (_u)); \
break; \
case 2:\
_return = tex1D(tex02, (_u)); \
break; \
case 3:\
_return = tex1D(tex03, (_u)); \
break; \
case 4:\
_return = tex1D(tex04, (_u)); \
break; \
case 5:\
_return = tex1D(tex05, (_u)); \
break; \
case 6:\
_return = tex1D(tex06, (_u)); \
break; \
case 7:\
_return = tex1D(tex07, (_u)); \
break; \
case 8:\
_return = tex1D(tex08, (_u)); \
break; \
case 9:\
_return = tex1D(tex09, (_u)); \
break; \
}
#include <stdio.h>
TextureSpline::TextureSpline()
{
int iTex = 0;
while ( iTex < MAX_TEXTURES && textureInUse[iTex]) iTex++;
if (iTex == MAX_TEXTURES)
{
fprintf (stderr, "Unable to allocated a texture. Increase MAX_TEXTURES "
"in CudaCoulomb.cu.\n");
abort();
}
MyTexture = iTex;
textureInUse[iTex] = true;
}
TextureSpline::~TextureSpline()
{
textureInUse[MyTexture] = false;
}
void
TextureSpline::set(double data[], int numPoints,
double rmin, double rmax)
{
rMin = rmin;
rMax = rmax;
NumPoints = numPoints;
float data_Host[numPoints];
for (int i=0; i<numPoints; i++)
data_Host[i] = data[i];
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
cudaMallocArray(&myArray, &channelDesc, numPoints);
cudaMemcpyToArrayAsync(myArray, 0, 0, data_Host, numPoints*sizeof(float),
cudaMemcpyHostToDevice);
switch (MyTexture)
{
case 0:
tex00.addressMode[0] = cudaAddressModeClamp;
tex00.filterMode = cudaFilterModeLinear;
tex00.normalized = false;
cudaBindTextureToArray(tex00, myArray, channelDesc);
break;
case 1:
tex01.addressMode[0] = cudaAddressModeClamp;
tex01.filterMode = cudaFilterModeLinear;
tex01.normalized = false;
cudaBindTextureToArray(tex01, myArray, channelDesc);
break;
case 2:
tex02.addressMode[0] = cudaAddressModeClamp;
tex02.filterMode = cudaFilterModeLinear;
tex02.normalized = false;
cudaBindTextureToArray(tex02, myArray, channelDesc);
break;
case 3:
tex03.addressMode[0] = cudaAddressModeClamp;
tex03.filterMode = cudaFilterModeLinear;
tex03.normalized = false;
cudaBindTextureToArray(tex03, myArray, channelDesc);
break;
case 4:
tex04.addressMode[0] = cudaAddressModeClamp;
tex04.filterMode = cudaFilterModeLinear;
tex04.normalized = false;
cudaBindTextureToArray(tex04, myArray, channelDesc);
break;
case 5:
tex05.addressMode[0] = cudaAddressModeClamp;
tex05.filterMode = cudaFilterModeLinear;
tex05.normalized = false;
cudaBindTextureToArray(tex05, myArray, channelDesc);
break;
case 6:
tex06.addressMode[0] = cudaAddressModeClamp;
tex06.filterMode = cudaFilterModeLinear;
tex06.normalized = false;
cudaBindTextureToArray(tex06, myArray, channelDesc);
break;
case 7:
tex07.addressMode[0] = cudaAddressModeClamp;
tex07.filterMode = cudaFilterModeLinear;
tex07.normalized = false;
cudaBindTextureToArray(tex07, myArray, channelDesc);
break;
case 8:
tex08.addressMode[0] = cudaAddressModeClamp;
tex08.filterMode = cudaFilterModeLinear;
tex08.normalized = false;
cudaBindTextureToArray(tex08, myArray, channelDesc);
break;
case 9:
tex09.addressMode[0] = cudaAddressModeClamp;
tex09.filterMode = cudaFilterModeLinear;
tex09.normalized = false;
cudaBindTextureToArray(tex09, myArray, channelDesc);
break;
}
}
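/*
 * Hypothetical usage sketch (caller-side names are assumptions): a radial potential
 * sampled on [rmin, rmax] is uploaded once and then fetched inside kernels through
 * the texture slot recorded in MyTexture:
 *
 *   TextureSpline vShort;
 *   vShort.set(v_host, nPoints, 0.0, rCut);      // binds one of tex00..tex09
 *   // ... in a kernel, with nrm = (Ntex-1)/rMax:
 *   // arraytexFetch(nrm*r + 0.5, textureNum, tval);
 */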
__device__ float dist (float dx, float dy, float dz)
{
return sqrtf(dx*dx + dy*dy + dz*dz);
}
__device__ double dist (double dx, double dy, double dz)
{
return sqrt(dx*dx + dy*dy + dz*dz);
}
template<typename T>
__device__
T min_dist (T& x, T& y, T& z,
T L[3][3], T Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z;
T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z;
T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z;
u0 -= rintf(u0);
u1 -= rintf(u1);
u2 -= rintf(u2);
x = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2;
y = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2;
z = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2;
// T u0 = Linv[0][0]*x; u0 -= rintf(u0); x = L[0][0]*u0;
// T u1 = Linv[1][1]*y; u1 -= rintf(u1); y = L[1][1]*u1;
// T u2 = Linv[2][2]*z; u2 -= rintf(u2); z = L[2][2]*u2;
// return sqrtf(x*x + y*y + z*z);
T d2min = x*x + y*y + z*z;
for (T i=-1.0f; i<=1.001; i+=1.0f)
for (T j=-1.0f; j<=1.001; j+=1.0f)
for (T k=-1.0f; k<=1.001; k+=1.0f)
{
T xnew = L[0][0]*(u0+i) + L[0][1]*(u1+j) + L[0][2]*(u2+k);
T ynew = L[1][0]*(u0+i) + L[1][1]*(u1+j) + L[1][2]*(u2+k);
T znew = L[2][0]*(u0+i) + L[2][1]*(u1+j) + L[2][2]*(u2+k);
      T d2 = xnew*xnew + ynew*ynew + znew*znew;
      // compare before updating d2min so the minimum-image update below is not dead code
      if (d2 < d2min)
{
d2min = d2;
x = xnew;
y = ynew;
z = znew;
}
}
return sqrt(d2min);
}
template<typename T>
__device__
T min_dist2 (T& x, T& y, T& z,
T L[3][3], T Linv[3][3])
{
T u0 = Linv[0][0]*x + Linv[0][1]*y + Linv[0][2]*z;
T u1 = Linv[1][0]*x + Linv[1][1]*y + Linv[1][2]*z;
T u2 = Linv[2][0]*x + Linv[2][1]*y + Linv[2][2]*z;
u0 -= rintf(u0);
u1 -= rintf(u1);
u2 -= rintf(u2);
x = L[0][0]*u0 + L[0][1]*u1 + L[0][2]*u2;
y = L[1][0]*u0 + L[1][1]*u1 + L[1][2]*u2;
z = L[2][0]*u0 + L[2][1]*u1 + L[2][2]*u2;
// T u0 = Linv[0][0]*x; u0 -= rintf(u0); x = L[0][0]*u0;
// T u1 = Linv[1][1]*y; u1 -= rintf(u1); y = L[1][1]*u1;
// T u2 = Linv[2][2]*z; u2 -= rintf(u2); z = L[2][2]*u2;
// return sqrtf(x*x + y*y + z*z);
T d2min = x*x + y*y + z*z;
for (T i=-1.0f; i<=1.001; i+=1.0f)
for (T j=-1.0f; j<=1.001; j+=1.0f)
for (T k=-1.0f; k<=1.001; k+=1.0f)
{
T xnew = L[0][0]*(u0+i) + L[0][1]*(u1+j) + L[0][2]*(u2+k);
T ynew = L[1][0]*(u0+i) + L[1][1]*(u1+j) + L[1][2]*(u2+k);
T znew = L[2][0]*(u0+i) + L[2][1]*(u1+j) + L[2][2]*(u2+k);
T d2 = xnew*xnew + ynew*ynew + znew*znew;
      // compare before updating d2min so the minimum-image update below is not dead code
      if (d2 < d2min)
{
d2min = d2;
x = xnew;
y = ynew;
z = znew;
}
}
return d2min;
}
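/*
 * Both routines above implement the minimum-image convention for a general cell:
 * the displacement is mapped to reduced coordinates u = Linv * r, wrapped with
 * rint(), and the shortest image is searched over the 27 neighboring cells,
 *     d_min = min_{n in {-1,0,1}^3} | L * (u - rint(u) + n) |,
 * with min_dist returning the distance and min_dist2 the squared distance.
 */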
__device__ float recipSqrt (float x)
{
return rsqrtf(x);
}
__device__ double recipSqrt (double x)
{
return rsqrt(x);
}
template<typename T, int BS>
__global__ void
coulomb_AA_PBC_kernel(T **R, int N, T rMax, int Ntex,
int textureNum, T *lattice, T *latticeInv, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
T nrm = (T)(Ntex-1)/rMax;
__shared__ T r1[BS][3], r2[BS][3];
int NB = N/BS + ((N%BS) ? 1 : 0);
T mysum = (T)0.0;
// Do diagonal blocks first
for (int b=0; b<NB; b++)
{
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b+i)*BS + tid];
int ptcl1 = b*BS + tid;
if (ptcl1 < N)
{
int end = (b+1)*BS < N ? BS : N-b*BS;
for (int p2=0; p2<end; p2++)
{
int ptcl2 = b*BS + p2;
T dx, dy, dz;
dx = r1[p2][0] - r1[tid][0];
dy = r1[p2][1] - r1[tid][1];
dz = r1[p2][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
if (ptcl1 != ptcl2)
{
float tval;
arraytexFetch(nrm*dist+0.5, textureNum, tval);
mysum += tval/dist;
}
// mysum += dist;
}
}
}
// Avoid double-counting on the diagonal blocks
mysum *= 0.5;
// Now do off-diagonal blocks
for (int b1=0; b1<NB; b1++)
{
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b1+i)*BS + tid];
int ptcl1 = b1*BS + tid;
if (ptcl1 < N)
{
for (int b2=b1+1; b2<NB; b2++)
{
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N)
r2[0][i*BS+tid] = myR[(3*b2+i)*BS + tid];
int end = ((b2+1)*BS < N) ? BS : (N-b2*BS);
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
float tval;
arraytexFetch(nrm*dist+0.5, textureNum, tval);
mysum += tval/dist;
// mysum += tex1D(shortTex[textureNum], nrm*dist+0.5)/dist;
}
}
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
template<typename T, int BS>
__global__ void
coulomb_AA_kernel(T **R, int N, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
__shared__ T r1[BS][3], r2[BS][3];
int NB = (N+BS-1)/BS;
T mysum = (T)0.0;
// Do diagonal blocks first
for (int b=0; b<NB; b++)
{
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b+i)*BS + tid];
int ptcl1 = b*BS + tid;
if (ptcl1 < N)
{
int end = (b+1)*BS < N ? BS : N-b*BS;
for (int p2=0; p2<end; p2++)
{
int ptcl2 = b*BS + p2;
T dx, dy, dz;
dx = r1[p2][0] - r1[tid][0];
dy = r1[p2][1] - r1[tid][1];
dz = r1[p2][2] - r1[tid][2];
T distInv =recipSqrt(dx*dx + dy*dy + dz*dz);
if (ptcl1 != ptcl2)
mysum += distInv;
// mysum += dist;
}
}
}
// Avoid double-counting on the diagonal blocks
mysum *= 0.5;
// Now do off-diagonal blocks
for (int b1=0; b1<NB; b1++)
{
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b1+i)*BS + tid];
int ptcl1 = b1*BS + tid;
if (ptcl1 < N)
{
for (int b2=b1+1; b2<NB; b2++)
{
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N)
r2[0][i*BS+tid] = myR[(3*b2+i)*BS + tid];
int end = ((b2+1)*BS < N) ? BS : (N-b2*BS);
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T distInv =recipSqrt(dx*dx + dy*dy + dz*dz);
mysum += distInv;
}
}
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
void
CoulombAA_SR_Sum(float *R[], int N, float rMax, int Ntex,
int textureNum, float lattice[], float latticeInv[],
float sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AA_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, N, rMax, Ntex, textureNum, lattice, latticeInv, sum);
}
void
CoulombAA_SR_Sum(double *R[], int N, double rMax, int Ntex,
int textureNum, double lattice[], double latticeInv[],
double sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AA_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, N, rMax, Ntex, textureNum, lattice, latticeInv, sum);
}
void
CoulombAA_Sum(float *R[], int N, float sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AA_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, N, sum);
}
void
CoulombAA_Sum(double *R[], int N, double sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AA_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, N, sum);
}
template<typename T, int BS>
__global__ void
MPC_SR_kernel(T **R, int N,
T *lattice, T *latticeInv, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
__shared__ T r1[BS][3], r2[BS][3];
int NB = (N+BS-1)/BS;
T mysum = (T)0.0;
// Do diagonal blocks first
for (int b=0; b<NB; b++)
{
for (int i=0; i<3; i++)
if ((3*b+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b+i)*BS + tid];
__syncthreads();
int ptcl1 = b*BS + tid;
if (ptcl1 < N)
{
int end = (b+1)*BS < N ? BS : N-b*BS;
for (int p2=0; p2<end; p2++)
{
int ptcl2 = b*BS + p2;
T dx, dy, dz;
dx = r1[p2][0] - r1[tid][0];
dy = r1[p2][1] - r1[tid][1];
dz = r1[p2][2] - r1[tid][2];
T distinv = recipSqrt(min_dist2(dx, dy, dz, L, Linv));
if (ptcl1 != ptcl2)
mysum += distinv;
}
}
}
// Avoid double-counting on the diagonal blocks
mysum *= 0.5;
// Now do off-diagonal blocks
for (int b1=0; b1<NB; b1++)
{
for (int i=0; i<3; i++)
if ((3*b1+i)*BS + tid < 3*N)
r1[0][i*BS+tid] = myR[(3*b1+i)*BS + tid];
__syncthreads();
int ptcl1 = b1*BS + tid;
if (ptcl1 < N)
{
for (int b2=b1+1; b2<NB; b2++)
{
for (int i=0; i<3; i++)
if ((3*b2+i)*BS + tid < 3*N)
r2[0][i*BS+tid] = myR[(3*b2+i)*BS + tid];
int end = ((b2+1)*BS < N) ? BS : (N-b2*BS);
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r2[j][0] - r1[tid][0];
dy = r2[j][1] - r1[tid][1];
dz = r2[j][2] - r1[tid][2];
T distinv = recipSqrt(min_dist2(dx, dy, dz, L, Linv));
mysum += distinv;
}
}
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
void
MPC_SR_Sum(float *R[], int N, float lattice[], float latticeInv[],
float sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
MPC_SR_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, N, lattice, latticeInv, sum);
}
void
MPC_SR_Sum(double *R[], int N, double lattice[], double latticeInv[],
double sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
MPC_SR_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, N, lattice, latticeInv, sum);
}
template<typename T> struct Three {};
template<> struct Three<float>
{
typedef float3 type;
};
template<> struct Three<double>
{
typedef double3 type;
};
template<typename T, int BS>
__global__ void
MPC_LR_kernel(T **R, int N, T* coefs, typename Three<T>::type gridInv, uint3 dim,
uint3 strides, T *latticeInv, T *sum)
{
int tid = threadIdx.x;
__shared__ T r[BS][3], u[BS][3], Linv[3][3];
__shared__ int index[BS][3];
__shared__ T* myR;
if (tid < 9)
Linv[0][tid] = latticeInv[tid];
if (tid ==0)
myR = R[blockIdx.x];
__syncthreads();
int numBlocks = (N+BS-1)/BS;
T myval = T();
for (int block=0; block<numBlocks; block++)
{
for (int i=0; i<3; i++)
{
int off = (3*block+i)*BS + tid;
if (off < 3*N)
r[0][i*BS+tid] = myR[off];
}
__syncthreads();
u[tid][0] = (Linv[0][0]*r[tid][0] + Linv[0][1]*r[tid][1] + Linv[0][2]*r[tid][2]);
u[tid][1] = (Linv[1][0]*r[tid][0] + Linv[1][1]*r[tid][1] + Linv[1][2]*r[tid][2]);
u[tid][2] = (Linv[2][0]*r[tid][0] + Linv[2][1]*r[tid][1] + Linv[2][2]*r[tid][2]);
u[tid][0] -= floor(u[tid][0]);
u[tid][1] -= floor(u[tid][1]);
u[tid][2] -= floor(u[tid][2]);
// We don't need r anymore, so we can now reuse to store t.
T s, sf;
s = u[tid][0] * gridInv.x;
sf = floor (s);
index[tid][0] = min (max(0,(int)sf), dim.x-1);
r[tid][0] = s - sf;
s = u[tid][1] * gridInv.y;
sf = floor (s);
index[tid][1] = min (max(0,(int)sf), dim.y-1);
r[tid][1] = s - sf;
s = u[tid][2] * gridInv.z;
sf = floor (s);
index[tid][2] = min (max(0,(int)sf), dim.z-1);
r[tid][2] = s - sf;
int end = min (BS, N-block*BS);
// This loop assumes BS=32
for (int i=0; i<end; i++)
{
__shared__ T a[4][3];
if (tid < 12)
{
int j = tid>>2;
int k = tid&3;
T t = r[i][j];
a[k][j] = (Acuda[4*k+0]*t*t*t + Acuda[4*k+1]*t*t +
Acuda[4*k+2]*t + Acuda[4*k+3]);
}
__syncthreads();
// There are 64 elements to sum. With BS=32, we use 2 passes
// First 32 coefs
int ix = tid>>4;
int iy = (tid>>2) & 3;
int iz = (tid & 3);
T abc = a[ix][0]*a[iy][1]*a[iz][2];
int off = ((index[i][0]+ix)*strides.x +
(index[i][1]+iy)*strides.y +
(index[i][2]+iz));
myval += abc*coefs[off];
// Second 32 coefs
ix+=2;
abc = a[ix][0]*a[iy][1]*a[iz][2];
off = ((index[i][0]+ix)*strides.x +
(index[i][1]+iy)*strides.y +
(index[i][2]+iz));
myval += abc*coefs[off];
}
}
__syncthreads();
// reuse u for reduction
u[0][tid] = myval;
for (int s=BS>>1; s>0; s>>=1)
{
if (tid<s)
u[0][tid] += u[0][tid+s];
__syncthreads();
}
if (tid == 0)
sum[blockIdx.x] = u[0][0];
}
void
MPC_LR_Sum(float *R[], int N, UBspline_3d_s_cuda *spline,
float latticeInv[], float sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
MPC_LR_kernel<float,BS><<<dimGrid,dimBlock>>>\
(R, N, spline->coefs, spline->gridInv, spline->dim,
spline->stride, latticeInv, sum);
}
void
MPC_LR_Sum(double *R[], int N, UBspline_3d_d_cuda *spline,
double latticeInv[], double sum[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
MPC_LR_kernel<double,BS><<<dimGrid,dimBlock>>>\
(R, N, spline->coefs, spline->gridInv, spline->dim,
spline->stride, latticeInv, sum);
}
template<typename T, int BS>
__global__ void
coulomb_AB_PBC_kernel(T **R, int Nelec, T *I, int Ifirst, int Ilast,
T rMax, int Ntex, int textureNum,
T *lattice, T *latticeInv, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
int Nion = Ilast - Ifirst + 1;
if (tid == 0)
myR = R[blockIdx.x];
__shared__ T L[3][3], Linv[3][3];
if (tid < 9)
{
L[0][tid] = lattice[tid];
Linv[0][tid] = latticeInv[tid];
}
__syncthreads();
T nrm = (T)(Ntex-1)/rMax;
__shared__ T r[BS][3], i[BS][3];
int NeBlocks = Nelec/BS + ((Nelec%BS) ? 1 : 0);
int NiBlocks = Nion/BS + ((Nion %BS) ? 1 : 0);
T mysum = (T)0.0;
// Now do off-diagonal blocks
for (int iBlock=0; iBlock<NiBlocks; iBlock++)
{
for (int j=0; j<3; j++)
if ((3*iBlock+j)*BS + tid < 3*Nion)
i[0][j*BS+tid] = I[3*Ifirst+(3*iBlock+j)*BS + tid];
__syncthreads();
int ion = iBlock*BS + tid;
for (int eBlock=0; eBlock<NeBlocks; eBlock++)
{
for (int j=0; j<3; j++)
if ((3*eBlock+j)*BS + tid < 3*Nelec)
r[0][j*BS+tid] = myR[(3*eBlock+j)*BS + tid];
__syncthreads();
int end = ((eBlock+1)*BS < Nelec) ? BS : (Nelec-eBlock*BS);
if (ion < Nion)
{
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r[j][0] - i[tid][0];
dy = r[j][1] - i[tid][1];
dz = r[j][2] - i[tid][2];
T dist = min_dist(dx, dy, dz, L, Linv);
float tval;
arraytexFetch(nrm*dist+0.5, textureNum, tval);
mysum += tval / dist;
}
}
__syncthreads();
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
void
CoulombAB_SR_Sum(float *R[], int Nelec, float I[], int Ifirst, int Ilast,
float rMax, int Ntex, int textureNum,
float lattice[], float latticeInv[],
float sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AB_PBC_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Ifirst, Ilast, rMax, Ntex, textureNum,
lattice, latticeInv, sum);
}
void
CoulombAB_SR_Sum(double *R[], int Nelec, double I[], int Ifirst, int Ilast,
double rMax, int Ntex, int textureNum,
double lattice[], double latticeInv[],
double sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AB_PBC_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Ifirst, Ilast, rMax, Ntex, textureNum,
lattice, latticeInv, sum);
}
template<typename T, int BS>
__global__ void
local_ecp_kernel(T **R, int Nelec, T *I, int Ifirst, int Ilast,
T rMax, int Ntex, int textureNum, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
int Nion = Ilast - Ifirst + 1;
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
T nrm = (T)(Ntex-1)/rMax;
__shared__ T r[BS][3], i[BS][3];
int NeBlocks = Nelec/BS + ((Nelec%BS) ? 1 : 0);
int NiBlocks = Nion/BS + ((Nion %BS) ? 1 : 0);
T mysum = (T)0.0;
// Now do off-diagonal blocks
for (int iBlock=0; iBlock<NiBlocks; iBlock++)
{
for (int j=0; j<3; j++)
if ((3*iBlock+j)*BS + tid < 3*Nion)
i[0][j*BS+tid] = I[3*Ifirst+(3*iBlock+j)*BS + tid];
__syncthreads();
int ion = iBlock*BS + tid;
for (int eBlock=0; eBlock<NeBlocks; eBlock++)
{
for (int j=0; j<3; j++)
if ((3*eBlock+j)*BS + tid < 3*Nelec)
r[0][j*BS+tid] = myR[(3*eBlock+j)*BS + tid];
__syncthreads();
int end = ((eBlock+1)*BS < Nelec) ? BS : (Nelec-eBlock*BS);
if (ion < Nion)
{
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r[j][0] - i[tid][0];
dy = r[j][1] - i[tid][1];
dz = r[j][2] - i[tid][2];
T d = dist(dx, dy, dz);
float tval;
arraytexFetch(nrm*d+0.5, textureNum, tval);
mysum += tval / d;
}
}
__syncthreads();
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
void
local_ecp_sum(float *R[], int Nelec, float I[], int Ifirst, int Ilast,
float rMax, int Ntex, int textureNum,
float sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
local_ecp_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Ifirst, Ilast, rMax, Ntex, textureNum, sum);
}
void
local_ecp_sum(double *R[], int Nelec, double I[], int Ifirst, int Ilast,
double rMax, int Ntex, int textureNum,
double sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
local_ecp_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Ifirst, Ilast, rMax, Ntex, textureNum, sum);
}
template<typename T, int BS>
__global__ void
coulomb_AB_kernel(T **R, int Nelec, T *I, T *Zion, int Nion, T *sum)
{
int tid = threadIdx.x;
__shared__ T *myR;
if (tid == 0)
myR = R[blockIdx.x];
__syncthreads();
__shared__ T r[BS][3], i[BS][3], z[BS];
int NeBlocks = Nelec/BS + ((Nelec%BS) ? 1 : 0);
int NiBlocks = Nion/BS + ((Nion %BS) ? 1 : 0);
T mysum = (T)0.0;
// Now do off-diagonal blocks
for (int iBlock=0; iBlock<NiBlocks; iBlock++)
{
for (int j=0; j<3; j++)
if ((3*iBlock+j)*BS + tid < 3*Nion)
i[0][j*BS+tid] = I[(3*iBlock+j)*BS + tid];
if (tid < Nion)
z[tid] = Zion[tid];
__syncthreads();
int ion = iBlock*BS + tid;
for (int eBlock=0; eBlock<NeBlocks; eBlock++)
{
for (int j=0; j<3; j++)
if ((3*eBlock+j)*BS + tid < 3*Nelec)
r[0][j*BS+tid] = myR[(3*eBlock+j)*BS + tid];
__syncthreads();
int end = ((eBlock+1)*BS < Nelec) ? BS : (Nelec-eBlock*BS);
if (ion < Nion)
{
for (int j=0; j<end; j++)
{
T dx, dy, dz;
dx = r[j][0] - i[tid][0];
dy = r[j][1] - i[tid][1];
dz = r[j][2] - i[tid][2];
T distInv = recipSqrt(dx*dx + dy*dy + dz*dz);
mysum -= z[tid]*distInv;
}
}
__syncthreads();
}
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=BS>>1; s>0; s >>=1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid==0)
sum[blockIdx.x] = shared_sum[0];
}
void
CoulombAB_Sum(float *R[], int Nelec, float I[], float Zion[], int Nion,
float sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AB_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Zion, Nion, sum);
}
void
CoulombAB_Sum(double *R[], int Nelec, double I[], double Zion[], int Nion,
double sum[], int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
coulomb_AB_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, Nelec, I, Zion, Nion, sum);
}
template<typename T, int BS>
__global__ void
eval_rhok_kernel (T **R, int numr,
T *kpoints, int numk, T **rhok)
{
int tid = threadIdx.x;
__shared__ T r[BS][3], k[BS][3], *myR, *myrhok;
if (tid == 0)
{
myR = R[blockIdx.x];
myrhok = rhok[blockIdx.x];
}
__syncthreads();
int NrBlock = numr/BS + ((numr%BS) ? 1 : 0);
int NkBlock = numk/BS + ((numk%BS) ? 1 : 0);
__shared__ T rhok_re[BS], rhok_im[BS], rhok_s[2*BS];
for (int kBlock=0; kBlock<NkBlock; kBlock++)
{
for (int i=0; i<3; i++)
if ((i+3*kBlock)*BS + tid < 3*numk)
k[0][BS*i+tid] = kpoints[(i+3*kBlock)*BS+tid];
rhok_re[tid] = rhok_im[tid] = 0.0f;
for (int rBlock=0; rBlock<NrBlock; rBlock++)
{
for (int i=0; i<3; i++)
if ((i+3*rBlock)*BS + tid < 3*numr)
r[0][BS*i+tid] = myR[(i+3*rBlock)*BS+tid];
int end = ((rBlock+1)*BS < numr) ? BS : (numr-rBlock*BS);
for (int j=0; j<end; j++)
{
T phase = (k[tid][0] * r[j][0] +
k[tid][1] * r[j][1] +
k[tid][2] * r[j][2]);
T s,c;
sincos (phase, &s, &c);
rhok_im[tid] += s;
rhok_re[tid] += c;
}
}
// Write rhok to global memory
rhok_s[2*tid+0] = rhok_re[tid];
rhok_s[2*tid+1] = rhok_im[tid];
__syncthreads();
if (2*(kBlock*BS)+tid < 2*numk)
myrhok[2*(kBlock*BS)+tid] = rhok_s[tid];
if (2*(kBlock*BS)+tid+BS < 2*numk)
myrhok[2*(kBlock*BS)+tid+BS] = rhok_s[tid+BS];
}
}
template<typename T, int BS>
__global__ void
eval_rhok_kernel (T **R, int first, int last,
T *kpoints, int numk, T **rhok)
{
int tid = threadIdx.x;
int numr = last-first+1;
__shared__ T r[BS][3], k[BS][3], *myR, *myrhok;
if (tid == 0)
{
myR = R[blockIdx.x];
myrhok = rhok[blockIdx.x];
}
__syncthreads();
int NrBlock = numr/BS + ((numr%BS) ? 1 : 0);
int NkBlock = numk/BS + ((numk%BS) ? 1 : 0);
__shared__ T rhok_re[BS], rhok_im[BS], rhok_s[2*BS];
for (int kBlock=0; kBlock<NkBlock; kBlock++)
{
for (int i=0; i<3; i++)
if ((i+3*kBlock)*BS + tid < 3*numk)
k[0][BS*i+tid] = kpoints[(i+3*kBlock)*BS+tid];
rhok_re[tid] = rhok_im[tid] = 0.0f;
for (int rBlock=0; rBlock<NrBlock; rBlock++)
{
for (int i=0; i<3; i++)
if ((i+3*rBlock)*BS + tid < 3*numr)
r[0][BS*i+tid] = myR[3*first+(i+3*rBlock)*BS+tid];
int end = ((rBlock+1)*BS < numr) ? BS : (numr-rBlock*BS);
for (int j=0; j<end; j++)
{
T phase = (k[tid][0] * r[j][0] +
k[tid][1] * r[j][1] +
k[tid][2] * r[j][2]);
T s,c;
sincos (phase, &s, &c);
rhok_im[tid] += s;
rhok_re[tid] += c;
}
}
// Write rhok to global memory
rhok_s[2*tid+0] = rhok_re[tid];
rhok_s[2*tid+1] = rhok_im[tid];
__syncthreads();
if (2*(kBlock*BS)+tid < 2*numk)
myrhok[2*(kBlock*BS)+tid] = rhok_s[tid];
if (2*(kBlock*BS)+tid+BS < 2*numk)
myrhok[2*(kBlock*BS)+tid+BS] = rhok_s[tid+BS];
}
}
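/*
 * Both eval_rhok_kernel variants accumulate the collective density in reciprocal
 * space, rho(k) = sum_j exp(i k . r_j), and store it interleaved as (Re rho, Im rho)
 * per k-point in myrhok; the second variant restricts the sum to particles first..last.
 */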
void
eval_rhok_cuda(float *R[], int numr, float kpoints[],
int numk, float* rhok[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
eval_rhok_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, numr, kpoints, numk, rhok);
}
void
eval_rhok_cuda(double *R[], int numr, double kpoints[],
int numk, double* rhok[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
eval_rhok_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, numr, kpoints, numk, rhok);
}
void
eval_rhok_cuda(float *R[], int first, int last, float kpoints[],
int numk, float* rhok[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
eval_rhok_kernel<float,BS><<<dimGrid,dimBlock>>>
(R, first, last, kpoints, numk, rhok);
}
void
eval_rhok_cuda(double *R[], int first, int last, double kpoints[],
int numk, double* rhok[], int numWalkers)
{
const int BS=32;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
eval_rhok_kernel<double,BS><<<dimGrid,dimBlock>>>
(R, first, last, kpoints, numk, rhok);
}
template<typename T, int BS>
__global__ void
vk_sum_kernel(T **rhok, T *vk, int numk,
T *sum)
{
int tid = threadIdx.x;
__shared__ T *myrhok;
if (tid == 0)
myrhok = rhok[blockIdx.x];
__syncthreads();
// Used to do coalesced global loads
__shared__ T rhok_s[2*BS];
int NB = numk/BS + ((numk%BS) ? 1 : 0);
T mysum = 0.0f;
for (int b=0; b<NB; b++)
{
if (2*b*BS + tid < 2*numk)
rhok_s[tid] = myrhok[2*b*BS+tid];
if ((2*b+1)*BS + tid < 2*numk)
rhok_s[BS+tid] = myrhok[(2*b+1)*BS+tid];
__syncthreads();
if (b*BS + tid < numk)
mysum += vk[b*BS+tid] * (rhok_s[2*tid+0]*rhok_s[2*tid+0] +
rhok_s[2*tid+1]*rhok_s[2*tid+1]);
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=(BS>>1); s>0; s >>= 1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
// Not sure if this 0.25 factor is correct.
if (tid == 0)
sum[blockIdx.x] += 0.25*shared_sum[0];
}
template<typename T, int BS>
__global__ void
vk_sum_kernel2(T **rhok1, T **rhok2, T *vk, int numk,
T *sum)
{
int tid = threadIdx.x;
__shared__ T *myrhok1, *myrhok2;
if (tid == 0)
{
myrhok1 = rhok1[blockIdx.x];
myrhok2 = rhok2[blockIdx.x];
}
__syncthreads();
// Used to do coalesced global loads
__shared__ T rhok_s1[2*BS], rhok_s2[2*BS];
int NB = numk/BS + ((numk%BS) ? 1 : 0);
T mysum = 0.0f;
for (int b=0; b<NB; b++)
{
if (2*b*BS + tid < 2*numk)
{
rhok_s1[tid] = myrhok1[2*b*BS+tid];
rhok_s2[tid] = myrhok2[2*b*BS+tid];
}
if ((2*b+1)*BS + tid < 2*numk)
{
rhok_s1[BS+tid] = myrhok1[(2*b+1)*BS+tid];
rhok_s2[BS+tid] = myrhok2[(2*b+1)*BS+tid];
}
__syncthreads();
if (b*BS + tid < numk)
mysum += vk[b*BS+tid] * (rhok_s1[2*tid+0]*rhok_s2[2*tid+0] +
rhok_s1[2*tid+1]*rhok_s2[2*tid+1]);
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=(BS>>1); s>0; s >>= 1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
T factor = (myrhok1 == myrhok2) ? 0.5f : 1.0f;
if (tid == 0)
sum[blockIdx.x] += factor*shared_sum[0];
}
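/*
 * vk_sum_kernel2 above accumulates the k-space sum
 *     sum_k v(k) * Re[ rho_1(k) * conj(rho_2(k)) ]
 * (Re1*Re2 + Im1*Im2 is the real part of rho_1 conj(rho_2)), scaled by 0.5 when
 * rhok1 and rhok2 refer to the same array and by 1.0 otherwise.
 */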
void
eval_vk_sum_cuda (float *rhok[], float vk[], int numk, float sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel<float,BS><<<dimGrid,dimBlock>>>
(rhok, vk, numk, sum);
}
void
eval_vk_sum_cuda (double *rhok[], double vk[], int numk, double sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel<double,BS><<<dimGrid,dimBlock>>>
(rhok, vk, numk, sum);
}
void
eval_vk_sum_cuda (float *rhok1[], float *rhok2[],
float vk[], int numk, float sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel2<float,BS><<<dimGrid,dimBlock>>>
(rhok1, rhok2, vk, numk, sum);
}
void
eval_vk_sum_cuda (double *rhok1[], double *rhok2[],
double vk[], int numk, double sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel2<double,BS><<<dimGrid,dimBlock>>>
(rhok1, rhok2, vk, numk, sum);
}
template<typename T, int BS>
__global__ void
vk_sum_kernel2(T **rhok1, T *rhok2, T *vk, int numk,
T *sum)
{
int tid = threadIdx.x;
__shared__ T *myrhok1;
if (tid == 0)
myrhok1 = rhok1[blockIdx.x];
__syncthreads();
// Used to do coalesced global loads
__shared__ T rhok_s1[2*BS], rhok_s2[2*BS];
int NB = numk/BS + ((numk%BS) ? 1 : 0);
T mysum = 0.0f;
for (int b=0; b<NB; b++)
{
if (2*b*BS + tid < 2*numk)
{
rhok_s1[tid] = myrhok1[2*b*BS+tid];
rhok_s2[tid] = rhok2[2*b*BS+tid];
}
if ((2*b+1)*BS + tid < 2*numk)
{
rhok_s1[BS+tid] = myrhok1[(2*b+1)*BS+tid];
rhok_s2[BS+tid] = rhok2[(2*b+1)*BS+tid];
}
__syncthreads();
if (b*BS + tid < numk)
mysum += vk[b*BS+tid] * (rhok_s1[2*tid+0]*rhok_s2[2*tid+0] +
rhok_s1[2*tid+1]*rhok_s2[2*tid+1]);
}
__shared__ T shared_sum[BS];
shared_sum[tid] = mysum;
__syncthreads();
for (int s=(BS>>1); s>0; s >>= 1)
{
if (tid < s)
shared_sum[tid] += shared_sum[tid+s];
__syncthreads();
}
if (tid == 0)
sum[blockIdx.x] += shared_sum[0];
}
void
eval_vk_sum_cuda (float *rhok1[], float rhok2[],
float vk[], int numk, float sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel2<float,BS><<<dimGrid,dimBlock>>>
(rhok1, rhok2, vk, numk, sum);
}
void
eval_vk_sum_cuda (double *rhok1[], double rhok2[],
double vk[], int numk, double sum[],
int numWalkers)
{
const int BS=64;
dim3 dimBlock(BS);
dim3 dimGrid(numWalkers);
vk_sum_kernel2<double,BS><<<dimGrid,dimBlock>>>
(rhok1, rhok2, vk, numk, sum);
}
#ifdef CUDA_COULOMB_TEST
__global__ void
test_texture_kernel(float x[], float vals[], int Ntex, int Nvals)
{
float nrm = (float)(Ntex-1)/(float)Ntex;
for (int i=0; i<Nvals; i++)
vals[i] = tex1D(myTex, nrm*x[i]+0.5);
}
#include <stdio.h>
void
TestTexture()
{
int Ntex = 2000;
int Npoints = 31415;
cudaArray *myArray;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
cudaMallocArray(&myArray, &channelDesc, Ntex);
float data[Ntex];
for (int i=0; i<Ntex; i++)
{
double x = (double)i/(double)(Ntex-1) * 2.0*M_PI;
data[i] = (float)sin(x);
}
cudaMemcpyToArrayAsync(myArray, 0, 0, data, Ntex*sizeof(float), cudaMemcpyHostToDevice);
myTex.addressMode[0] = cudaAddressModeClamp;
myTex.filterMode = cudaFilterModeLinear;
myTex.normalized = false;
cudaBindTextureToArray(myTex, myArray, channelDesc);
float *x_d, *vals_d;
cudaMalloc ((void**)&x_d, Npoints*sizeof(float));
cudaMalloc ((void**)&vals_d, Npoints*sizeof(float));
float x_host[Npoints];
for (int i=0; i<Npoints; i++)
x_host[i] = (double)i/(double)(Npoints-1) * (double)Ntex;
cudaMemcpyAsync(x_d, x_host, Npoints*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock(1);
dim3 dimGrid(1);
test_texture_kernel<<<dimGrid,dimBlock>>>(x_d, vals_d, Ntex, Npoints);
float vals_host[Npoints];
cudaMemcpy(vals_host, vals_d, Npoints*sizeof(float), cudaMemcpyDeviceToHost);
for (int i=0; i<Npoints; i++)
fprintf (stderr, "%18.10f %18.10f\n", sin(2.0*M_PI*x_host[i]/(double)Ntex), vals_host[i]);
}
int main()
{
TestTexture();
}
#endif
#include <stdlib.h>
#include <stdio.h>
#include "cuda.h"
int nblock_size = 64;
int ngrid_size = 1;
int maxgsx = 65535;
int mmcc = 0;
static int devid;
static cudaError_t crc;
#define MAXSTREAMS 4
static cudaStream_t streams[MAXSTREAMS] = {NULL,NULL,NULL,NULL};
/* Prototypes for Fortran function called by C */
extern "C" void getfcptr_(unsigned long *carrayref, float *carray,
int *nx);
extern "C" void getf2cptr_(unsigned long *carrayref, float *carray,
int *nx, int *ny);
extern "C" void getc2cptr_(unsigned long *carrayref, float2 *carray,
int *nx, int *ny);
__global__ void emptyKernel() {}
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize(int nblock) {
/* set blocksize */
nblock_size = nblock;
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc() {
/* get major and minor computer capability */
return mmcc;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate(float **g_f, int nsize, int *irc) {
/* allocate global float memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(float)*nsize);
if (crc) {
printf("cudaMalloc float Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_f = (float *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate(int **g_i, int nsize, int *irc) {
/* allocate global integer memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(int)*nsize);
if (crc) {
printf("cudaMalloc int Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_i = (int *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate(float2 **g_c, int nsize, int *irc) {
/* allocate global float2 memory on GPU, return pointer to C */
void *gptr;
crc = cudaMalloc(&gptr,sizeof(float2)*nsize);
if (crc) {
printf("cudaMalloc float2 Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*g_c = (float2 *)gptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate(void *g_d, int *irc) {
/* deallocate global memory on GPU */
crc = cudaFree(g_d);
if (crc) {
printf("cudaFree Error=%d:%s\n",crc,cudaGetErrorString(crc));
*irc = 1;
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_fallocate(float **h_f, int nsize, int *irc) {
/* allocate page-locked float memory on host, return pointer to C */
void *hptr = NULL;
crc = cudaMallocHost(&hptr,sizeof(float)*nsize);
if (crc) {
printf("cudaMallocHost float Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*h_f = (float *)hptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_callocate(float2 **h_c, int nsize, int *irc) {
/* allocate page-locked float2 memory on host, return pointer to C */
void *hptr = NULL;
crc = cudaMallocHost(&hptr,sizeof(float2)*nsize);
if (crc) {
printf("cudaMallocHost float2 Error=%d:%s,l=%d\n",crc,
cudaGetErrorString(crc),nsize);
*irc = 1;
}
*h_c = (float2 *)hptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_deallocate(void *h_d, int *irc) {
/* deallocate page-locked on host */
crc = cudaFreeHost(h_d);
if (crc) {
printf("cudaFreeHost Error=%d:%s\n",crc,cudaGetErrorString(crc));
*irc = 1;
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin(float *f, float *g_f, int nsize) {
/* copy float array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(float)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout(float *f, float *g_f, int nsize) {
/* copy float array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(float)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost float Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin(int *f, int *g_f, int nsize) {
/* copy int array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(int)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice int Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout(int *f, int *g_f, int nsize) {
/* copy int array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(int)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost int Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from host memory to global GPU memory */
crc = cudaMemcpy((void *)g_f,f,sizeof(float2)*nsize,
cudaMemcpyHostToDevice);
if (crc) {
printf("cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout(float2 *f, float2 *g_f, int nsize) {
/* copy float2 array from global GPU memory to host memory */
crc = cudaMemcpy(f,(void *)g_f,sizeof(float2)*nsize,
cudaMemcpyDeviceToHost);
if (crc) {
printf("cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_initstream(int nstream) {
/* Create Stream for requested identifier nstream */
/* nstream should be between 1 and MAXSTREAMS inclusive */
if ((nstream < 1) || (nstream > MAXSTREAMS)) {
printf("gpu_initstream: nstream out of bounds = %d\n",nstream);
exit(1);
}
if (streams[nstream-1] != NULL) {
printf("gpu_initstream: nstream already used = %d\n",nstream);
exit(1);
}
crc = cudaStreamCreate(&streams[nstream-1]);
if (crc) {
printf("cudaStreamCreate Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_delstream(int nstream) {
/* Destroy Stream for requested identifier nstream */
/* nstream should be between 1 and MAXSTREAMS inclusive */
if ((nstream < 1) || (nstream > MAXSTREAMS)) {
printf("gpu_delstream: nstream out of bounds = %d\n",nstream);
}
if (streams[nstream-1] == NULL) {
printf("gpu_delstream: nstream not allocated = %d\n",nstream);
}
crc = cudaStreamDestroy(streams[nstream-1]);
if (crc) {
printf("cudaStreamDestroy Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_waitstream(int nstream) {
/* Synchronize Stream for requested identifier nstream */
/* nstream should be between 0 and MAXSTREAMS inclusive */
cudaStream_t stream = NULL;
if ((nstream >= 0) || (nstream <= MAXSTREAMS)) {
if (nstream > 0) stream = streams[nstream-1];
}
else {
printf("gpu_waitstream: nstream undefined = %d\n",nstream);
exit(1);
}
crc = cudaStreamSynchronize(stream);
if (crc) {
printf("cudaStreamSynchronize Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyin(float2 *f, float2 *g_f, int noff,
int nsize, int nstream) {
/* copy float2 array segment from host memory to global GPU memory */
/* asynchronous copy */
float2 *cptr;
cudaStream_t stream = NULL;
cptr = &g_f[noff];
if ((nstream >= 0) && (nstream <= MAXSTREAMS)) {
if (nstream > 0) stream = streams[nstream-1];
}
else {
printf("gpu_cascopyin: nstream undefined = %d\n",nstream);
exit(1);
}
crc = cudaMemcpyAsync((void *)cptr,f,sizeof(float2)*nsize,
cudaMemcpyHostToDevice,stream);
if (crc) {
printf("Async cudaMemcpyHostToDevice float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyout(float2 *f, float2 *g_f, int noff,
int nsize, int nstream) {
/* copy float2 array segment from global GPU memory to host memory */
/* asynchronous copy */
float2 *cptr;
cudaStream_t stream = NULL;
cptr = &g_f[noff];
if ((nstream >= 0) && (nstream <= MAXSTREAMS)) {
if (nstream > 0) stream = streams[nstream-1];
}
else {
printf("gpu_cascopyout: nstream undefined = %d\n",nstream);
exit(1);
}
crc = cudaMemcpyAsync(f,(void *)cptr,sizeof(float2)*nsize,
cudaMemcpyDeviceToHost,stream);
if (crc) {
printf("Async cudaMemcpyDeviceToHost float2 Error=%d:%s\n",crc,
cudaGetErrorString(crc));
exit(1);
}
return;
}
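/*--------------------------------------------------------------------*/
/* Illustrative sketch, not part of the original library interface:   */
/* one way the stream helpers above could be combined to overlap      */
/* host<->device transfers of the two halves of a float2 array.       */
/* It assumes MAXSTREAMS >= 2 and that the gpu_callocate,             */
/* hpl_callocate, gpu_deallocate and hpl_deallocate helpers defined   */
/* elsewhere in this file are visible at this point; page-locked host */
/* memory is required for the asynchronous copies to actually overlap */
extern "C" void gpu_example_async_copy(int nsize) {
/* round-trip a float2 array through the GPU using two streams */
int irc = 0, nh;
float2 *h_a = NULL, *g_a = NULL;
nh = nsize/2;
/* page-locked host memory and global device memory */
hpl_callocate(&h_a,nsize,&irc);
gpu_callocate(&g_a,nsize,&irc);
if (irc != 0) {
printf("gpu_example_async_copy: allocation error irc=%d\n",irc);
exit(1);
}
/* create two streams */
gpu_initstream(1);
gpu_initstream(2);
/* queue the two halves on different streams so the copies can overlap; */
/* noff selects the first and the second half of the device array */
gpu_cascopyin(h_a,g_a,0,nh,1);
gpu_cascopyin(&h_a[nh],g_a,nh,nsize-nh,2);
/* copy the halves back on the same streams */
gpu_cascopyout(h_a,g_a,0,nh,1);
gpu_cascopyout(&h_a[nh],g_a,nh,nsize-nh,2);
/* wait for both streams before touching h_a on the host */
gpu_waitstream(1);
gpu_waitstream(2);
/* clean up */
gpu_delstream(1);
gpu_delstream(2);
gpu_deallocate((void *)g_a,&irc);
hpl_deallocate((void *)h_a,&irc);
return;
}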
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem(float *g_f, int nsize) {
/* initialize float array in global GPU memory to zero */
crc = cudaMemset((void *)g_f,0,sizeof(float)*nsize);
if (crc) {
printf("cudaMemset Error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size(int nscache) {
/* request preferred cache size, requires CUDA 3.2 or higher */
/* nscache = (0,1,2) = (no,small,big) cache size */
cudaFuncCache cpref;
if ((nscache < 0) || (nscache > 2))
return;
if (nscache==0)
cpref = cudaFuncCachePreferNone;
else if (nscache==1)
cpref = cudaFuncCachePreferShared;
else if (nscache==2)
cpref = cudaFuncCachePreferL1;
crc = cudaThreadSetCacheConfig(cpref);
/* crc = cudaDeviceSetCacheConfig(cpref); */
if (crc) {
printf("cudaThreadSetCacheConfig error=%d:%s\n",crc,
cudaGetErrorString(crc));
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void emptykernel() {
int ngx, ngy;
ngx = nblock_size < 32768 ? nblock_size : 32768;
ngy = (ngrid_size - 1)/ngx + 1;
dim3 dimBlock(nblock_size,1);
dim3 dimGrid(ngx,ngy);
crc = cudaGetLastError();
emptyKernel<<<dimGrid,dimBlock>>>();
cudaThreadSynchronize();
crc = cudaGetLastError();
if (crc) {
printf("emptyKernel error=%d:%s\n",crc,cudaGetErrorString(crc));
exit(1);
}
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu(int dev, int *irc) {
/* initialize CUDA with device dev or select the best GPU available */
/* searches through the devices, selects the device with the most */
/* compute units, and saves the device id in devid */
/* if dev is a valid device, it is used, otherwise the GPU with the */
/* most multi-processors is selected */
/* the error code irc is modified only if there is an error */
int maxcpus = 0, jm = -1;
int j, ndevs, maxunits;
unsigned long msize;
double z;
struct cudaDeviceProp prop;
/* returns the number of devices */
crc = cudaGetDeviceCount(&ndevs);
if (crc) {
printf("cudaGetDeviceCount Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* get information about devices */
for (j = 0; j < ndevs; j++) {
crc = cudaGetDeviceProperties(&prop,j);
if (crc) {
printf("cudaGetDeviceProperties Error=%i:%s\n",crc,
cudaGetErrorString(crc));
prop.name[0] = 0;
}
maxunits = prop.multiProcessorCount;
if (dev <= 0) {
printf("j=%i:CUDA_DEVICE_NAME=%s,CUDA_MULTIPROCESSOR_COUNT=%i\n",
j,prop.name,maxunits);
msize = prop.totalGlobalMem;
z = ((double) msize)/1073741824.0;
mmcc = 10*prop.major + prop.minor;
printf(" CUDA_GLOBAL_MEM_SIZE=%lu(%f GB),Capability=%d\n",
msize,(float) z,mmcc);
if (maxunits > maxcpus) {
maxcpus = maxunits;
jm = j;
}
}
}
devid = jm;
if (dev >= 0)
devid = dev % ndevs;
printf("using device j=%i\n",devid);
/* get properties for this device */
crc = cudaGetDeviceProperties(&prop,devid);
maxgsx = prop.maxGridSize[0];
mmcc = 10*prop.major + prop.minor;
/* set device */
crc = cudaSetDevice(devid);
if (crc) {
printf("cudaSetDevice Error=%i:%s\n",crc,
cudaGetErrorString(crc));
*irc = 1;
return;
}
/* run empty kernel */
emptykernel();
return;
}
extern "C" void end_cu() {
/* terminate CUDA */
crc = cudaThreadExit();
if (crc) {
printf("cudaThreadExit Error=%d:%s\n",crc,cudaGetErrorString(crc));
}
return;
}
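/*--------------------------------------------------------------------*/
/* Illustrative sketch, not part of the original library: a minimal   */
/* host-side start-up/shut-down sequence using init_cu and end_cu.    */
/* Passing a negative device number asks init_cu to pick the GPU with */
/* the most multi-processors, as described in the comments above;     */
/* passing a valid device number selects that device directly.        */
extern "C" void gpu_example_startup() {
int irc = 0;
/* let init_cu pick the best available device */
init_cu(-1,&irc);
if (irc != 0) {
printf("gpu_example_startup: init_cu error irc=%d\n",irc);
exit(1);
}
/* ... allocate device memory, copy data, launch kernels ... */
/* release the device */
end_cu();
return;
}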
/* Interfaces to Fortran */
/*--------------------------------------------------------------------*/
extern "C" void gpu_setgbsize_(int *nblock) {
gpu_setgbsize(*nblock);
return;
}
/*--------------------------------------------------------------------*/
extern "C" int getmmcc_() {
/* get major and minor compute capability */
return getmmcc();
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fallocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float memory on GPU, return pointer to Fortran */
float *fptr;
gpu_fallocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_iallocate_(unsigned long *gp_i, int *nsize,
int *irc) {
/* allocate global integer memory on GPU, return pointer to Fortran */
int *iptr;
gpu_iallocate(&iptr,*nsize,irc);
*gp_i = (long )iptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_callocate_(unsigned long *gp_f, int *nsize,
int *irc) {
/* allocate global float2 memory on GPU, return pointer */
/* to Fortran */
float2 *fptr;
gpu_callocate(&fptr,*nsize,irc);
*gp_f = (long )fptr;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_deallocate_(unsigned long *gp_d, int *irc) {
/* deallocate global memory on GPU, return pointer to Fortran */
void *d;
d = (void *)*gp_d;
gpu_deallocate(d,irc);
*gp_d = 0;
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_f1allocate_(unsigned long *hp_f, int *nx,
int *irc) {
/* allocate page-locked 1d real memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* This procedure needs an interface in Fortran90 */
/* interface */
/* subroutine hpl_f1allocate(hp_f,nx,irc) */
/* implicit none */
/* integer :: nx, irc */
/* real, dimension(:), pointer :: hp_f */
/* end subroutine */
/* end interface */
int nsize;
float *fptr;
nsize = *nx;
/* allocate data on host */
hpl_fallocate(&fptr,nsize,irc);
/* set reference to C data in real Fortran pointer object */
getfcptr_(hp_f,fptr,nx);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_f2allocate_(unsigned long *hp_f, int *nx, int *ny,
int *irc) {
/* allocate page-locked 2d real memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* This procedure needs an interface in Fortran90 */
/* interface */
/* subroutine hpl_f2allocate(hp_f,nx,ny,irc) */
/* implicit none */
/* integer :: nx, ny, irc */
/* real, dimension(:,:), pointer :: hp_f */
/* end subroutine */
/* end interface */
int nsize;
float *fptr;
nsize = (*nx)*(*ny);
/* allocate data on host */
hpl_fallocate(&fptr,nsize,irc);
/* set reference to C data in real Fortran pointer object */
getf2cptr_(hp_f,fptr,nx,ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_c2allocate_(unsigned long *hp_f, int *nx, int *ny,
int *irc) {
/* allocate page-locked 2d complex memory on host, assign */
/* data pointer to Fortran pointer object hp_f */
/* This procedure needs an interface in Fortran90 */
/* interface */
/* subroutine hpl_c2allocate(hp_f,nx,ny,irc) */
/* implicit none */
/* integer :: nx, ny, irc */
/* complex, dimension(:,:), pointer :: hp_f */
/* end subroutine */
/* end interface */
int nsize;
float2 *cptr;
nsize = (*nx)*(*ny);
/* allocate data on host */
hpl_callocate(&cptr,nsize,irc);
/* set reference to C data in complex Fortran pointer object */
getc2cptr_(hp_f,cptr,nx,ny);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void hpl_deallocate_(void *h_d, int *irc) {
/* deallocate page-locked memory on host */
/* pointer in Fortran should also be nullified */
hpl_deallocate(h_d,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyin_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from main memory to global GPU memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_fcopyout_(float *f, unsigned long *gp_f,
int *nsize) {
/* copy float array from global GPU memory to main memory */
float *g_f;
g_f = (float *)*gp_f;
gpu_fcopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyin_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from main memory to global GPU memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_icopyout_(int *f, unsigned long *gp_f, int *nsize) {
/* copy int array from global GPU memory to main memory */
int *g_f;
g_f = (int *)*gp_f;
gpu_icopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyin_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from main memory to global GPU memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyin(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_ccopyout_(float2 *f, unsigned long *gp_f,
int *nsize) {
/* copy float2 array from global GPU memory to main memory */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_ccopyout(f,g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_initstream_(int *nstream) {
gpu_initstream(*nstream);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_delstream_(int *nstream) {
gpu_delstream(*nstream);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_waitstream_(int *nstream) {
gpu_waitstream(*nstream);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyin_(float2 *f, unsigned long *gp_f,
int *noff, int *nsize, int *nstream) {
/* copy float2 array segment from main memory to global GPU memory */
/* asynchronous copy */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_cascopyin(f,g_f,*noff,*nsize,*nstream);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_cascopyout_(float2 *f, unsigned long *gp_f,
int *noff, int *nsize, int *nstream) {
/* copy float2 array segment from global GPU memory to main memory */
/* asynchronous copy */
float2 *g_f;
g_f = (float2 *)*gp_f;
gpu_cascopyout(f,g_f,*noff,*nsize,*nstream);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_zfmem_(unsigned long *gp_f, int *nsize) {
float *g_f;
g_f = (float *)*gp_f;
gpu_zfmem(g_f,*nsize);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void gpu_set_cache_size_(int *nscache) {
gpu_set_cache_size(*nscache);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void init_cu_(int *dev, int *irc) {
init_cu(*dev,irc);
return;
}
/*--------------------------------------------------------------------*/
extern "C" void end_cu_() {
end_cu();
return;
}
|
the_stack
|
namespace lbvh
{
namespace detail
{
struct node
{
std::uint32_t parent_idx; // parent node
std::uint32_t left_idx; // index of left child node
std::uint32_t right_idx; // index of right child node
std::uint32_t object_idx; // == 0xFFFFFFFF if internal node.
};
// a set of pointers for use on the device.
template<typename Real, typename Object, bool IsConst>
struct basic_device_bvh;
template<typename Real, typename Object>
struct basic_device_bvh<Real, Object, false>
{
using real_type = Real;
using aabb_type = aabb<real_type>;
using node_type = detail::node;
using index_type = std::uint32_t;
using object_type = Object;
unsigned int num_nodes; // (# of internal nodes) + (# of leaves) = 2 * num_objects - 1
unsigned int num_objects; // (# of leaves), the same as the number of objects
node_type * nodes;
aabb_type * aabbs;
object_type* objects;
};
template<typename Real, typename Object>
struct basic_device_bvh<Real, Object, true>
{
using real_type = Real;
using aabb_type = aabb<real_type>;
using node_type = detail::node;
using index_type = std::uint32_t;
using object_type = Object;
unsigned int num_nodes; // (# of internal nodes) + (# of leaves) = 2 * num_objects - 1
unsigned int num_objects;// (# of leaves), the same as the number of objects
node_type const* nodes;
aabb_type const* aabbs;
object_type const* objects;
};
template<typename UInt>
__device__
inline uint2 determine_range(UInt const* node_code,
const unsigned int num_leaves, unsigned int idx)
{
if(idx == 0)
{
return make_uint2(0, num_leaves-1);
}
// determine direction of the range
const UInt self_code = node_code[idx];
const int L_delta = common_upper_bits(self_code, node_code[idx-1]);
const int R_delta = common_upper_bits(self_code, node_code[idx+1]);
const int d = (R_delta > L_delta) ? 1 : -1;
// Compute upper bound for the length of the range
const int delta_min = thrust::min(L_delta, R_delta);
int l_max = 2;
int delta = -1;
int i_tmp = idx + d * l_max;
if(0 <= i_tmp && i_tmp < num_leaves)
{
delta = common_upper_bits(self_code, node_code[i_tmp]);
}
while(delta > delta_min)
{
l_max <<= 1;
i_tmp = idx + d * l_max;
delta = -1;
if(0 <= i_tmp && i_tmp < num_leaves)
{
delta = common_upper_bits(self_code, node_code[i_tmp]);
}
}
// Find the other end by binary search
int l = 0;
int t = l_max >> 1;
while(t > 0)
{
i_tmp = idx + (l + t) * d;
delta = -1;
if(0 <= i_tmp && i_tmp < num_leaves)
{
delta = common_upper_bits(self_code, node_code[i_tmp]);
}
if(delta > delta_min)
{
l += t;
}
t >>= 1;
}
unsigned int jdx = idx + l * d;
if(d < 0)
{
thrust::swap(idx, jdx); // make sure that idx < jdx
}
return make_uint2(idx, jdx);
}
template<typename UInt>
__device__
inline unsigned int find_split(UInt const* node_code, const unsigned int num_leaves,
const unsigned int first, const unsigned int last) noexcept
{
const UInt first_code = node_code[first];
const UInt last_code = node_code[last];
if (first_code == last_code)
{
return (first + last) >> 1;
}
const int delta_node = common_upper_bits(first_code, last_code);
// binary search...
int split = first;
int stride = last - first;
do
{
stride = (stride + 1) >> 1;
const int middle = split + stride;
if (middle < last)
{
const int delta = common_upper_bits(first_code, node_code[middle]);
if (delta > delta_node)
{
split = middle;
}
}
}
while(stride > 1);
return split;
}
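// Worked example (illustrative only; it assumes that common_upper_bits(a, b)
// returns the number of identical leading bits of a and b, e.g.
// __clz(a ^ b) for 32-bit keys, as in the reference LBVH construction):
// for the sorted 32-bit morton codes {1, 2, 4, 7} (binary 0001, 0010, 0100,
// 0111) and the range [first, last] = [0, 3],
// delta_node = common_upper_bits(1, 7) = 29 (the top 29 bits agree).
// The binary search first probes middle = 2: common_upper_bits(1, 4) = 29,
// which is not greater than delta_node, so split stays at 0; it then probes
// middle = 1: common_upper_bits(1, 2) = 30 > 29, so split becomes 1.
// find_split therefore returns 1, and the node separates {1, 2} from {4, 7},
// i.e. it splits at the highest bit in which the codes of the range differ.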
template<typename Real, typename Object, bool IsConst, typename UInt>
void construct_internal_nodes(const basic_device_bvh<Real, Object, IsConst>& self,
UInt const* node_code, const unsigned int num_objects)
{
thrust::for_each(thrust::device,
thrust::make_counting_iterator<unsigned int>(0),
thrust::make_counting_iterator<unsigned int>(num_objects - 1),
[self, node_code, num_objects] __device__ (const unsigned int idx)
{
self.nodes[idx].object_idx = 0xFFFFFFFF; // internal nodes
const uint2 ij = determine_range(node_code, num_objects, idx);
const int gamma = find_split(node_code, num_objects, ij.x, ij.y);
self.nodes[idx].left_idx = gamma;
self.nodes[idx].right_idx = gamma + 1;
if(thrust::min(ij.x, ij.y) == gamma)
{
self.nodes[idx].left_idx += num_objects - 1;
}
if(thrust::max(ij.x, ij.y) == gamma + 1)
{
self.nodes[idx].right_idx += num_objects - 1;
}
self.nodes[self.nodes[idx].left_idx].parent_idx = idx;
self.nodes[self.nodes[idx].right_idx].parent_idx = idx;
return;
});
return;
}
} // detail
template<typename Real, typename Object>
struct default_morton_code_calculator
{
default_morton_code_calculator(aabb<Real> w): whole(w) {}
default_morton_code_calculator() = default;
~default_morton_code_calculator() = default;
default_morton_code_calculator(default_morton_code_calculator const&) = default;
default_morton_code_calculator(default_morton_code_calculator&&) = default;
default_morton_code_calculator& operator=(default_morton_code_calculator const&) = default;
default_morton_code_calculator& operator=(default_morton_code_calculator&&) = default;
__device__ __host__
inline unsigned int operator()(const Object&, const aabb<Real>& box) noexcept
{
auto p = centroid(box);
p.x -= whole.lower.x;
p.y -= whole.lower.y;
p.z -= whole.lower.z;
p.x /= (whole.upper.x - whole.lower.x);
p.y /= (whole.upper.y - whole.lower.y);
p.z /= (whole.upper.z - whole.lower.z);
return morton_code(p);
}
aabb<Real> whole;
};
template<typename Real, typename Object>
using bvh_device = detail::basic_device_bvh<Real, Object, false>;
template<typename Real, typename Object>
using cbvh_device = detail::basic_device_bvh<Real, Object, true>;
template<typename Real, typename Object, typename AABBGetter,
typename MortonCodeCalculator = default_morton_code_calculator<Real, Object>>
class bvh
{
public:
using real_type = Real;
using index_type = std::uint32_t;
using object_type = Object;
using aabb_type = aabb<real_type>;
using node_type = detail::node;
using aabb_getter_type = AABBGetter;
using morton_code_calculator_type = MortonCodeCalculator;
public:
template<typename InputIterator>
bvh(InputIterator first, InputIterator last, aabb_getter_type aabb_getter)
: objects_d_(first, last), aabb_getter_(aabb_getter)
{
this->construct();
}
bvh() = default;
~bvh() = default;
bvh(const bvh&) = default;
bvh(bvh&&) = default;
bvh& operator=(const bvh&) = default;
bvh& operator=(bvh&&) = default;
void clear()
{
this->objects_d_.clear();
this->aabbs_.clear();
this->nodes_.clear();
return ;
}
template<typename InputIterator>
void assign(InputIterator first, InputIterator last)
{
this->objects_d_.assign(first, last);
this->construct();
return;
}
bvh_device<real_type, object_type> get_device_repr() noexcept
{
return bvh_device<real_type, object_type>{
static_cast<unsigned int>(nodes_.size()),
static_cast<unsigned int>(objects_d_.size()),
nodes_.data().get(), aabbs_.data().get(), objects_d_.data().get()
};
}
cbvh_device<real_type, object_type> get_device_repr() const noexcept
{
return cbvh_device<real_type, object_type>{
static_cast<unsigned int>(nodes_.size()),
static_cast<unsigned int>(objects_d_.size()),
nodes_.data().get(), aabbs_.data().get(), objects_d_.data().get()
};
}
void construct()
{
if(objects_d_.size() == 0u) {return;}
const unsigned int num_objects = objects_d_.size();
const unsigned int num_internal_nodes = num_objects - 1;
const unsigned int num_nodes = num_objects * 2 - 1;
// --------------------------------------------------------------------
// calculate the morton code of each object
const auto inf = std::numeric_limits<real_type>::infinity();
aabb_type default_aabb;
default_aabb.upper.x = -inf; default_aabb.lower.x = inf;
default_aabb.upper.y = -inf; default_aabb.lower.y = inf;
default_aabb.upper.z = -inf; default_aabb.lower.z = inf;
this->aabbs_.resize(num_nodes, default_aabb);
thrust::transform(this->objects_d_.begin(), this->objects_d_.end(),
aabbs_.begin() + num_internal_nodes, aabb_getter_);
const auto aabb_whole = thrust::reduce(
aabbs_.begin() + num_internal_nodes, aabbs_.end(), default_aabb,
[] __device__ (const aabb_type& lhs, const aabb_type& rhs) {
return merge(lhs, rhs);
});
thrust::device_vector<unsigned int> morton(num_objects);
thrust::transform(this->objects_d_.begin(), this->objects_d_.end(),
aabbs_.begin() + num_internal_nodes, morton.begin(),
morton_code_calculator_type(aabb_whole));
// --------------------------------------------------------------------
// sort object-indices by morton code
thrust::device_vector<unsigned int> indices(num_objects);
thrust::copy(thrust::make_counting_iterator<index_type>(0),
thrust::make_counting_iterator<index_type>(num_objects),
indices.begin());
// stable sort keeps the indices of equal morton codes in ascending order
thrust::stable_sort_by_key(morton.begin(), morton.end(),
thrust::make_zip_iterator(
thrust::make_tuple(aabbs_.begin() + num_internal_nodes,
indices.begin())));
// --------------------------------------------------------------------
// check whether the morton codes are unique
thrust::device_vector<unsigned long long int> morton64(num_objects);
const auto uniqued = thrust::unique_copy(morton.begin(), morton.end(),
morton64.begin());
const bool morton_code_is_unique = (morton64.end() == uniqued);
if(!morton_code_is_unique)
{
thrust::transform(morton.begin(), morton.end(), indices.begin(),
morton64.begin(),
[] __device__ (const unsigned int m, const unsigned int idx)
{
unsigned long long int m64 = m;
m64 <<= 32;
m64 |= idx;
return m64;
});
}
// --------------------------------------------------------------------
// construct leaf nodes and aabbs
node_type default_node;
default_node.parent_idx = 0xFFFFFFFF;
default_node.left_idx = 0xFFFFFFFF;
default_node.right_idx = 0xFFFFFFFF;
default_node.object_idx = 0xFFFFFFFF;
this->nodes_.resize(num_nodes, default_node);
thrust::transform(indices.begin(), indices.end(),
this->nodes_.begin() + num_internal_nodes,
[] __device__ (const index_type idx)
{
node_type n;
n.parent_idx = 0xFFFFFFFF;
n.left_idx = 0xFFFFFFFF;
n.right_idx = 0xFFFFFFFF;
n.object_idx = idx;
return n;
});
// --------------------------------------------------------------------
// construct internal nodes
const auto self = this->get_device_repr();
if(morton_code_is_unique)
{
const unsigned int* node_code = morton.data().get();
detail::construct_internal_nodes(self, node_code, num_objects);
}
else // 64bit version
{
const unsigned long long int* node_code = morton64.data().get();
detail::construct_internal_nodes(self, node_code, num_objects);
}
// --------------------------------------------------------------------
// create AABB for each node by bottom-up strategy
thrust::device_vector<int> flag_container(num_internal_nodes, 0);
const auto flags = flag_container.data().get();
thrust::for_each(thrust::device,
thrust::make_counting_iterator<index_type>(num_internal_nodes),
thrust::make_counting_iterator<index_type>(num_nodes),
[self, flags] __device__ (index_type idx)
{
unsigned int parent = self.nodes[idx].parent_idx;
while(parent != 0xFFFFFFFF) // parent == 0xFFFFFFFF means the node is the root
{
const int old = atomicCAS(flags + parent, 0, 1);
if(old == 0)
{
// this is the first thread to reach this node.
// wait for the thread coming from the other child node.
return;
}
assert(old == 1);
// here, the flag was already 1, so this thread is the second
// one to arrive. merge the AABBs of both children.
const auto lidx = self.nodes[parent].left_idx;
const auto ridx = self.nodes[parent].right_idx;
const auto lbox = self.aabbs[lidx];
const auto rbox = self.aabbs[ridx];
self.aabbs[parent] = merge(lbox, rbox);
// move on to the next parent...
parent = self.nodes[parent].parent_idx;
}
return;
});
return;
}
private:
thrust::device_vector<object_type> objects_d_;
thrust::device_vector<aabb_type> aabbs_;
thrust::device_vector<node_type> nodes_;
aabb_getter_type aabb_getter_;
};
} // lbvh
#endif// LBVH_BVH_CUH
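// --------------------------------------------------------------------
// Illustrative usage sketch, kept as a comment so it does not change the
// header. It shows how the bvh class above might be instantiated over a set
// of float4 points. The point_aabb_getter functor and the include path are
// assumptions made for the example; the only facts taken from this header
// are the bvh constructor signature, get_device_repr(), and that aabb<Real>
// exposes .lower/.upper members with .x/.y/.z components.
//
//   #include "bvh.cuh"   // this header; the actual path may differ
//   #include <vector>
//
//   struct point_aabb_getter
//   {
//       __device__
//       lbvh::aabb<float> operator()(const float4 p) const noexcept
//       {
//           // a degenerate box whose lower and upper corners are the point
//           lbvh::aabb<float> box;
//           box.lower.x = p.x; box.lower.y = p.y; box.lower.z = p.z;
//           box.upper.x = p.x; box.upper.y = p.y; box.upper.z = p.z;
//           return box;
//       }
//   };
//
//   int main()
//   {
//       std::vector<float4> ps = {
//           make_float4(0.f, 0.f, 0.f, 0.f),
//           make_float4(1.f, 0.f, 0.f, 0.f),
//           make_float4(0.f, 1.f, 0.f, 0.f),
//           make_float4(0.f, 0.f, 1.f, 0.f)
//       };
//       // construct() runs on the device: it computes per-object AABBs and
//       // morton codes, sorts them, and builds the internal nodes bottom-up
//       lbvh::bvh<float, float4, point_aabb_getter> tree(
//           ps.begin(), ps.end(), point_aabb_getter{});
//       // the POD device representation can be passed to kernels or
//       // captured by device lambdas for traversal
//       const auto dev_tree = tree.get_device_repr();
//       (void)dev_tree;
//       return 0;
//   }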
|
the_stack
|
* @file
* cub::DeviceSegmentedSort provides device-wide, parallel operations for
* computing a batched sort across multiple, non-overlapping sequences of
* data items residing within device-accessible memory.
*/
#pragma once
#include <cub/config.cuh>
#include <cub/device/dispatch/dispatch_segmented_sort.cuh>
#include <cub/util_namespace.cuh>
CUB_NAMESPACE_BEGIN
/**
* @brief DeviceSegmentedSort provides device-wide, parallel operations for
* computing a batched sort across multiple, non-overlapping sequences of
* data items residing within device-accessible memory.
*
* @ingroup SegmentedModule
*
* @par Overview
* The algorithm arranges items into ascending (or descending) order.
* The underlying sorting algorithm is undefined. Depending on the segment size,
* it might be radix sort, merge sort or something else. Therefore, no
* assumptions on the underlying implementation should be made.
*
* @par Differences from DeviceSegmentedRadixSort
* DeviceSegmentedRadixSort is optimized for significantly large segments (tens
* of thousands of items and more). Nevertheless, some domains produce a wide
* range of segment sizes. DeviceSegmentedSort partitions segments into size
* groups and specializes the sorting algorithm for each group. This approach
* leads to better resource utilization in the presence of segment size
* imbalance or moderate segment sizes (up to thousands of items).
* This algorithm is more complex and consists of multiple kernels. This fact
* leads to longer compilation times as well as larger binary sizes.
*
* @par Supported Types
* The algorithm has to satisfy the underlying algorithms restrictions. Radix
* sort usage restricts the list of supported types. Therefore,
* DeviceSegmentedSort can sort all of the built-in C++ numeric primitive types
* (`unsigned char`, `int`, `double`, etc.) as well as CUDA's `__half` and
* `__nv_bfloat16` 16-bit floating-point types.
*
* @par A simple example
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_values_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9]
* // d_values_out <-- [1, 2, 0, 5, 4, 3, 6]
* @endcode
*/
struct DeviceSegmentedSort
{
/*************************************************************************//**
* @name Keys-only
****************************************************************************/
//@{
/**
* @brief Sorts segments of keys into ascending order. Approximately
* `num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortKeys is not guaranteed to be stable. That is, suppose that @p i and
* @p j are equivalent: neither one is less than the other. It is not
* guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible
* // pointers for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortKeys(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortKeys(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input data of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length `num_segments`, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* `num_segments`, such that `d_end_offsets[i] - 1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i] - 1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is `false`.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortKeys(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = false;
constexpr bool is_overwrite_okay = false;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
cub::NullType,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
DoubleBuffer<KeyT> d_keys(const_cast<KeyT *>(d_keys_in), d_keys_out);
DoubleBuffer<NullType> d_values;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into descending order. Approximately
* `num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments + 1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets + 1`).
* - SortKeysDescending is not guaranteed to be stable. That is, suppose that
* @p i and @p j are equivalent: neither one is less than the other. It is
* not guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no
* work is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input data of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i] - 1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i] - 1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortKeysDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = true;
constexpr bool is_overwrite_okay = false;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
cub::NullType,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
DoubleBuffer<KeyT> d_keys(const_cast<KeyT *>(d_keys_in), d_keys_out);
DoubleBuffer<NullType> d_values;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into ascending order. Approximately
* `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers managed by a
* DoubleBuffer structure that indicates which of the two buffers is
* "current" (and thus contains the input data to be sorted).
* - The contents of both buffers may be altered by the sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within the DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortKeys is not guaranteed to be stable. That is, suppose that
* @p i and @p j are equivalent: neither one is less than the other. It is
* not guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible
* // pointers for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a DoubleBuffer to wrap the pair of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortKeys(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortKeys(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no
* work is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*`
* and `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i] - 1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i] - 1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortKeys(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = false;
constexpr bool is_overwrite_okay = true;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
cub::NullType,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
DoubleBuffer<NullType> d_values;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into descending order. Approximately
* `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers managed by a
* DoubleBuffer structure that indicates which of the two buffers is
* "current" (and thus contains the input data to be sorted).
* - The contents of both buffers may be altered by the sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within the DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments + 1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets + 1`).
* - SortKeysDescending is not guaranteed to be stable. That is, suppose that
* @p i and @p j are equivalent: neither one is less than the other. It is
* not guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for
* // sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a DoubleBuffer to wrap the pair of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i] - 1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i] - 1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortKeysDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = true;
constexpr bool is_overwrite_okay = true;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
cub::NullType,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
DoubleBuffer<NullType> d_values;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into ascending order. Approximately
* `num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortKeys is stable: it preserves the relative ordering of
* equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortKeys(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortKeys(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input data of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortKeys(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortKeys<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into descending order. Approximately
* `num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortKeysDescending is stable: it preserves the relative ordering of
* equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input data of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortKeysDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortKeysDescending<KeyT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into ascending order. Approximately
* `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers managed by a
* DoubleBuffer structure that indicates which of the two buffers is
* "current" (and thus contains the input data to be sorted).
* - The contents of both buffers may be altered by the sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within the DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortKeys is stable: it preserves the relative ordering of
* equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a DoubleBuffer to wrap the pair of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortKeys(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortKeys(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i] - 1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i] - 1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortKeys(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortKeys<KeyT, BeginOffsetIteratorT, EndOffsetIteratorT>(
d_temp_storage,
temp_storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of keys into descending order. Approximately
* `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers managed by a
* DoubleBuffer structure that indicates which of the two buffers is
* "current" (and thus contains the input data to be sorted).
* - The contents of both buffers may be altered by the sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within the DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortKeysDescending is stable: it preserves the relative ordering of
* equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a DoubleBuffer to wrap the pair of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortKeysDescending(
* d_temp_storage, temp_storage_bytes, d_keys,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`. If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the
* i-th segment is considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortKeysDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortKeysDescending<KeyT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
//@} end member group
/*************************************************************************//**
* @name Key-value pairs
****************************************************************************/
//@{
/**
* @brief Sorts segments of key-value pairs into ascending order.
* Approximately `2*num_items + 2*num_segments` auxiliary storage
* required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortPairs is not guaranteed to be stable. That is, suppose that @p i and
* @p j are equivalent: neither one is less than the other. It is not
* guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_values_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9]
* // d_values_out <-- [1, 2, 0, 5, 4, 3, 6]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input sequence of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] d_values_in
* Device-accessible pointer to the corresponding input sequence of
* associated value items
*
* @param[out] d_values_out
* Device-accessible pointer to the correspondingly-reordered output
* sequence of associated value items
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortPairs(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = false;
constexpr bool is_overwrite_okay = false;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
ValueT,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
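// The const input pointers below are wrapped in DoubleBuffers (hence the
// const_cast), but because is_overwrite_okay is false the dispatch may not
// use them as scratch, so the input sequences remain unaltered as documented
// above.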
DoubleBuffer<KeyT> d_keys(const_cast<KeyT *>(d_keys_in), d_keys_out);
DoubleBuffer<ValueT> d_values(const_cast<ValueT *>(d_values_in), d_values_out);
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into descending order. Approximately
* `2*num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortPairsDescending is not guaranteed to be stable. That is, suppose that @p i and
* @p j are equivalent: neither one is less than the other. It is not
* guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for
* // sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_values_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortPairsDescending(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortPairsDescending(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0]
* // d_values_out <-- [0, 2, 1, 6, 3, 4, 5]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input sequence of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] d_values_in
* Device-accessible pointer to the corresponding input sequence of
* associated value items
*
* @param[out] d_values_out
* Device-accessible pointer to the correspondingly-reordered output
* sequence of associated value items
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortPairsDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = true;
constexpr bool is_overwrite_okay = false;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
ValueT,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
DoubleBuffer<KeyT> d_keys(const_cast<KeyT *>(d_keys_in), d_keys_out);
DoubleBuffer<ValueT> d_values(const_cast<ValueT *>(d_values_in), d_values_out);
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into ascending order.
* Approximately `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers and a corresponding
* pair of associated value buffers. Each pair is managed by a DoubleBuffer
* structure that indicates which of the two buffers is "current" (and thus
* contains the input data to be sorted).
* - The contents of both buffers within each pair may be altered by the sorting
* operation.
* - Upon completion, the sorting operation will update the "current" indicator
* within each DoubleBuffer wrapper to reference which of the two buffers
* now contains the sorted output sequence (a function of the number of key bits
* specified and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortPairs is not guaranteed to be stable. That is, suppose that @p i and
* @p j are equivalent: neither one is less than the other. It is not
* guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a set of DoubleBuffers to wrap pairs of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
* cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortPairs(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9]
* // d_values.Current() <-- [1, 2, 0, 5, 4, 3, 6]
*
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in,out] d_values
* Double-buffer of values whose "current" device-accessible buffer contains
* the unsorted input values and, upon return, is updated to point to the
* sorted output values
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations
* to be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortPairs(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
DoubleBuffer<ValueT> &d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = false;
constexpr bool is_overwrite_okay = true;
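// Both buffers of each caller-provided DoubleBuffer may be used as scratch
// (is_overwrite_okay is true), which is why this overload needs only about
// 2*num_segments of temporary storage, as noted in the documentation above.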
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
ValueT,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into descending order.
* Approximately `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers and a corresponding
* pair of associated value buffers. Each pair is managed by a DoubleBuffer
* structure that indicates which of the two buffers is "current" (and thus
* contains the input data to be sorted).
* - The contents of both buffers within each pair may be altered by the
* sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within each DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits specified and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - SortPairsDescending is not guaranteed to be stable. That is, suppose that
* @p i and @p j are equivalent: neither one is less than the other. It is
* not guaranteed that the relative order of these two elements will be
* preserved by sort.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers for
* // sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a set of DoubleBuffers to wrap pairs of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
* cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::SortPairsDescending(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::SortPairsDescending(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0]
* // d_values.Current() <-- [0, 2, 1, 6, 3, 4, 5]
*
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in,out] d_values
* Double-buffer of values whose "current" device-accessible buffer contains
* the unsorted input values and, upon return, is updated to point to the
* sorted output values
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
SortPairsDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
DoubleBuffer<ValueT> &d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
constexpr bool is_descending = true;
constexpr bool is_overwrite_okay = true;
using DispatchT = DispatchSegmentedSort<is_descending,
KeyT,
ValueT,
int,
BeginOffsetIteratorT,
EndOffsetIteratorT>;
return DispatchT::Dispatch(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
is_overwrite_okay,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into ascending order. Approximately
* `2*num_items + 2*num_segments` auxiliary storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortPairs is stable: it preserves the relative ordering of
* equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_values_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortPairs(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9]
* // d_values_out <-- [1, 2, 0, 5, 4, 3, 6]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When nullptr, the
* required allocation size is written to @p temp_storage_bytes and no work is done.
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input sequence of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] d_values_in
* Device-accessible pointer to the corresponding input sequence of
* associated value items
*
* @param[out] d_values_out
* Device-accessible pointer to the correspondingly-reordered output
* sequence of associated value items
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortPairs(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortPairs<KeyT,
ValueT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
d_values_in,
d_values_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into descending order.
* Approximately `2*num_items + 2*num_segments` auxiliary
* storage required.
*
* @par
* - The contents of the input data are not altered by the sorting operation.
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortPairsDescending is stable: it preserves the relative ordering
* of equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_keys_out; // e.g., [-, -, -, -, -, -, -]
* int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_values_out; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortPairsDescending(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortPairsDescending(
* d_temp_storage, temp_storage_bytes,
* d_keys_in, d_keys_out, d_values_in, d_values_out,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0]
* // d_values_out <-- [0, 2, 1, 6, 3, 4, 5]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in] d_keys_in
* Device-accessible pointer to the input sequence of key data to sort
*
* @param[out] d_keys_out
* Device-accessible pointer to the sorted output sequence of key data
*
* @param[in] d_values_in
* Device-accessible pointer to the corresponding input sequence of
* associated value items
*
* @param[out] d_values_out
* Device-accessible pointer to the correspondingly-reordered output
* sequence of associated value items
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortPairsDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
const KeyT *d_keys_in,
KeyT *d_keys_out,
const ValueT *d_values_in,
ValueT *d_values_out,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortPairsDescending<KeyT,
ValueT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys_in,
d_keys_out,
d_values_in,
d_values_out,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into ascending order.
* Approximately `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers and a corresponding
* pair of associated value buffers. Each pair is managed by a DoubleBuffer
* structure that indicates which of the two buffers is "current" (and thus
* contains the input data to be sorted).
* - The contents of both buffers within each pair may be altered by the
* sorting operation.
* - Upon completion, the sorting operation will update the "current"
* indicator within each DoubleBuffer wrapper to reference which of the two
* buffers now contains the sorted output sequence (a function of the number
* of key bits specified and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortPairs is stable: it preserves the relative ordering
* of equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a set of DoubleBuffers to wrap pairs of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
* cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortPairs(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortPairs(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9]
* // d_values.Current() <-- [1, 2, 0, 5, 4, 3, 6]
*
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in,out] d_values
* Double-buffer of values whose "current" device-accessible buffer contains
* the unsorted input values and, upon return, is updated to point to the
* sorted output values
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortPairs(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
DoubleBuffer<ValueT> &d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortPairs<KeyT,
ValueT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
/**
* @brief Sorts segments of key-value pairs into descending order.
* Approximately `2*num_segments` auxiliary storage required.
*
* @par
* - The sorting operation is given a pair of key buffers and a corresponding
* pair of associated value buffers. Each pair is managed by a DoubleBuffer
* structure that indicates which of the two buffers is "current" (and thus
* contains the input data to be sorted).
* - The contents of both buffers within each pair may be altered by the sorting
* operation.
* - Upon completion, the sorting operation will update the "current" indicator
* within each DoubleBuffer wrapper to reference which of the two buffers
* now contains the sorted output sequence (a function of the number of key bits
* specified and the targeted device architecture).
* - When the input is a contiguous sequence of segments, a single sequence
* @p segment_offsets (of length `num_segments+1`) can be aliased
* for both the @p d_begin_offsets and @p d_end_offsets parameters (where
* the latter is specified as `segment_offsets+1`).
* - StableSortPairsDescending is stable: it preserves the relative ordering
* of equivalent elements. That is, if @p x and @p y are elements such that
* @p x precedes @p y, and if the two elements are equivalent (neither
* @p x < @p y nor @p y < @p x) then a postcondition of stable sort is that
* @p x still precedes @p y.
*
* @par Snippet
* The code snippet below illustrates the batched sorting of three segments
* (with one zero-length segment) of @p int keys with associated vector of
* @p int values.
*
* @par
* @code
* #include <cub/cub.cuh>
* // or equivalently <cub/device/device_segmented_sort.cuh>
*
* // Declare, allocate, and initialize device-accessible pointers
* // for sorting data
* int num_items; // e.g., 7
* int num_segments; // e.g., 3
* int *d_offsets; // e.g., [0, 3, 3, 7]
* int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9]
* int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -]
* int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6]
* int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -]
* ...
*
* // Create a set of DoubleBuffers to wrap pairs of device pointers
* cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);
* cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf);
*
* // Determine temporary device storage requirements
* void *d_temp_storage = NULL;
* size_t temp_storage_bytes = 0;
* cub::DeviceSegmentedSort::StableSortPairsDescending(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // Allocate temporary storage
* cudaMalloc(&d_temp_storage, temp_storage_bytes);
*
* // Run sorting operation
* cub::DeviceSegmentedSort::StableSortPairsDescending(
* d_temp_storage, temp_storage_bytes, d_keys, d_values,
* num_items, num_segments, d_offsets, d_offsets + 1);
*
* // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0]
* // d_values.Current() <-- [0, 2, 1, 6, 3, 4, 5]
* @endcode
*
* @tparam KeyT
* <b>[inferred]</b> Key type
*
* @tparam ValueT
* <b>[inferred]</b> Value type
*
* @tparam BeginOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* beginning offsets \iterator
*
* @tparam EndOffsetIteratorT
* <b>[inferred]</b> Random-access input iterator type for reading segment
* ending offsets \iterator
*
* @param[in] d_temp_storage
* Device-accessible allocation of temporary storage. When `nullptr`, the
* required allocation size is written to @p temp_storage_bytes and no work
* is done
*
* @param[in,out] temp_storage_bytes
* Reference to size in bytes of @p d_temp_storage allocation
*
* @param[in,out] d_keys
* Reference to the double-buffer of keys whose "current" device-accessible
* buffer contains the unsorted input keys and, upon return, is updated to
* point to the sorted output keys
*
* @param[in,out] d_values
* Double-buffer of values whose "current" device-accessible buffer contains
* the unsorted input values and, upon return, is updated to point to the
* sorted output values
*
* @param[in] num_items
* The total number of items to sort (across all segments)
*
* @param[in] num_segments
* The number of segments that comprise the sorting data
*
* @param[in] d_begin_offsets
* Random-access input iterator to the sequence of beginning offsets of
* length @p num_segments, such that `d_begin_offsets[i]` is the first
* element of the <em>i</em><sup>th</sup> data segment in `d_keys_*` and
* `d_values_*`
*
* @param[in] d_end_offsets
* Random-access input iterator to the sequence of ending offsets of length
* @p num_segments, such that `d_end_offsets[i]-1` is the last element of
* the <em>i</em><sup>th</sup> data segment in `d_keys_*` and `d_values_*`.
* If `d_end_offsets[i]-1 <= d_begin_offsets[i]`, the i-th segment is
* considered empty.
*
* @param[in] stream
* <b>[optional]</b> CUDA stream to launch kernels within. Default is
* stream<sub>0</sub>.
*
* @param[in] debug_synchronous
* <b>[optional]</b> Whether or not to synchronize the stream after every
* kernel launch to check for errors. Also causes launch configurations to
* be printed to the console. Default is @p false.
*/
template <typename KeyT,
typename ValueT,
typename BeginOffsetIteratorT,
typename EndOffsetIteratorT>
CUB_RUNTIME_FUNCTION static cudaError_t
StableSortPairsDescending(void *d_temp_storage,
std::size_t &temp_storage_bytes,
DoubleBuffer<KeyT> &d_keys,
DoubleBuffer<ValueT> &d_values,
int num_items,
int num_segments,
BeginOffsetIteratorT d_begin_offsets,
EndOffsetIteratorT d_end_offsets,
cudaStream_t stream = 0,
bool debug_synchronous = false)
{
return SortPairsDescending<KeyT,
ValueT,
BeginOffsetIteratorT,
EndOffsetIteratorT>(d_temp_storage,
temp_storage_bytes,
d_keys,
d_values,
num_items,
num_segments,
d_begin_offsets,
d_end_offsets,
stream,
debug_synchronous);
}
//@} end member group
};
CUB_NAMESPACE_END
#include "k2/csrc/array_ops.h"
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// Caution: this is really a .cu file. It contains mixed host and device code.
class TopSorter {
public:
/**
Topological sorter object. You should call TopSort() after
constructing it. Please see TopSort() declaration in header for
high-level overview of the algorithm.
@param [in] fsas A vector of FSAs; must have 3 axes.
*/
explicit TopSorter(FsaVec &fsas) : c_(fsas.Context()), fsas_(fsas) {
K2_CHECK_EQ(fsas_.NumAxes(), 3);
}
/*
Return the ragged array containing the states active on the 1st iteration of
the algorithm. These just correspond to the start-states of all
the FSAs, and also the final-states for all FSAs in which final-states
had in-degree zero (no arcs entering them).
Note: in the originally published algorithm we start with all states
that have in-degree zero, but in the context of this toolkit there
is (I believe) no use in states that aren't accessible from the start
state, so we remove them.
*/
std::unique_ptr<Ragged<int32_t>> GetInitialBatch() {
NVTX_RANGE(K2_FUNC);
// Initialize it with a list of all states that currently have zero
// in-degree.
int32_t num_states = state_in_degree_.Dim();
Renumbering state_renumbering(c_, num_states);
// NOTE: this is not very optimal given that we're keeping only a small
// number of states, but at this point I don't want to optimize too heavily.
char *keep_data = state_renumbering.Keep().Data();
const int32_t *state_in_degree_data = state_in_degree_.Data(),
*fsas_row_ids1_data = fsas_.RowIds(1).Data(),
*fsas_row_splits1_data = fsas_.RowSplits(1).Data();
K2_EVAL(
c_, num_states, lambda_set_keep, (int32_t fsas_idx01)->void {
// Make this state a member of the initial batch if it has zero
// in-degree (note: this won't include final states, as we incremented
// their in-degree to avoid them appearing here.)
keep_data[fsas_idx01] = state_in_degree_data[fsas_idx01] == 0;
});
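// New2Old() enumerates the kept entries, i.e. the idx01's of all states whose
// (adjusted) in-degree is zero; these states form the initial batch.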
Array1<int32_t> first_iter_values = state_renumbering.New2Old();
Array1<int32_t> first_iter_row_ids = fsas_.RowIds(1)[first_iter_values];
int32_t num_fsas = fsas_.Dim0();
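// Convert the per-state FSA indexes (row-ids) into row-splits so the initial
// batch can be returned as a ragged array of shape [fsas][states].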
Array1<int32_t> first_iter_row_splits(c_, num_fsas + 1);
RowIdsToRowSplits(first_iter_row_ids, &first_iter_row_splits);
return std::make_unique<Ragged<int32_t>>(
RaggedShape2(&first_iter_row_splits, &first_iter_row_ids,
first_iter_row_ids.Dim()),
first_iter_values);
}
/*
Computes the next batch of states
@param [in] cur_states Ragged array with 2 axes, with the shape of
`[fsas][states]`, containing state-indexes (idx01) into fsas_.
These are states which already have in-degree 0
         @return  Returns the next batch of states, i.e. those whose in-degree
                  becomes zero once the arcs leaving `cur_states` have been
                  processed; returns nullptr if there are no such states.
*/
std::unique_ptr<Ragged<int32_t>> GetNextBatch(Ragged<int32_t> &cur_states) {
NVTX_RANGE(K2_FUNC);
// Process arcs leaving all states in `cur_states`
// First figure out how many arcs leave each state.
Array1<int32_t> num_arcs_per_state(c_, cur_states.NumElements() + 1);
int32_t *num_arcs_per_state_data = num_arcs_per_state.Data();
const int32_t *states_data = cur_states.values.Data(),
*fsas_row_splits2_data = fsas_.RowSplits(2).Data();
K2_EVAL(
c_, cur_states.NumElements(), lambda_set_arcs_per_state,
(int32_t states_idx01)->void {
int32_t idx01 = states_data[states_idx01],
num_arcs = fsas_row_splits2_data[idx01 + 1] -
fsas_row_splits2_data[idx01];
num_arcs_per_state_data[states_idx01] = num_arcs;
});
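// In-place exclusive prefix sum turns the per-state arc counts into
// row-splits; the extra trailing element allocated above receives the total
// number of arcs.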
ExclusiveSum(num_arcs_per_state, &num_arcs_per_state);
// arcs_shape has 3 axes: [fsas][states with in-degree 0][arcs]
RaggedShape arcs_shape = ComposeRaggedShapes(
cur_states.shape, RaggedShape2(&num_arcs_per_state, nullptr, -1));
// Each arc that generates a new state (i.e. for which
// arc_renumbering.Keep[i] == true) will write the state-id to here (as an
// idx01 into fsas_). Other elements will be undefined.
    // We will also write the row-id (i.e. which FSA the state belongs to) for each
// new state.
int32_t num_arcs = arcs_shape.NumElements();
Array1<int32_t> temp(c_, 2 * num_arcs);
Array1<int32_t> next_iter_states = temp.Arange(0, num_arcs);
Array1<int32_t> new_state_row_ids = temp.Arange(num_arcs, 2 * num_arcs);
// We'll be figuring out which of these arcs leads to a state that now has
// in-degree 0. (If >1 arc goes to such a state, only one will 'win',
// arbitrarily).
Renumbering arc_renumbering(c_, num_arcs);
const int32_t *arcs_row_ids1_data = arcs_shape.RowIds(1).Data(),
*arcs_row_ids2_data = arcs_shape.RowIds(2).Data(),
*arcs_row_splits2_data = arcs_shape.RowSplits(2).Data(),
*fsas_row_splits1_data = fsas_.RowSplits(1).Data(),
*dest_states_data = dest_states_.values.Data();
char *keep_arc_data = arc_renumbering.Keep().Data();
int32_t *state_in_degree_data = state_in_degree_.Data(),
*next_iter_states_data = next_iter_states.Data(),
*new_state_row_ids_data = new_state_row_ids.Data();
K2_EVAL(
c_, num_arcs, lambda_set_arc_renumbering,
(int32_t arcs_idx012)->void {
// note: the prefix `arcs_` means it is an idxXXX w.r.t. `arcs_shape`.
// the prefix `fsas_` means the variable is an idxXXX w.r.t. `fsas_`.
int32_t arcs_idx01 = arcs_row_ids2_data[arcs_idx012],
arcs_idx0 = arcs_row_ids1_data[arcs_idx01],
arcs_idx01x = arcs_row_splits2_data[arcs_idx01],
arcs_idx2 = arcs_idx012 - arcs_idx01x,
fsas_idx01 = states_data[arcs_idx01], // a state index
fsas_idx01x = fsas_row_splits2_data[fsas_idx01],
fsas_idx012 = fsas_idx01x + arcs_idx2,
fsas_dest_state_idx01 = dest_states_data[fsas_idx012];
// if this arc is a self-loop, just ignore this arc as we have
// processed the dest_state (==src_state)
if (fsas_dest_state_idx01 == fsas_idx01) {
keep_arc_data[arcs_idx012] = 0;
return;
}
if ((keep_arc_data[arcs_idx012] = AtomicDecAndCompareZero(
state_in_degree_data + fsas_dest_state_idx01))) {
next_iter_states_data[arcs_idx012] = fsas_dest_state_idx01;
new_state_row_ids_data[arcs_idx012] = arcs_idx0;
}
});
Array1<int32_t> new2old_map = arc_renumbering.New2Old();
if (new2old_map.Dim() == 0) {
      // There are no new states, so the iteration has terminated.  The
      // calling code will check that all states were processed.
return nullptr;
}
int32_t num_states = new2old_map.Dim();
Array1<int32_t> temp2(c_, 2 * num_states);
// `new_states` will contain state-ids which are idx01's into `fsas_`.
Array1<int32_t> new_states = temp2.Arange(0, num_states);
// `ans_row_ids` will map to FSA index
Array1<int32_t> ans_row_ids = temp2.Arange(num_states, 2 * num_states);
const int32_t *new2old_map_data = new2old_map.Data();
int32_t *ans_row_ids_data = ans_row_ids.Data(),
*new_states_data = new_states.Data();
K2_EVAL(
c_, num_states, lambda_set_new_states_and_row_ids,
(int32_t new_state_idx)->void {
int32_t arcs_idx012 = new2old_map_data[new_state_idx];
new_states_data[new_state_idx] = next_iter_states_data[arcs_idx012];
ans_row_ids_data[new_state_idx] = new_state_row_ids_data[arcs_idx012];
});
int32_t num_fsas = fsas_.Dim0();
Array1<int32_t> ans_row_splits(c_, num_fsas + 1);
RowIdsToRowSplits(ans_row_ids, &ans_row_splits);
auto ans = std::make_unique<Ragged<int32_t>>(
RaggedShape2(&ans_row_splits, &ans_row_ids, num_states),
new_states);
// The following will ensure the answer has deterministic numbering
SortSublists(ans.get());
return ans;
}
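  // For intuition: the batched procedure above is the data-parallel analogue
  // of Kahn's algorithm.  The sketch below (illustrative only, excluded from
  // compilation, and written against plain STL containers rather than k2
  // types) shows the sequential version; unlike the code above it does not
  // special-case self-loops.
#if 0
  // Assumes: #include <cstdint>, <deque>, <vector>.
  static std::vector<int32_t> KahnTopSortSketch(
      const std::vector<std::vector<int32_t>> &adj) {
    int32_t n = static_cast<int32_t>(adj.size());
    std::vector<int32_t> in_degree(n, 0), order;
    for (const auto &arcs : adj)
      for (int32_t dest : arcs) ++in_degree[dest];
    std::deque<int32_t> queue;  // plays the role of the current batch
    for (int32_t s = 0; s < n; ++s)
      if (in_degree[s] == 0) queue.push_back(s);
    while (!queue.empty()) {
      int32_t s = queue.front();
      queue.pop_front();
      order.push_back(s);
      for (int32_t dest : adj[s])
        if (--in_degree[dest] == 0) queue.push_back(dest);  // joins next batch
    }
    // order.size() < n  =>  the graph had a cycle.
    return order;
  }
#endif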
/*
Returns the final batch of states. This will include all final-states that
    existed in the original FSAs, i.e. at most one per input FSA.  We treat them
    specially because the final-state must remain the last state of each FSA
(this is only an issue because we support input where not all states were
reachable from the start state).
*/
std::unique_ptr<Ragged<int32_t>> GetFinalBatch() {
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = fsas_.Dim0();
const int32_t *fsas_row_splits1_data = fsas_.RowSplits(1).Data();
Array1<int32_t> has_final_state(c_, num_fsas + 1);
int32_t *has_final_state_data = has_final_state.Data();
K2_EVAL(
c_, num_fsas, lambda_set_has_final_state, (int32_t i)->void {
int32_t split = fsas_row_splits1_data[i],
next_split = fsas_row_splits1_data[i + 1];
has_final_state_data[i] = (next_split > split);
});
ExclusiveSum(has_final_state, &has_final_state);
int32_t n = has_final_state[num_fsas];
auto ans = std::make_unique<Ragged<int32_t>>(
RaggedShape2(&has_final_state, nullptr, n), Array1<int32_t>(c_, n));
int32_t *ans_data = ans->values.Data();
const int32_t *ans_row_ids1_data = ans->RowIds(1).Data();
K2_EVAL(
c_, n, lambda_set_final_state, (int32_t i)->void {
int32_t fsa_idx0 = ans_row_ids1_data[i],
final_state = fsas_row_splits1_data[fsa_idx0 + 1] - 1;
// If the following fails, it likely means an input FSA was invalid
// (e.g. had exactly one state, which is not allowed). Either that,
// or a code error.
K2_DCHECK_GT(final_state, fsas_row_splits1_data[fsa_idx0]);
ans_data[i] = final_state;
});
return ans;
}
void InitDestStatesAndInDegree() {
NVTX_RANGE(K2_FUNC);
int32_t num_fsas = fsas_.shape.TotSize(0),
num_states = fsas_.shape.TotSize(1);
// Get in-degrees of states.
int32_t num_arcs = fsas_.NumElements();
Array1<int32_t> dest_states_idx01 = GetDestStates(fsas_, true);
dest_states_ = Ragged<int32_t>(fsas_.shape, dest_states_idx01);
// remove those arcs which are self-loops, as we will not count them in
// state_in_degree_
Renumbering arc_renumbering(c_, num_arcs);
char *keep_arc_data = arc_renumbering.Keep().Data();
const int32_t *dest_states_data = dest_states_.values.Data(),
*fsas_row_ids2_data = fsas_.RowIds(2).Data();
K2_EVAL(
c_, num_arcs, lambda_set_keep_arc, (int32_t arc_idx012)->void {
int32_t dest_state_idx01 = dest_states_data[arc_idx012],
src_state_idx01 = fsas_row_ids2_data[arc_idx012];
keep_arc_data[arc_idx012] = dest_state_idx01 != src_state_idx01;
});
state_in_degree_ =
GetCounts(dest_states_.values[arc_renumbering.New2Old()], num_states);
int32_t *state_in_degree_data = state_in_degree_.Data();
const int32_t *fsas_row_splits1_data = fsas_.RowSplits(1).Data();
// Increment the in-degree of final-states
K2_EVAL(
c_, num_fsas, lambda_inc_final_state_in_degree,
(int32_t fsa_idx0)->void {
int32_t this_idx01 = fsas_row_splits1_data[fsa_idx0],
next_idx01 = fsas_row_splits1_data[fsa_idx0 + 1];
if (next_idx01 > this_idx01) {
int32_t final_state = next_idx01 - 1;
state_in_degree_data[final_state] += 1;
          }
});
}
/* Does the main work of top-sorting and returns the resulting FSAs.
@param [out] arc_map if non-NULL, the map from (arcs in output)
to (corresponding arcs in input) is written to here.
@return Returns the top-sorted FsaVec. (Note: this may have
fewer states than the input if there were unreachable
states.)
*/
FsaVec TopSort(Array1<int32_t> *arc_map) {
NVTX_RANGE(K2_FUNC);
InitDestStatesAndInDegree();
std::vector<std::unique_ptr<Ragged<int32_t>>> iters;
iters.push_back(GetInitialBatch());
{ // This block just checks that all non-empty FSAs in the input have their
// start state in their first batch.
int32_t num_fsas = fsas_.Dim0();
Ragged<int32_t> *first_batch = iters.back().get();
const int32_t *first_batch_states_data = first_batch->values.Data(),
*first_batch_row_splits1_data =
first_batch->RowSplits(1).Data(),
*fsas_row_splits1_data = fsas_.RowSplits(1).Data();
// Act as a flag
Array1<int32_t> start_state_present(c_, 1, 1);
int32_t *start_state_present_data = start_state_present.Data();
K2_EVAL(
c_, num_fsas, lambda_set_start_state_present,
(int32_t fsa_idx0)->void {
int32_t start_state_idx0x = fsas_row_splits1_data[fsa_idx0],
next_start_state_idx0x =
fsas_row_splits1_data[fsa_idx0 + 1];
if (next_start_state_idx0x > start_state_idx0x) { // non-empty Fsa
              // `first_state_idx01` is the 1st state in the first batch of this
              // FSA (it must be the start state of this FSA, according to our
              // implementation of `GetInitialBatch`).
int32_t first_state_idx01 = first_batch_states_data
[first_batch_row_splits1_data[fsa_idx0]];
if (first_state_idx01 != start_state_idx0x)
start_state_present_data[0] = 0;
}
});
K2_CHECK_EQ(start_state_present[0], 1)
<< "Our current implementation requires that the start state in each "
"Fsa must be present in the first batch";
}
while (iters.back() != nullptr)
iters.push_back(GetNextBatch(*iters.back()));
// note: below, we're overwriting nullptr.
iters.back() = GetFinalBatch();
// Need raw pointers for Stack().
std::vector<Ragged<int32_t> *> iters_ptrs(iters.size());
for (size_t i = 0; i < iters.size(); ++i) iters_ptrs[i] = iters[i].get();
Ragged<int32_t> all_states =
Cat(1, static_cast<int32_t>(iters.size()), iters_ptrs.data());
K2_CHECK_EQ(all_states.NumElements(), fsas_.TotSize(1))
<< "Our current implementation requires that the input Fsa is acyclic, "
"but it seems there are cycles other than self-loops.";
return RenumberFsaVec(fsas_, all_states.values, arc_map);
}
ContextPtr c_;
FsaVec &fsas_;
// For each arc in fsas_ (with same structure as fsas_), dest-state
// of that arc as an idx01.
Ragged<int32_t> dest_states_;
// The remaining in-degree of each state (state_in_degree_.Dim() ==
// fsas_.TotSize(1)), i.e. number of incoming arcs (except those from
// states that were already processed).
Array1<int32_t> state_in_degree_;
};
void TopSort(FsaVec &src, FsaVec *dest, Array1<int32_t> *arc_map) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
K2_CHECK_LE(src.NumAxes(), 3);
if (src.NumAxes() == 2) {
// Turn single Fsa into FsaVec.
FsaVec src_vec = FsaToFsaVec(src), dest_vec;
// Recurse..
TopSort(src_vec, &dest_vec, arc_map);
*dest = GetFsaVecElement(dest_vec, 0);
return;
}
TopSorter sorter(src);
*dest = sorter.TopSort(arc_map);
}
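// Illustrative usage sketch (not part of the original file, excluded from
// compilation).  Assumes the caller already has an FsaVec that is acyclic
// apart from self-loops; the construction of `fsas` is not shown here.
#if 0
void ExampleTopSortUsage(FsaVec &fsas) {
  FsaVec sorted;
  Array1<int32_t> arc_map;  // maps arcs of `sorted` back to arcs of `fsas`
  TopSort(fsas, &sorted, &arc_map);
  // Within each FSA of `sorted`, arcs now go only from lower- to
  // higher-numbered states (self-loops excepted), and states unreachable
  // from the start state have been dropped.
}
#endif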
} // namespace k2
|
the_stack
|
#include <cmath>
#include <iostream>
#include <algorithm>
#include <climits>    // CHAR_BIT
#include <memory>     // std::shared_ptr, std::make_shared
#include <numeric>    // std::accumulate
#include <stdexcept>  // std::runtime_error
#include <utility>    // std::pair
#include <vector>
#include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <QDebug>
#include <QFuture>
#include <QMutex>
#include <QtConcurrent/QtConcurrentRun>
using std::make_shared;
using std::pair;
using std::vector;
using thrust::device_vector;
namespace gpusim
{
size_t get_gpu_free_memory(unsigned int device_index)
{
cudaSetDevice(device_index);
size_t free, total;
cudaMemGetInfo(&free, &total);
return free;
}
unsigned int get_gpu_count()
{
static int device_count = 0;
static bool initialized = false;
if (!initialized) {
cudaGetDeviceCount(&device_count);
initialized = true;
}
return device_count;
}
unsigned int get_next_gpu(size_t required_memory)
{
static int next_device = 0;
for (unsigned int i = 0; i < get_gpu_count(); i++) {
int gpu =
next_device++ % get_gpu_count(); // Divide by 0 if called w/o GPU
        // Check the memory of the candidate device we are about to return
        auto free = get_gpu_free_memory(gpu);
if (free > required_memory) {
return gpu;
}
}
throw std::runtime_error(
"Can't find a GPU with enough memory to copy data.");
return 0; // Never gets here, just for compiler happiness
}
typedef device_vector<int> DFingerprint;
/**
* @internal
* Functor used to perform tanimoto similarity on GPGPU via thrust::transform
*/
struct TanimotoFunctor {
const int* m_ref_fp;
const int m_fp_intsize;
const int* m_dbdata;
const float m_similarity_cutoff;
TanimotoFunctor(const DFingerprint& ref_fp, int fp_intsize,
const device_vector<int>& dbdata, float similarity_cutoff)
: m_ref_fp(ref_fp.data().get()), m_fp_intsize(fp_intsize),
m_dbdata(dbdata.data().get()),
m_similarity_cutoff(similarity_cutoff){};
__device__ float operator()(const int& fp_index) const
{
int total = 0;
int common = 0;
int offset = m_fp_intsize * fp_index;
for (int i = 0; i < m_fp_intsize; i++) {
const int fp1 = m_ref_fp[i];
const int fp2 = m_dbdata[offset + i];
total += __popc(fp1) + __popc(fp2);
common += __popc(fp1 & fp2);
}
float score =
static_cast<float>(common) / static_cast<float>(total - common);
return score >= m_similarity_cutoff ? score : 0;
};
};
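// For reference, the score computed above is the Tanimoto coefficient
//     T(A, B) = |A & B| / (|A| + |B| - |A & B|)
// evaluated at the bit level.  Small worked example (illustrative, not from
// the original code): fp1 = 0b1100 and fp2 = 0b0110 give popcount totals
// 2 + 2 = 4 and popc(fp1 & fp2) = 1, so T = 1 / (4 - 1) = 1/3.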
struct StorageResultObject {
vector<SortableResult> m_result_data;
vector<int> m_approximate_matching_results;
};
class FingerprintDBPriv
{
public:
std::shared_ptr<device_vector<int>> d_data;
};
FingerprintDBStorage::FingerprintDBStorage(FingerprintDB* parent,
std::vector<char>& fp_data,
int index_offset, int fp_bitcount)
: m_parent(parent), m_index_offset(index_offset),
m_count(fp_data.size() / (fp_bitcount / CHAR_BIT))
{
const int* int_data = reinterpret_cast<const int*>(fp_data.data());
const size_t int_size = fp_data.size() / sizeof(int);
m_data.assign(int_data, int_data + int_size);
}
unsigned int FingerprintDBStorage::getOffsetIndex(unsigned int without_offset)
{
return without_offset + m_index_offset;
}
FingerprintDB::FingerprintDB(int fp_bitcount, int fp_count,
const QString& dbkey, vector<vector<char>>& data,
vector<char*>& smiles_vector,
std::vector<char*>& ids_vector)
: m_dbkey(dbkey)
{
m_fp_intsize = fp_bitcount / (sizeof(int) * 8); // ASSUMES INT-DIVISIBLE
// SIZE
m_total_count = fp_count;
int current_fp_count = 0;
for (auto& dataset : data) {
auto storage = make_shared<FingerprintDBStorage>(
this, dataset, current_fp_count, fp_bitcount);
storage->m_priv = make_shared<FingerprintDBPriv>();
m_storage.push_back(storage);
current_fp_count += storage->m_data.size() / m_fp_intsize;
}
if (current_fp_count != m_total_count) {
throw std::runtime_error("Mismatch between FP count and data, "
"potential database corruption.");
}
m_total_data_size = static_cast<size_t>(m_total_count) *
static_cast<size_t>(m_fp_intsize) * sizeof(int);
qDebug() << "Database loaded with" << m_total_count << "molecules";
// Optimization, take the underlying storage of the incoming vectors,
// which won't be used again in calling code
m_smiles.swap(smiles_vector);
m_ids.swap(ids_vector);
}
void FingerprintDB::copyToGPU(unsigned int fold_factor)
{
m_fold_factor = fold_factor;
while (m_fp_intsize % m_fold_factor != 0) {
m_fold_factor++;
}
if (m_fold_factor == 1) {
for (const auto& storage : m_storage) {
storage->m_gpu_device =
get_next_gpu(storage->m_data.size() * sizeof(int));
cudaSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared<device_vector<int>>();
*(storage->m_priv->d_data) = storage->m_data;
}
} else {
for (const auto& storage : m_storage) {
auto folded_data = fold_data(storage->m_data);
storage->m_gpu_device =
get_next_gpu(folded_data.size() * sizeof(int));
cudaSetDevice(storage->m_gpu_device);
// Have to create vector where correct cuda device is set
storage->m_priv->d_data = make_shared<device_vector<int>>();
*(storage->m_priv->d_data) = folded_data;
}
}
}
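// `fold_data` is not defined in this file; the sketch below (excluded from
// compilation) shows one common folding scheme and is an assumption, not
// necessarily what fold_data actually does: each fingerprint is shortened by
// fold_factor by OR-ing together the sections that land on the same folded
// position, trading score precision for a smaller GPU footprint.  Exact
// scores are later recomputed on the CPU for the top candidates (see
// search_storage below).
#if 0
static std::vector<int> example_fold(const std::vector<int>& data,
                                     int fp_intsize, int fold_factor)
{
    const int folded_size = fp_intsize / fold_factor;
    std::vector<int> folded(data.size() / fold_factor, 0);
    for (size_t i = 0; i < data.size(); ++i) {
        const size_t fp_index = i / fp_intsize; // which fingerprint
        const size_t offset = i % fp_intsize;   // position inside it
        folded[fp_index * folded_size + offset % folded_size] |= data[i];
    }
    return folded;
}
#endif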
void FingerprintDB::getStorageAndLocalIndex(unsigned int offset_index,
FingerprintDBStorage** storage,
unsigned int* local_index) const
{
int slice_index_offset = 0;
*storage = m_storage[0].get();
for (unsigned int i = 1; i < m_storage.size(); i++) {
        // Indices equal to a slice's m_index_offset belong to that slice, so
        // only stop once the next slice starts strictly past offset_index.
        if (m_storage[i]->m_index_offset > offset_index)
            break;
*storage = m_storage[i].get();
slice_index_offset = (*storage)->m_index_offset;
}
*local_index = offset_index - slice_index_offset;
}
Fingerprint FingerprintDB::getFingerprint(unsigned int index) const
{
Fingerprint output(m_fp_intsize);
FingerprintDBStorage* storage;
unsigned int local_index;
getStorageAndLocalIndex(index, &storage, &local_index);
unsigned int offset = local_index * m_fp_intsize;
for (int i = 0; i < m_fp_intsize; i++) {
output[i] = storage->m_data[offset + i];
}
return output;
}
void FingerprintDB::search_storage(
const Fingerprint& query,
const std::shared_ptr<FingerprintDBStorage>& storage,
StorageResultObject* results, unsigned int max_return_count,
float similarity_cutoff) const
{
auto& sortable_results = results->m_result_data;
cudaSetDevice(storage->m_gpu_device);
static QMutex mutex;
vector<int> indices;
std::vector<char*> results_smiles;
std::vector<char*> results_ids;
std::vector<float> results_scores;
device_vector<float> d_results_scores(storage->m_count);
device_vector<int> d_results_indices(storage->m_count);
try {
// Fill indices [0->N), which will be sorted along with scores at end
thrust::sequence(d_results_indices.begin(), d_results_indices.end());
DFingerprint d_ref_fp;
if (m_fold_factor == 1) {
// Copy the query fingerprint up to the GPU
d_ref_fp = query;
} else {
auto folded = fold_data(query);
d_ref_fp = folded;
}
const int folded_fp_intsize = m_fp_intsize / m_fold_factor;
// Use Tanimoto to score similarity of all compounds to query
// fingerprint
thrust::transform(d_results_indices.begin(), d_results_indices.end(),
d_results_scores.begin(),
TanimotoFunctor(d_ref_fp, folded_fp_intsize,
*(storage->m_priv->d_data),
similarity_cutoff));
auto indices_end = d_results_indices.end();
auto scores_end = d_results_scores.end();
if (similarity_cutoff > 0) {
indices_end = thrust::remove_if(
d_results_indices.begin(), d_results_indices.end(),
d_results_scores.begin(), thrust::logical_not<bool>());
scores_end = thrust::remove(d_results_scores.begin(),
d_results_scores.end(), 0);
}
unsigned int indices_size =
std::distance(d_results_indices.begin(), indices_end);
mutex.lock();
results->m_approximate_matching_results.push_back(indices_size);
mutex.unlock();
// Sort scores & indices vectors descending on score
thrust::sort_by_key(d_results_scores.begin(), scores_end,
d_results_indices.begin(),
thrust::greater<float>());
int results_to_consider = 0;
results_to_consider = std::min(
indices_size, max_return_count * m_fold_factor *
static_cast<int>(std::log2(2 * m_fold_factor)));
indices.assign(d_results_indices.begin(),
d_results_indices.begin() + results_to_consider);
    } catch (const thrust::system_error& e) {
qDebug() << "Error!" << e.what();
throw;
}
if (m_fold_factor == 1) { // If we don't fold, we can take exact GPU results
// Push top max_return_count results to CPU results vectors to be
// returned
for (auto index : indices) {
int offset_index = storage->getOffsetIndex(index);
results_smiles.push_back(m_smiles[offset_index]);
results_ids.push_back(m_ids[offset_index]);
}
results_scores.assign(d_results_scores.begin(),
d_results_scores.begin() + indices.size());
} else { // If we folded, we need to recalculate scores with full
// fingerprints
results_scores.resize(indices.size());
for (unsigned int i = 0; i < indices.size(); i++) {
int offset_index = storage->getOffsetIndex(indices[i]);
results_scores[i] =
tanimoto_similarity_cpu(query, getFingerprint(offset_index));
// Uncomment below to debug pre vs post folding scores
// qDebug() << results_scores[i] << " vs " << d_results_scores[i];
}
top_results_bubble_sort(indices, results_scores, max_return_count);
max_return_count = std::min((size_t) max_return_count, indices.size());
results_scores.resize(max_return_count);
for (unsigned int i = 0; i < max_return_count; i++) {
// Check whether the re-scored similarity is too low
if (results_scores[i] < similarity_cutoff) {
results_scores.resize(i);
break;
}
results_ids.push_back(m_ids[storage->getOffsetIndex(indices[i])]);
results_smiles.push_back(
m_smiles[storage->getOffsetIndex(indices[i])]);
}
}
mutex.lock();
for (unsigned int i = 0; i < results_smiles.size(); i++) {
sortable_results.push_back(SortableResult(
results_scores[i], ResultData(results_smiles[i], results_ids[i])));
}
mutex.unlock();
}
void FingerprintDB::search(const Fingerprint& query, const QString& dbkey,
unsigned int max_return_count,
float similarity_cutoff,
std::vector<char*>& results_smiles,
std::vector<char*>& results_ids,
std::vector<float>& results_scores,
unsigned long& approximate_result_count) const
{
if (dbkey != m_dbkey) {
qDebug() << "Key check failed, returning empty results";
return;
}
StorageResultObject results;
auto& sortable_results = results.m_result_data;
vector<QFuture<void>> futures;
for (auto& storage : m_storage) {
QFuture<void> future = QtConcurrent::run(
this, &FingerprintDB::search_storage, query, storage, &results,
max_return_count, similarity_cutoff);
futures.push_back(future);
}
for (auto& future : futures) {
future.waitForFinished();
}
std::sort(sortable_results.rbegin(), sortable_results.rend());
approximate_result_count =
std::accumulate(results.m_approximate_matching_results.begin(),
results.m_approximate_matching_results.end(), 0);
for (auto result : sortable_results) {
results_scores.push_back(result.first);
results_smiles.push_back(result.second.first);
results_ids.push_back(result.second.second);
}
int result_size = std::min(static_cast<int>(max_return_count),
static_cast<int>(results_scores.size()));
results_scores.resize(result_size);
results_smiles.resize(result_size);
results_ids.resize(result_size);
}
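// Illustrative usage sketch (excluded from compilation; assumes `db` was
// constructed and copied to the GPU elsewhere, and that the returned char*
// strings remain owned by the database):
#if 0
void example_search(const FingerprintDB& db, const Fingerprint& query,
                    const QString& dbkey)
{
    std::vector<char*> smiles, ids;
    std::vector<float> scores;
    unsigned long approximate_count = 0;
    db.search(query, dbkey, /*max_return_count=*/10,
              /*similarity_cutoff=*/0.7f, smiles, ids, scores,
              approximate_count);
    for (size_t i = 0; i < scores.size(); i++) {
        qDebug() << scores[i] << smiles[i] << ids[i];
    }
}
#endif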
/**
* @brief
* A CPU implementation of tanimoto similarity, meant purely for testing.
*/
float FingerprintDB::tanimoto_similarity_cpu(const Fingerprint& fp1,
const Fingerprint& fp2) const
{
int total = 0;
int common = 0;
for (int i = 0; i < m_fp_intsize; i++) {
total += __builtin_popcount(fp1[i]) + __builtin_popcount(fp2[i]);
common += __builtin_popcount(fp1[i] & fp2[i]);
}
return (float) common / (float) (total - common);
}
size_t get_available_gpu_memory()
{
size_t free = 0;
for (unsigned int gpu = 0; gpu < get_gpu_count(); gpu++) {
auto lfree = get_gpu_free_memory(gpu);
free += lfree;
}
// Comment out below line to force-test folding:
// free = 100*1024*1024;
return free;
}
} // namespace gpusim
|
the_stack
|
#include "nnbnorm.hpp"
#include "impl/dispatcher.hpp"
#include <cassert>
#include <cstring>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <algorithm>
#include <iostream>
using namespace vl ;
using namespace vl::nn ;
using namespace vl::impl ;
template<DeviceType deviceType, DataType dataType> struct BatchNormForward ;
template<DeviceType deviceType, DataType dataType> struct BatchNormForwardWithMoment ;
template<DeviceType deviceType, DataType dataType> struct BatchNormBackward ;
template<DeviceType deviceType, DataType dataType> struct BatchNormBackwardWithMoment ;
template<DataType dataType> struct BatchNormForwardCudnn ;
template<DataType dataType> struct BatchNormForwardWithMomentCudnn ;
template<DataType dataType> struct BatchNormBackwardCudnn ;
template<DataType dataType> struct BatchNormBackwardWithMomentCudnn ;
// -------------------------------------------------------------------
// Helpers
// -------------------------------------------------------------------
// Compute moments (means and sigmas) from the batch data
// WH is the product of the data width and height
// moments is a 2 x depth array with means and sigmas
template<typename T> inline void
compute_moment(T * moments,
T const * data,
int WH,
int depth,
int num,
T epsilon)
{
memset(moments, 0, sizeof(T) * 2*depth) ;
int mass = WH * num ;
for(int channel = 0; channel < depth; ++channel) {
for(int element = 0; element < num; ++element) {
for(int wh = 0; wh < WH; ++wh){
T x = data[wh + channel*WH + element*(depth*WH)] ;
moments[channel] += x ; // mean
moments[channel + depth] += x * x; // sigma
}
}
}
for(int i = 0; i < depth; ++i) {
T mean = moments[i] / mass ;
T sigma2 = std::max((T).0, moments[i + depth]/mass - mean*mean) ;
moments[i] = mean ;
moments[i + depth] = sqrt(sigma2 + epsilon);
}
}
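// For reference, with M = WH * num the function above computes, per channel c:
//   mean_c  = (1/M) * sum_{wh,n} x[wh, c, n]
//   sigma_c = sqrt( (1/M) * sum_{wh,n} x[wh, c, n]^2 - mean_c^2 + epsilon )
// and stores them as moments[c] and moments[c + depth] respectively.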
// This version assumes that the moment tensor is precomputed.
template<typename T> inline void
compute_ders(T * derMultipliers,
T * derBiases,
T const * moments,
T const * data,
T const * derOutput,
int WH, int depth, int num,
T epsilon)
{
memset(derMultipliers, 0, sizeof(T) * depth) ;
memset(derBiases, 0, sizeof(T) * depth) ;
for(int channel = 0; channel < depth; ++channel){
for(int element = 0; element < num; ++element ){
for(int wh = 0; wh < WH; ++wh){
int offset = wh + channel*WH + element * (WH*depth) ;
derMultipliers[channel] += derOutput[offset] * data[offset];
derBiases[channel] += derOutput[offset];
}
}
}
for(int i = 0; i < depth; ++i) {
T mean = moments[i] ;
T sigma = moments[i + depth] ;
derMultipliers[i] = (derMultipliers[i] - mean*derBiases[i]) / sigma;
}
}
template<typename T> inline void
compute_ders_and_moments(T * derMultipliers,
T * derBiases,
T * moments,
T const * data,
T const * derOutput,
int WH, int depth, int num,
T epsilon)
{
memset(derMultipliers, 0, sizeof(T) * depth) ;
memset(derBiases, 0, sizeof(T) * depth) ;
memset(moments, 0, sizeof(T) * 2*depth) ;
for(int channel = 0; channel < depth; ++channel){
for(int element = 0; element < num; ++element ){
for(int wh = 0; wh < WH; ++wh){
int offset = wh + channel*WH + element * (WH*depth) ;
moments[channel] += data[offset] ;
moments[channel + depth] += data[offset] * data[offset];
derMultipliers[channel] += derOutput[offset] * data[offset];
derBiases[channel] += derOutput[offset];
}
}
}
T mass = WH*num;
for(int i = 0; i < depth; ++i) {
T mean = moments[i] / mass ;
T sigma2 = std::max((T).0, moments[i + depth]/mass - mean*mean) ;
T sigma = sqrt(sigma2 + epsilon);
moments[i] = mean ;
moments[i + depth] = sigma ;
derMultipliers[i] = (derMultipliers[i] - mean*derBiases[i]) / sigma;
}
}
template<typename T> inline void
batch_normalize_backward(T * derData,
T const * moments,
T const * data,
T const * multipliers,
T const * derMultipliers,
T const * derBiases,
T const * derOutput,
int WH,
int depth,
int num)
{
T mass = WH*num;
for(int channel = 0; channel < depth; ++channel ) {
T mean = moments[channel] ;
T sigma = moments[channel + depth] ;
T muz = derBiases[channel]/mass;
T G1 = multipliers[channel]/sigma ;
T G2 = G1 * derMultipliers[channel]/(mass*sigma);
for(int element = 0; element < num; ++element){
for(int wh = 0; wh < WH; ++wh){
int offset = wh + channel*WH + element * (WH*depth) ;
derData[offset] = G1 * (derOutput[offset] - muz) - G2 * (data[offset]-mean) ;
}
}
}
}
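// For reference, writing xhat = (x - mean)/sigma and E[.] for the average
// over the M = WH*num elements of a channel, the loop above implements the
// standard batch-normalization input gradient
//   dL/dx = (multiplier/sigma) * ( dL/dy - E[dL/dy] - xhat * E[dL/dy * xhat] )
// since G1 = multiplier/sigma, muz = E[dL/dy], and
// G2*(x - mean) = G1 * (derMultipliers/mass) * xhat, where derMultipliers
// (after compute_ders) equals sum(dL/dy * xhat).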
// -------------------------------------------------------------------
// Forward
// -------------------------------------------------------------------
template<DataType dataType>
struct BatchNormForwardWithMoment<VLDT_CPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &output,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto outputData = (type*)output.getMemory() ;
auto momentData = (type const*)moment.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
int WH = height * width ;
for(int channel = 0; channel < depth; ++channel) {
type mean = momentData[channel] ;
type sigma = momentData[channel + depth] ;
type bias = biasData[channel];
type coefficient = multiplierData[channel] / sigma ;
for(int element = 0; element < size; ++element) {
for(int wh = 0; wh < WH; ++wh){
int offset = wh + channel*WH + element * (depth*WH) ;
outputData[offset] = coefficient * (inputData[offset] - mean) + bias ;
}
}
}
return VLE_Success ;
}
} ;
template<DataType dataType>
struct BatchNormForward<VLDT_CPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &output,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
vl::ErrorCode error = VLE_Success ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto outputData = (type*)output.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
// Compute the moments.
Tensor ownMoment(moment) ;
if (ownMoment.getMemory() == NULL) {
auto * buffer = (type*)op.context.getWorkspace(vl::VLDT_CPU, sizeof(type)*2*depth) ;
if (!buffer) {
error = VLE_OutOfMemory ;
goto done ;
}
ownMoment.setMemory(buffer) ;
}
{
auto momentData = (type*)ownMoment.getMemory() ;
compute_moment<type>(momentData,
inputData, width*height, depth, size,
op.epsilon) ;
}
// Compute output.
error = BatchNormForwardWithMoment<vl::VLDT_CPU,dataType>()
(op,output,ownMoment,input,multiplier,bias) ;
// Finish.
done:
return error ;
}
} ;
// -------------------------------------------------------------------
// Backward
// -------------------------------------------------------------------
template<DataType dataType>
struct BatchNormBackwardWithMoment<VLDT_CPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto derInputData = (type*)derInput.getMemory() ;
auto derMultiplierData = (type*)derMultiplier.getMemory() ;
auto derBiasData = (type*)derBias.getMemory() ;
auto momentData = (type const*)moment.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
int WH = height * width ;
// Compute derMultipliers, derBiases, muz, and moments.
compute_ders<type>(derMultiplierData, derBiasData,
momentData, inputData, derOutputData,
WH, depth, size,
op.epsilon);
// Compute derData.
batch_normalize_backward<type>(derInputData,
momentData, inputData,
multiplierData,
derMultiplierData, derBiasData, derOutputData,
WH, depth, size);
return VLE_Success ;
}
} ;
template<DataType dataType>
struct BatchNormBackward<VLDT_CPU, dataType>
{
vl::ErrorCode operator()(BatchNorm &op,
Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
vl::ErrorCode error = VLE_Success ;
typedef typename vl::DataTypeTraits<dataType>::type type ;
auto height = input.getHeight() ;
auto width = input.getWidth() ;
auto depth = input.getDepth() ;
auto size = input.getSize() ;
auto derInputData = (type*)derInput.getMemory() ;
auto derMultiplierData = (type*)derMultiplier.getMemory() ;
auto derBiasData = (type*)derBias.getMemory() ;
auto inputData = (type const*)input.getMemory() ;
auto multiplierData = (type const*)multiplier.getMemory() ;
auto biasData = (type const*)bias.getMemory() ;
auto derOutputData = (type const*)derOutput.getMemory() ;
int WH = height * width ;
// Get workspace if needed.
Tensor ownMoment(moment) ;
if (ownMoment.getMemory() == NULL) {
auto * buffer = (type*)op.context.getWorkspace(vl::VLDT_CPU, sizeof(type)*2*depth) ;
if (!buffer) {
error = VLE_OutOfMemory ;
goto done ;
}
ownMoment.setMemory(buffer) ;
}
{
auto momentData = (type*)ownMoment.getMemory() ;
// Compute derMultipliers, derBiases, and moments.
compute_ders_and_moments<type>(derMultiplierData, derBiasData, momentData,
inputData, derOutputData,
WH, depth, size,
op.epsilon);
// Compute derData.
batch_normalize_backward<type>(derInputData,
momentData, inputData,
multiplierData,
derMultiplierData, derBiasData, derOutputData,
WH, depth, size);
}
done:;
return error ;
}
} ;
// -------------------------------------------------------------------
// Driver
// -------------------------------------------------------------------
#if ENABLE_GPU
#include "nnbnorm_gpu.cu"
#endif
#if ENABLE_CUDNN
#include "nnbnorm_cudnn.cu"
#endif
BatchNorm::BatchNorm(Context &context,
double epsilon)
:
context(context),
epsilon(epsilon)
{ }
vl::ErrorCode
BatchNorm::forward(Tensor &output,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
return dispatch_cudnn<
BatchNormForward,
BatchNormForwardCudnn>()
(*this,output,moment,input,multiplier,bias) ;
}
vl::ErrorCode
BatchNorm::forwardWithMoment(Tensor &output,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias)
{
return dispatch_cudnn<
BatchNormForwardWithMoment,
BatchNormForwardWithMomentCudnn>()
(*this,output,moment,input,multiplier,bias) ;
}
vl::ErrorCode
BatchNorm::backward(Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
return dispatch_cudnn<
BatchNormBackward,
BatchNormBackwardCudnn>()
(*this,derInput,derMultiplier,derBias,moment,input,multiplier,bias,derOutput) ;
}
vl::ErrorCode
BatchNorm::backwardWithMoment(Tensor &derInput,
Tensor &derMultiplier,
Tensor &derBias,
Tensor const &moment,
Tensor const &input,
Tensor const &multiplier,
Tensor const &bias,
Tensor const &derOutput)
{
return dispatch_cudnn<
BatchNormBackwardWithMoment,
BatchNormBackwardWithMomentCudnn>()
(*this,derInput,derMultiplier,derBias,moment,input,multiplier,bias,derOutput) ;
}
|
the_stack
|
///////////////////////////////////////////////////////////////////// Headers //
#include "GDelKernels.h"
#include "Geometry.h"
//////////////////////////////////////////////////// Exclusive-Inclusive Scan //
#define LOG2_WARP_SIZE 5U
#define WARP_SIZE (1U << LOG2_WARP_SIZE)
inline __device__ int warpScanInclusive(int idata, volatile int *s_Data)
{
int pos = 2 * threadIdx.x - (threadIdx.x & (WARP_SIZE - 1));
s_Data[pos] = 0;
pos += WARP_SIZE;
s_Data[pos] = idata;
s_Data[pos] += s_Data[pos - 1];
s_Data[pos] += s_Data[pos - 2];
s_Data[pos] += s_Data[pos - 4];
s_Data[pos] += s_Data[pos - 8];
s_Data[pos] += s_Data[pos - 16];
return s_Data[pos];
}
inline __device__ int warpScanInclusive(int idata, int *s_Data, int size)
{
int pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for(int offset = 1; offset < size; offset <<= 1)
s_Data[pos] += s_Data[pos - offset];
return s_Data[pos];
}
inline __device__ int warpScanExclusive(int idata, int *s_Data, int size)
{
return warpScanInclusive(idata, s_Data, size) - idata;
}
inline __device__ int scan1Inclusive(int idata, int *s_Data, int size)
{
// Bottom-level inclusive warp scan
int warpResult = warpScanInclusive(idata, s_Data);
// Save top elements of each warp for exclusive warp scan
// sync to wait for warp scans to complete (because s_Data is being overwritten)
__syncthreads();
if( (threadIdx.x & (WARP_SIZE - 1)) == (WARP_SIZE - 1) )
s_Data[threadIdx.x >> LOG2_WARP_SIZE] = warpResult;
// wait for warp scans to complete
__syncthreads();
if( threadIdx.x < (size >> LOG2_WARP_SIZE) )
{
// grab top warp elements
int val = s_Data[threadIdx.x];
        // calculate exclusive scan and write back to shared memory
s_Data[threadIdx.x] = warpScanExclusive(val, s_Data, size >> LOG2_WARP_SIZE);
}
// return updated warp scans with exclusive scan results
__syncthreads();
return warpResult + s_Data[threadIdx.x >> LOG2_WARP_SIZE];
}
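// Illustrative usage sketch (an assumption, not part of the original file;
// excluded from compilation): a block-wide inclusive scan of one int per
// thread.  `size` must equal blockDim.x, be a power of two and a multiple of
// WARP_SIZE, and the launch must provide 2 * blockDim.x * sizeof(int) bytes
// of dynamic shared memory, since warpScanInclusive writes both below and
// above the WARP_SIZE offset.
#if 0
__global__ void exampleBlockScanInclusive( const int* in, int* out, int size )
{
    extern __shared__ int s_Data[];   // 2 * blockDim.x ints
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    out[ idx ] = scan1Inclusive( in[ idx ], s_Data, size );
}
#endif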
///////////////////////////////////////////////////////////////////// Kernels //
__global__ void kerMakeMissingData
(
const int* grid,
int gridWidth,
KerPointData pointData,
KerMissingData missingData
)
{
// Iterate through points
for ( int pointIdx = getCurThreadIdx(); pointIdx < missingData._num; pointIdx += getThreadNum() )
{
CudaAssert( ( pointIdx < pointData._num ) && "Invalid point index!" );
// Pick leader from grid location of point
const Point3& point = pointData._pointArr[ pointIdx ];
const int3 ptLoc = { ( int ) point._p[0], ( int ) point._p[1], ( int ) point._p[2] };
const int gridIdx = coordToIdx( gridWidth, ptLoc );
const int leader = grid[ gridIdx ];
CudaAssert( ( leader >= 0 ) && ( leader < pointData._num ) && "Invalid leader has won grid voxel!" );
// Write leader for this point
missingData._leaderArr[ pointIdx ] = leader;
}
return;
}
// Do NOT change this to inline device function.
// Inline device function was found to be comparatively slower!
#define READ_GRID_VALUE( dGrid, gridWidth, loc, value ) \
/* Outer layer */ \
if ( ( loc.x == -1 ) || ( loc.x == gridWidth ) \
|| ( loc.y == -1 ) || ( loc.y == gridWidth ) \
|| ( loc.z == -1 ) || ( loc.z == gridWidth ) ) \
{ \
value = Marker; \
} \
else \
/* Inner region */ \
{ \
const int curIdx = coordToIdx( gridWidth, loc ); \
value = dGrid[ curIdx ]; \
}
__forceinline__ __device__ void grabPair
(
KerInsertData worksetData,
int aVal,
int bVal,
int& curPairIdx,
KernelMode mode
)
{
CudaAssert( aVal != bVal );
if ( aVal == Marker )
{
return;
}
if ( GrabPerThreadPairs == mode )
{
worksetData._vertStarArr[ curPairIdx ] = ( aVal < bVal ) ? aVal : bVal;
worksetData._vertArr[ curPairIdx ] = ( aVal < bVal ) ? bVal : aVal;
}
++curPairIdx;
return;
}
// Read pairs from grid, one thread per row
// Invoked twice:
// 1: Count tetra
// 2: Read tetra
__global__ void kerReadPairsFromGrid
(
const int* dGrid,
int gridWidth,
KerInsertData worksetData,
KernelMode mode
)
{
// 8 voxels and their Voronoi vertices
const int Root = 0;
const int Opp = 7;
const int LinkNum = 6;
const int LinkVertex[ LinkNum + 1 ] = { 6, 2, 3, 1, 5, 4, 6 };
int3 loc = ( blockIdx.x <= gridWidth )
? make_int3( threadIdx.x - 1, blockIdx.x - 1, -1 )
: make_int3( gridWidth - 1, threadIdx.x - 1, -1 ); // Read row on other side of grid
const int curThreadIdx = getCurThreadIdx();
const int pairIdxBeg = ( CountPerThreadPairs == mode )
? 0
: worksetData._starVertMap[ curThreadIdx ];
int curPairIdx = pairIdxBeg;
int vals[8];
int valIdx = 0;
////
// Read one plane (4 voxels) in this row
////
for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix )
{
const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z );
READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] );
++valIdx;
} }
////
// Move along row, using plane (4 voxels) from last read
////
// Move along row
for ( ; loc.z < gridWidth; ++loc.z )
{
valIdx = 4;
// Read next plane (4 voxels) in this row
for ( int iy = 0; iy <= 1; ++iy ) { for ( int ix = 0; ix <= 1; ++ix )
{
const int3 curLoc = make_int3( loc.x + ix, loc.y + iy, loc.z + 1 );
READ_GRID_VALUE( dGrid, gridWidth, curLoc, vals[ valIdx ] );
++valIdx;
} }
// We have 8 values of cube of width 2
// Check the main diagonal of cube
const int rootVal = vals[ Root ];
const int oppVal = vals[ Opp ];
const bool hasMarker = ( rootVal == Marker ) || ( oppVal == Marker );
if ( rootVal != oppVal )
{
// Check 6 link pairs
bool hasQuad = false;
int aVal = vals[ LinkVertex[ 0 ] ];
for ( int vi = 0; vi < LinkNum; ++vi )
{
const int bVal = vals[ LinkVertex[ vi + 1 ] ];
if ( ( aVal != bVal )
&& ( aVal != rootVal ) && ( aVal != oppVal )
&& ( bVal != rootVal ) && ( bVal != oppVal ) )
{
grabPair( worksetData, rootVal, aVal, curPairIdx, mode );
grabPair( worksetData, oppVal, aVal, curPairIdx, mode );
grabPair( worksetData, rootVal, bVal, curPairIdx, mode );
grabPair( worksetData, oppVal, bVal, curPairIdx, mode );
grabPair( worksetData, aVal, bVal, curPairIdx, mode );
hasQuad = true;
}
aVal = bVal;
}
if ( hasQuad && !hasMarker ) // Has a quad
{
grabPair( worksetData, rootVal, oppVal, curPairIdx, mode );
}
}
// Store plane for next row
vals[ 0 ] = vals[ 4 ];
vals[ 1 ] = vals[ 5 ];
vals[ 2 ] = vals[ 6 ];
vals[ 3 ] = vals[ 7 ];
}
////
// Write count of thread
////
if ( CountPerThreadPairs == mode )
{
worksetData._vertArr[ curThreadIdx ] = curPairIdx - pairIdxBeg;
}
return;
}
__global__ void kerCopyWorksets
(
KerInsertData insertData,
KerIntArray fromStarArr,
int* fromVertArr,
int* fromMap
)
{
// Iterate current worksets
for ( int fromIdx = getCurThreadIdx(); fromIdx < fromStarArr._num; fromIdx += getThreadNum() )
{
const int star = fromStarArr._arr[ fromIdx ];
const int fromVertBeg = fromMap[ star ];
const int toVertBeg = insertData._starVertMap[ star ];
const int locIdx = fromIdx - fromVertBeg;
const int toIdx = toVertBeg + locIdx;
insertData._vertStarArr[ toIdx ] = star;
insertData._vertArr[ toIdx ] = fromVertArr[ fromIdx ];
}
return;
}
__global__ void kerCopyPumpedUsingAtomic
(
KerInsertData insertData,
KerIntArray pumpStarArr,
int* pumpVertArr,
int* toIdxArr
)
{
// Iterate pumped pairs
for ( int fromIdx = getCurThreadIdx(); fromIdx < pumpStarArr._num; fromIdx += getThreadNum() )
{
const int star = pumpStarArr._arr[ fromIdx ];
const int toIdx = atomicAdd( &toIdxArr[ star ], 1 );
insertData._vertStarArr[ toIdx ] = star;
insertData._vertArr[ toIdx ] = pumpVertArr[ fromIdx ];
}
return;
}
__forceinline__ __device__ bool isVertexInRange
(
const int* arr,
int beg,
int end,
int key
)
{
int idx = beg;
while ( idx < end )
{
if ( arr[ idx ] == key )
{
return true;
}
++idx;
}
return false;
}
__global__ void kerGatherPumpedInsertions
(
KerIntArray fromMap,
KerIntArray fromStarArr,
int* fromVertArr,
KerIntArray actStarArr,
int* actPumpMap,
KerIntArray outStarArr,
int* outVertArr
)
{
const int starNum = fromMap._num;
const int pumpStarNum = actStarArr._num;
// Iterate *only* stars needing pumping
for ( int idx = getCurThreadIdx(); idx < pumpStarNum; idx += getThreadNum() )
{
const int star = actStarArr._arr[ idx ];
const int pumpBeg = actPumpMap[ idx ];
const int pumpEnd = ( ( idx + 1 ) < pumpStarNum ) ? actPumpMap[ idx + 1 ] : outStarArr._num;
int pumpIdx = pumpBeg;
CudaAssert( ( pumpEnd > pumpBeg ) && "Star with no pumping!" );
////
// Pump starving stars using workset of their neighbours
// Note: There will always be at least ONE item in starving workset
////
const int fromBeg = fromMap._arr[ star ];
const int fromEnd = ( ( star + 1 ) < starNum ) ? fromMap._arr[ star + 1 ] : fromStarArr._num;
bool donePumping = false;
CudaAssert( ( fromEnd > fromBeg ) && "Star has at least *one* workset item!" );
// Iterate workset of starving star
for ( int fromIdx = fromBeg; fromIdx < fromEnd; ++fromIdx )
{
const int neiVert = fromVertArr[ fromIdx ];
const int neiVertBeg = fromMap._arr[ neiVert ];
const int neiVertEnd = ( ( neiVert + 1 ) < starNum ) ? fromMap._arr[ neiVert + 1 ] : fromStarArr._num;
// Iterate workset of neighbour
for ( int candidateIdx = neiVertBeg; candidateIdx < neiVertEnd; ++candidateIdx )
{
const int candidateVert = fromVertArr[ candidateIdx ];
if ( star == candidateVert )
{
continue;
}
// Check if already there in workset
if ( !isVertexInRange( fromVertArr, fromBeg, fromEnd, candidateVert )
&& !isVertexInRange( outVertArr, pumpBeg, pumpIdx, candidateVert ) )
{
// Add ordered insertion
outStarArr._arr[ pumpIdx ] = star;
outVertArr[ pumpIdx ] = candidateVert;
++pumpIdx;
// Check if pumping is enough
if ( pumpIdx == pumpEnd )
{
donePumping = true;
break;
}
}
}
if ( donePumping )
{
break;
}
}
////
// Borrowing from neighbour is not enough. So, use 0,1,2... stars
// Note: This actually happens for few points
////
int starIdx = 0;
while ( pumpIdx < pumpEnd )
{
CudaAssert( ( starIdx < starNum ) && "Not enough points in the world to pump this star!" );
            if ( star == starIdx )
            {
                ++starIdx;  // Skip the star itself; without this increment the loop would never terminate
                continue;
            }
// Check if it's already there
if ( !isVertexInRange( fromVertArr, fromBeg, fromEnd, starIdx )
&& !isVertexInRange( outVertArr, pumpBeg, pumpIdx, starIdx ) )
{
// Add ordered insertion
outStarArr._arr[ pumpIdx ] = star;
outVertArr[ pumpIdx ] = starIdx;
++pumpIdx;
}
++starIdx;
}
}
return;
}
// Given a list of sorted numbers (has duplicates and is non-contiguous) create a map
// Note: The input map *should* already be initialized to -1 !!!
// Guarantees:
// (1) For a number that is in input list, its map value and its next number's map value will be correct
// (2) For a number that is not in input list, either its map value is -1, the next one is -1, or size is 0
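// Worked example (illustrative, traced from the code below): with
// starNum = 6 and inArr = [1, 1, 3, 3, 3], the kernel writes
//   allStarMap[1] = 0, allStarMap[2] = 2, allStarMap[3] = 2,
//   allStarMap[4] = 5, allStarMap[5] = 5,
// while entries that are never written keep their initial -1.  Star 1 thus
// owns inArr[0, 2) and star 3 owns inArr[2, 5), matching guarantee (1);
// stars absent from the list end up with -1 or an empty range (guarantee 2).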
__global__ void kerMakeAllStarMap
(
KerIntArray inArr,
KerIntArray allStarMap,
int starNum
)
{
const int curThreadIdx = getCurThreadIdx();
// Iterate input list of numbers
for ( int idx = curThreadIdx; idx < inArr._num; idx += getThreadNum() )
{
const int curVal = inArr._arr[ idx ];
const int nextVal = ( ( idx + 1 ) < inArr._num ) ? inArr._arr[ idx + 1 ] : starNum - 1;
CudaAssert( ( curVal <= nextVal ) && "Input array of numbers is not sorted!" );
// Number changes at this index
if ( curVal != nextVal )
{
allStarMap._arr[ curVal + 1 ] = idx + 1;
allStarMap._arr[ nextVal ] = idx + 1;
}
}
if ( ( 0 == curThreadIdx ) && ( inArr._num > 0 ) )
{
const int firstVal = inArr._arr[ 0 ];
allStarMap._arr[ firstVal ] = 0; // Zero index for first value in input list
}
return;
}
__forceinline__ __device__ TriPositionEx getNextFreeTri
(
KerStarData starData,
StarInfo starInfo,
TriPositionEx& freeTriPosEx,
int& maxStarSize
)
{
do
{
// Increment free triangle location
starInfo.moveToNextTri( freeTriPosEx );
const TriangleStatus status = starData.triStatusAt( freeTriPosEx );
if ( Free == status )
{
////
// Update maximum star size if needed
////
const int locIdx = starInfo.toLocTriIdx( freeTriPosEx );
if ( locIdx >= maxStarSize )
{
maxStarSize = locIdx + 1;
}
return freeTriPosEx;
}
} while ( true );
CudaAssert( false && "No free triangle found!" );
return freeTriPosEx;
}
// Find first valid triangle adjacent to hole boundary
// There must be one!
__device__ void findFirstHoleSegment
(
KerStarData starData,
StarInfo starInfo,
TriPositionEx beneathTriPosEx,
TriPositionEx& firstTriPosEx,
int& firstVi,
TriPositionEx& firstHoleTriPosEx
)
{
    // Check whether the beneath triangle we already know about lies on the hole boundary
const TriangleOpp triOppFirst = starData.triOppAt( beneathTriPosEx );
for ( int vi = 0; vi < 3; ++vi )
{
const TriPositionEx oppTriPosEx = starInfo.locToTriPosEx( triOppFirst.getOppTri(vi) );
const TriangleStatus status = starData.triStatusAt( oppTriPosEx );
if ( Free != status ) // Found a hole edge
{
firstTriPosEx = oppTriPosEx;
firstHoleTriPosEx = beneathTriPosEx;
firstVi = triOppFirst.getOppVi( vi );
return;
}
}
// Iterate triangles
for ( int locTriIdx = 0; locTriIdx < starInfo._locTriNum; ++locTriIdx )
{
const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx );
const TriangleStatus status = starData.triStatusAt( triPosEx );
// Ignore non-beneath triangles
if ( Free == status )
{
continue;
}
const TriangleOpp triOpp = starData.triOppAt( triPosEx );
// Iterate segments of beneath triangle
for ( int vi = 0; vi < 3; ++vi )
{
const TriPositionEx triOppPosEx = starInfo.locToTriPosEx( triOpp.getOppTri(vi) );
const TriangleStatus status = starData.triStatusAt( triOppPosEx );
if ( Free == status ) // Found a hole edge
{
firstTriPosEx = triPosEx;
firstVi = vi;
firstHoleTriPosEx = triOppPosEx;
return;
}
}
}
CudaAssert( false && "Not found any hole triangle" );
return;
}
__global__ void kerStitchPointToHole
(
KerStarData starData,
KerBeneathData beneathData,
KerInsertData insertData,
KerIntArray activeStarArr,
int insIdx
)
{
CudaAssert( ( insIdx >= 0 ) && "Invalid insertion index!" );
// Thread 0-0 resets exact triangle counter
if ( ( 0 == threadIdx.x ) && ( 0 == blockIdx.x ) )
{
beneathData._flagArr[ ExactTriCount ] = 0;
}
// Iterate active stars
for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() )
{
const int star = activeStarArr._arr[ idx ];
const TriPosition beneathTriPos = beneathData._beneathTriPosArr[ star ];
// Check if no beneath triangle found
if ( -1 == beneathTriPos )
{
continue; // Nothing to do, since point is inside star
}
// Reset since we have read it
beneathData._beneathTriPosArr[ star ] = -1;
////
// Get insertion vertex
////
const int insBeg = insertData._starVertMap[ star ];
const int insLoc = insBeg + insIdx;
const int insVert = insertData._vertArr[ insLoc ];
insertData._vertStarArr[ insLoc ] = flipToNeg( star ); // Mark as successful insertion (not drowned)
////
// Find first hole segment
////
const TriPositionEx beneathTriPosEx = triPosToEx( beneathTriPos );
const StarInfo starInfo = starData.getStarInfo( star );
int maxStarSize = starData._maxSizeArr[ star ];
int firstVi = -1;
TriPositionEx firstTriPosEx;
TriPositionEx firstNewTriPosEx;
// Use the first hole triangle to store the first new triangle
findFirstHoleSegment( starData, starInfo, beneathTriPosEx, firstTriPosEx, firstVi, firstNewTriPosEx );
////
// First stitched triangle
////
// Get the first two vertices of the hole
TriPositionEx curTriPosEx = firstTriPosEx;
const Triangle& curTri = starData.triangleAt( curTriPosEx );
const int firstVert = curTri._v[ ( firstVi + 1 ) % 3 ];
int curVi = ( firstVi + 2 ) % 3;
int curVert = curTri._v[ curVi ];
// Stitch the first triangle
const Triangle firstNewTri = { insVert, curVert, firstVert };
starData.triangleAt( firstNewTriPosEx ) = firstNewTri;
starData.triStatusAt( firstNewTriPosEx ) = NewValidAndUnchecked;
        // Adjacency with opposite triangle
TriangleOpp& firstNewTriOpp = starData.triOppAt( firstNewTriPosEx );
firstNewTriOpp.setOpp( 0, starInfo.toLocTriIdx( firstTriPosEx ), firstVi );
TriangleOpp& firstTriOpp = starData.triOppAt( firstTriPosEx );
firstTriOpp.setOpp( firstVi, starInfo.toLocTriIdx( firstNewTriPosEx ), 0 );
////
// Walk around outside of hole, stitching rest of triangles
////
TriPositionEx freeTriPosEx = makeTriPosEx( 0, starInfo._begIdx0 - 1 ); // Start from begin of array
TriPositionEx prevNewTriPosEx = firstNewTriPosEx;
// Walk outside the hole in CW direction
while ( curVert != firstVert )
{
// Check opposite triangle
const TriangleOpp& curTriOpp = starData.triOppAt( curTriPosEx );
const TriPositionEx gloOppTriPosEx = starInfo.locToTriPosEx( curTriOpp.getOppTri( ( curVi + 2 ) % 3 ) );
const TriangleStatus status = starData.triStatusAt( gloOppTriPosEx );
// Triangle is outside the hole
if ( ( Free != status ) && ( NewValidAndUnchecked != status ) )
{
// Continue moving
const int oppVi = curTriOpp.getOppVi( ( curVi + 2 ) % 3 );
curVi = ( oppVi + 2 ) % 3;
curTriPosEx = gloOppTriPosEx;
}
// Triangle is in hole
else
{
const TriPositionEx newTriPosEx = ( Free == status )
? gloOppTriPosEx // Reuse hole triangle
: getNextFreeTri( starData, starInfo, freeTriPosEx, maxStarSize );
// Get the next vertex in the hole boundary
const int oppVi = ( curVi + 2 ) % 3;
const Triangle& curTri = starData.triangleAt( curTriPosEx );
const int nextVert = curTri._v[ ( curVi + 1 ) % 3 ];
// New triangle
const int locNewTriIdx = starInfo.toLocTriIdx( newTriPosEx );
const Triangle newTri = { insVert, nextVert, curVert };
                // Adjacency with opposite triangle
TriangleOpp& curTriOpp = starData.triOppAt( curTriPosEx );
curTriOpp.setOpp( oppVi, locNewTriIdx, 0 );
TriangleOpp& newTriOpp = starData.triOppAt( newTriPosEx );
newTriOpp.setOpp( 0, starInfo.toLocTriIdx( curTriPosEx ), oppVi );
// Adjacency with previous new triangle
TriangleOpp& prevTriOpp = starData.triOppAt( prevNewTriPosEx );
prevTriOpp.setOpp( 2, locNewTriIdx, 1 );
newTriOpp.setOpp( 1, starInfo.toLocTriIdx( prevNewTriPosEx ), 2 );
// Last hole triangle
if ( nextVert == firstVert )
{
TriangleOpp& firstTriOpp = starData.triOppAt( firstNewTriPosEx );
firstTriOpp.setOpp( 1, locNewTriIdx, 2 );
newTriOpp.setOpp( 2, starInfo.toLocTriIdx( firstNewTriPosEx ), 1 );
}
// Store new triangle data
starData.triangleAt( newTriPosEx ) = newTri;
starData.triStatusAt( newTriPosEx ) = NewValidAndUnchecked;
// Check if this is not a beneath triangle, but fresh new triangle
if ( Free != status )
{
// Star needs to be set *only* for such triangles
// Note: Saves about 15ms for 1M points on GTX 580
starData.triStarAt( newTriPosEx ) = star;
}
// Prepare for next triangle
prevNewTriPosEx = newTriPosEx;
// Move to the next vertex
curVi = ( curVi + 1 ) % 3;
curVert = nextVert;
}
}
// Update bounds of max triangle
starData._maxSizeArr[ star ] = maxStarSize;
}
return;
}
__global__ void kerCountPointsOfStar( KerStarData starData )
{
// Iterate through stars
for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() )
{
// Check if any insertions for this star
if ( 0 == starData._insCountArr[ star ] )
{
continue;
}
const StarInfo starInfo = starData.getStarInfo( star );
int validTriCount = starInfo._locTriNum;
for ( int locTriIdx = 0; locTriIdx < starInfo._locTriNum; ++locTriIdx )
{
const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx );
if ( Free == starData.triStatusAt( triPosEx ) )
{
--validTriCount;
}
}
// Get point count
CudaAssert( ( 0 == ( ( validTriCount + 4 ) % 2 ) ) && "2-sphere triangle count not divisible by 2!" );
starData._pointNumArr[ star ] = ( validTriCount + 4 ) / 2;
}
return;
}
__global__ void kerGetPerTriangleCount
(
KerStarData starData,
int* insCountArr
)
{
// Iterate all triangles
for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() )
{
const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx );
const TriangleStatus triStatus = starData.triStatusAt( triPosEx );
int insertCount = 0;
// Only unchecked triangles
if ( ( ValidAndUnchecked == triStatus ) || ( NewValidAndUnchecked == triStatus ) )
{
const Triangle tri = starData.triangleAt( triPosEx );
// Iterate triangle edges
for ( int vi = 0; vi < 3; ++vi )
{
const int v0 = tri._v[ vi ];
const int v1 = tri._v[ ( vi + 1 ) % 3 ];
if ( v0 < v1 )
{
++insertCount;
}
}
}
// Write insertion count
insCountArr[ triIdx ] = insertCount;
}
return;
}
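// The kernel above and the one below follow the usual count / prefix-sum /
// fill pattern: kerGetPerTriangleCount writes, per unchecked triangle, how
// many ordered (v0 < v1) edge insertions it will generate; the caller
// presumably exclusive-sums that array into `facetMap`; and
// kerGetPerTriangleInsertions then writes the actual (star, vertex) pairs at
// the offsets the map provides.  The v0 < v1 test ensures each undirected
// link edge is emitted exactly once.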
// Note that insertions can be bounded
__global__ void kerGetPerTriangleInsertions
(
KerStarData starData,
KerIntArray facetMap,
KerIntArray triStarArr,
int* triVertArr,
int allTriInsertNum // Total number (unbounded) of triangle insertions
)
{
// Iterate triangles
for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() )
{
////
// Triangle out of bound
////
const int insBeg = facetMap._arr[ triIdx ];
const int insEnd = ( ( triIdx + 1 ) < starData._totalTriNum )
? facetMap._arr[ triIdx + 1 ]
: allTriInsertNum; // Total number of unbounded insertions
// Check if beyond insertion bound
if ( insBeg >= triStarArr._num )
{
break; // No more work for this *thread*
}
////
// Triangle generating no insertion
////
if ( insEnd == insBeg )
{
continue;
}
////
// Triangle generating insertions
////
const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx );
const Triangle tri = starData.triangleAt( triPosEx );
int insIdx = insBeg;
// Iterate triangle edges
for ( int vi = 0; vi < 3; ++vi )
{
const int v0 = tri._v[ vi ];
const int v1 = tri._v[ ( vi + 1 ) % 3 ];
if ( v0 < v1 )
{
// Note: When triangle lies on FacetMax boundary,
// this write will go beyond bound by 1. Assumption is that the
// arrays' "actual" size is at least +1 more than triStarArr._num
triStarArr._arr[ insIdx ] = v0;
triVertArr[ insIdx ] = v1;
++insIdx;
}
}
CudaAssert( insIdx == insEnd );
// All triangles with insertions *except* the one on bound
if ( insIdx <= triStarArr._num )
{
TriangleStatus& triStatus = starData.triStatusAt( triPosEx );
CudaAssert( ( ( Free != triStatus ) && ( Valid != triStatus ) ) && "Triangle has insertions, so has to be unchecked!" );
triStatus = Valid; // Reset triangle status
}
// Triangle on boundary
else
{
//CudaAssert( false && "There is a triangle on boundary! Not an error, just an informational message!" );
}
}
return;
}
__global__ void kerCountPerStarInsertions
(
KerStarData starData,
KerInsertData insertData
)
{
for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() )
{
////
// Update point number to be inserted AND drowned number
////
const int insBeg = insertData._starVertMap[ star ];
int insEnd = ( star < ( starData._starNum - 1 ) ) ? insertData._starVertMap[ star + 1 ] : insertData._vertNum;
if ( ( -1 == insBeg ) || ( -1 == insEnd ) )
{
insEnd = insBeg;
}
const int insPointNum = insEnd - insBeg;
CudaAssert( ( insPointNum >= 0 ) && "Invalid indices!" );
// Insert point count for this star
starData._insCountArr[ star ] = insPointNum;
////
// Drowned count for this star
// Given star of n link points and m insertion points, only a maximum
// of (n + m - 4) points can drown
////
const int starPointNum = starData._pointNumArr[ star ];
const int totalPointNum = starPointNum + insPointNum;
// Update star point count
starData._pointNumArr[ star ] = totalPointNum;
}
return;
}
__global__ void kerCopyOldToNewHistory
(
KerHistoryData historyData, // Old history[1]
int* newVertArr,
int* newVertStarArr,
int* newStarVertMap
)
{
CudaAssert( ( historyData._vertNum[1] > 0 ) && "There is no history[1]! Use the array directly, no need for this kernel!" );
// Iterate old history vertices
for ( int curVertIdx = getCurThreadIdx(); curVertIdx < historyData._vertNum[1]; curVertIdx += getThreadNum() )
{
const int star = historyData._vertStarArr[1][ curVertIdx ];
const int vertBeg = historyData._starVertMap[1][ star ];
const int locVertIdx = curVertIdx - vertBeg;
const int newVertBeg = newStarVertMap[ star ];
const int newVertLoc = newVertBeg + locVertIdx;
CudaAssert( ( vertBeg >= 0 ) && "Vertex that exists cannot have invalid map value!" );
CudaAssert( ( newVertBeg >= 0 ) && "Vertex that exists cannot have invalid map value!" );
// Copy from old to new location
newVertArr[ newVertLoc ] = historyData._vertArr[1][ curVertIdx ];
newVertStarArr[ newVertLoc ] = star;
}
return;
}
__global__ void kerCopyInsertionToNewHistory
(
KerIntArray insVertArr,
int* insVertStarArr,
KerIntArray insStarVertMap,
int* oldHistStarVertMap,
int oldHistVertNum,
int* newVertArr,
int* newVertStarArr,
int* newStarVertMap
)
{
const int starNum = insStarVertMap._num;
// Iterate current insertions
for ( int curInsIdx = getCurThreadIdx(); curInsIdx < insVertArr._num; curInsIdx += getThreadNum() )
{
////
// Location of *this* insertion vertex
////
const int star = insVertStarArr[ curInsIdx ];
const int insBeg = insStarVertMap._arr[ star ];
const int insEnd = ( ( star + 1 ) < starNum ) ? insStarVertMap._arr[ star + 1 ] : insVertArr._num;
int insLocIdx = curInsIdx - insBeg;
CudaAssert( ( insBeg >= 0 ) && ( insEnd >= 0 ) && "*All* insertion map values must be valid!" );
////
// Old history size of this star
////
const int oldVertBeg = oldHistStarVertMap[ star ];
const int oldVertEnd = ( ( star + 1 ) < starNum ) ? oldHistStarVertMap[ star + 1 ] : oldHistVertNum;
const int oldVertNum = oldVertEnd - oldVertBeg;
CudaAssert( ( oldVertBeg >= 0 ) && ( oldVertEnd >= 0 ) && "*All* old history map values must be valid!" );
////
// Destination of *this* insertion index
////
const int newVertBeg = newStarVertMap[ star ];
const int newInsLoc = newVertBeg + oldVertNum + insLocIdx;
////
// Copy insertion to new history
////
newVertArr[ newInsLoc ] = insVertArr._arr[ curInsIdx ];
newVertStarArr[ newInsLoc ] = star;
}
return;
}
__global__ void kerComputeTriangleCount
(
KerStarData starData,
KerIntArray triNumArr
)
{
const float ExpandFactor = 1.0f;
// Iterate through stars
for ( int star = getCurThreadIdx(); star < starData._starNum; star += getThreadNum() )
{
// Current number of triangles
const StarInfo starInfo = starData.getStarInfo( star );
const int curTriNum = starInfo._locTriNum;
// Expected number of points (current + expected insertions)
const int expPointNum = starData._pointNumArr[ star ];
// Expected number of triangles
const int insTriNum = get2SphereTriangleNum( 1, expPointNum );
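// Illustrative note: get2SphereTriangleNum presumably follows Euler's relation --
// a triangulation of the 2-sphere with v vertices has 2v - 4 triangles -- so the
// expected triangle count grows linearly with expPointNum.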
const int newTriNum = insTriNum * ExpandFactor;
triNumArr._arr[ star ] = max( newTriNum, curTriNum ) - starInfo._size0; // Only "expand" second array
}
return;
}
__global__ void kerNoteOwnerTriangles
(
KerStarData starData,
KerIntArray tetraTriMap
)
{
// Iterate triangles
for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() )
{
const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx );
const TriangleStatus status = starData.triStatusAt( triPosEx );
if ( Free == status )
{
continue;
}
////
// Check if triangle's star is its owner
////
const Triangle tri = starData.triangleAt( triPosEx );
const int star = starData.triStarAt( triPosEx );
if ( ( star < tri._v[0] ) & ( star < tri._v[1] ) & ( star < tri._v[2] ) )
{
tetraTriMap._arr[ triIdx ] = triIdx;
}
}
return;
}
__global__ void kerGrabTetrasFromStars
(
KerStarData starData,
KerTetraData tetraData,
KerIntArray tetraTriMap,
int* triTetraMap,
LocTriIndex* tetraCloneTriArr
)
{
const int tetraNum = tetraTriMap._num;
// Iterate all tetrahedrons
for ( int tetIdx = getCurThreadIdx(); tetIdx < tetraNum; tetIdx += getThreadNum() )
{
// Construct 4 vertices of the tetra
const int triIdx = tetraTriMap._arr[ tetIdx ];
const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx );
const int fromStar = starData.triStarAt( triPosEx );
const StarInfo starInfo = starData.getStarInfo( fromStar );
const Triangle tri = starData.triangleAt( triPosEx );
Tetrahedron tetra;
const int v0 = tri._v[0];
tetra._v[0] = v0;
tetra._v[1] = tri._v[1];
tetra._v[2] = tri._v[2];
tetra._v[3] = fromStar;
////
// Set 3 opposites of this tetra
////
const TriangleOpp triOpp = starData.triOppAt( triPosEx );
for ( int vi = 0; vi < 3; ++vi )
{
const int gloOppTriIdx = starInfo.toGlobalTriIdx( triOpp.getOppTri( vi ) );
tetra._opp[ vi ] = triTetraMap[ gloOppTriIdx ];
}
////
// Set 4th opposite of this tetra
////
// Get clone triangle
const StarInfo toStarInfo = starData.getStarInfo( v0 );
const LocTriIndex locTriIdx = tetraCloneTriArr[ tetIdx ];
const TriPositionEx toTriPosEx = toStarInfo.locToTriPosEx( locTriIdx );
const Triangle toTri = starData.triangleAt( toTriPosEx );
// Find tetra opposite _v[ 3 ]
const int fromStarIdx = toTri.indexOfVert( fromStar );
const TriangleOpp& toTriOpp = starData.triOppAt( toTriPosEx );
const int starTriOppIdx = toStarInfo.toGlobalTriIdx( toTriOpp.getOppTri( fromStarIdx ) );
tetra._opp[ 3 ] = triTetraMap[ starTriOppIdx ];
// Write back tetra
tetraData._arr[ tetIdx ] = tetra;
}
return;
}
__forceinline__ __device__ bool checkIfStarHasTriangle
(
KerStarData starData,
int star,
Triangle inTri
)
{
const StarInfo starInfo = starData.getStarInfo( star );
// Iterate triangles of star
for ( int locTriIdx = 0; locTriIdx < starInfo._locTriNum; ++locTriIdx )
{
const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx );
const TriangleStatus status = starData.triStatusAt( triPosEx );
// Ignore free triangles
if ( Free == status )
{
continue;
}
// Check if triangle has vertex
const Triangle tri = starData.triangleAt( triPosEx );
if ( tri.hasVertex( inTri._v[0] ) & tri.hasVertex( inTri._v[1] ) & tri.hasVertex( inTri._v[2] ) )
{
return true;
}
}
return false;
}
__global__ void kerCheckStarConsistency
(
KerStarData starData,
KerBeneathData beneathData
)
{
// Iterate all triangles
for ( int triIdx = getCurThreadIdx(); triIdx < starData._totalTriNum; triIdx += getThreadNum() )
{
////
// Ignore free triangles
////
const TriPositionEx triPosEx = starData.globToTriPosEx( triIdx );
TriangleStatus& triStatus = starData.triStatusAt( triPosEx );
if ( Free == triStatus )
{
continue;
}
CudaAssert( Valid == triStatus );
////
// Check if triangle is consistent
////
const int star = starData.triStarAt( triPosEx );
const Triangle tri = starData.triangleAt( triPosEx );
// Check for this tetra in other 3 stars
for ( int vi = 0; vi < 3; ++vi )
{
const int toStar = tri._v[ vi ];
const Triangle toTri = { star, tri._v[ ( vi + 1 ) % 3 ], tri._v[ ( vi + 2 ) % 3 ] };
if ( !checkIfStarHasTriangle( starData, toStar, toTri ) )
{
beneathData._flagArr[ ExactTriCount ] = 1;
break;
}
}
}
return;
}
__global__ void kerAppendValueToKey
(
KerIntArray keyArr,
int* valArr,
int bitsPerIndex
)
{
const int bitsPerValue = 31 - bitsPerIndex;
const int ValMask = 1 << bitsPerValue;
// Iterate array
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
const int key = keyArr._arr[ idx ];
const int val = valArr[ idx ];
CudaAssert( ( key >= 0 ) && "Invalid key!" );
keyArr._arr[ idx ] = ( ( key << bitsPerValue ) | ( val & ( ValMask - 1 ) ) );
}
return;
}
__global__ void kerRemoveValueFromKey
(
KerIntArray keyArr,
int bitsPerIndex
)
{
const int bitsPerValue = 31 - bitsPerIndex;
// Iterate array
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
const int keyvalue = keyArr._arr[ idx ];
CudaAssert( ( keyvalue >= 0 ) && "Key-Value is invalid!" );
keyArr._arr[ idx ] = ( keyvalue >> bitsPerValue );
}
return;
}
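// Worked example (illustrative values): with bitsPerIndex = 8, bitsPerValue = 23 and
// ValMask = 1 << 23 = 8388608. kerAppendValueToKey packs key = 3, val = 5 as
// ( 3 << 23 ) | ( 5 & 8388607 ) = 25165829; kerRemoveValueFromKey recovers the key
// as 25165829 >> 23 = 3, discarding the 23 low bits that held the value.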
// Key: ... 3 3 3 3 3 3 ...
// Val: ... 5 5 5 5 5 5 ...
// Res: ... -6 -6 -6 -6 -6 5 ...
__global__ void kerMarkDuplicates( KerIntArray keyArr, KerIntArray valueArr )
{
// Iterate key array
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
const int key = keyArr._arr[ idx ];
const int val = valueArr._arr[ idx ];
int nextIdx = idx + 1;
// Move right until end of array
while ( nextIdx < keyArr._num )
{
const int nextKey = keyArr._arr[ nextIdx ];
// Check if next key-val pair same as current pair
if ( nextKey != key )
{
break; // Get out!
}
// Now this pair and next pair are same
const int nextVal = valueArr._arr[ nextIdx ];
// Compare *this* value to next
if ( ( val == nextVal ) // nextVal is +ve
|| ( val == ( - nextVal - 1 ) ) // nextVal is -ve
)
{
valueArr._arr[ idx ] = flipToNeg( val ); // Negate *this* value, so this pair can be removed
break;
}
++nextIdx;
}
}
return;
}
// Make map AND also update triStar and triStatus array
__global__ void kerMakeOldToNewTriMap
(
KerStarData oldStarData,
int oldTriNum,
KerIntArray newTriMap,
KerIntArray oldNewMap,
KerIntArray newTriStar,
KerTriStatusArray newTriStatus
)
{
// Iterate through triangles
for ( int oldTriIdx = getCurThreadIdx(); oldTriIdx < oldTriNum; oldTriIdx += getThreadNum() )
{
////
// Skip copying free triangle information
////
const TriangleStatus status = oldStarData._triStatusArr[1][ oldTriIdx ];
if ( Free == status )
{
continue;
}
////
// Make map
////
const int starIdx = oldStarData._triStarArr[1][ oldTriIdx ]; // Star
const int oldTriBeg = oldStarData._starTriMap[1][ starIdx ]; // Old location begin
const int newTriBeg = newTriMap._arr[ starIdx ]; // New location begin
const int newTriIdx = oldTriIdx - oldTriBeg + newTriBeg; // New location
oldNewMap._arr[ oldTriIdx ] = newTriIdx;
newTriStar._arr[ newTriIdx ] = starIdx;
newTriStatus._arr[ newTriIdx ] = status;
}
return;
}
__global__ void kerGetActiveTriCount
(
KerStarData starData,
KerIntArray activeStarArr,
KerIntArray activeTriCountArr,
int insIdx,
bool isActiveBoundTight
)
{
// Iterate active stars
for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() )
{
const int star = activeStarArr._arr[ idx ];
const int insNum = starData._insCountArr[ star ];
int maxSize = 0;
if ( insIdx < insNum )
{
if ( isActiveBoundTight )
{
// Exact bound of used triangles in star (tight bound)
maxSize = starData._maxSizeArr[ star ];
}
else
{
// Total number of triangles of star (loose bound)
const StarInfo& starInfo = starData.getStarInfo( star );
maxSize = starInfo._locTriNum;
}
}
activeTriCountArr._arr[ idx ] = maxSize;
}
return;
}
__global__ void kerGetActiveTriCount
(
KerStarData starData,
KerIntArray activeStarArr,
KerIntArray activeTriCountArr
)
{
// Iterate stars
for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() )
{
const int star = activeStarArr._arr[ idx ];
const StarInfo starInfo = starData.getStarInfo( star );
activeTriCountArr._arr[ idx ] = starInfo._locTriNum;
}
return;
}
__forceinline__ __device__ bool _checkIfStarHasVertex
(
KerStarData starData,
int star,
int inVert
)
{
const StarInfo starInfo = starData.getStarInfo( star );
// Iterate triangles of star
for ( int locTriIdx = 0; locTriIdx < starInfo._locTriNum; ++locTriIdx )
{
const TriPositionEx triPosEx = starInfo.locToTriPosEx( locTriIdx );
const TriangleStatus status = starData.triStatusAt( triPosEx );
// Ignore free triangles
if ( Free == status )
{
continue;
}
// Check if triangle has vertex
const Triangle tri = starData.triangleAt( triPosEx );
if ( tri.hasVertex( inVert ) )
{
return true;
}
}
return false;
}
__global__ void
kerMarkSubmergedInsertions
(
KerStarData starData,
KerInsertData insertData
)
{
// Iterate through insertions
for ( int idx = getCurThreadIdx(); idx < insertData._vertNum; idx += getThreadNum() )
{
////
// Ignore drowned insertions
// Note: These have positive star value
////
const int star = insertData._vertStarArr[ idx ];
if ( star >= 0 )
{
continue;
}
////
// Check if insertion is submerged
////
const int destStar = flipToPos( star );
const int vert = insertData._vertArr[ idx ];
const bool starHasVert = _checkIfStarHasVertex( starData, destStar, vert );
insertData._vertStarArr[ idx ] = starHasVert
? -1 // Successful insertion
: destStar; // Submerged (passed on as drowned, so that a proof can be obtained for it)
}
return;
}
// Mark keys as -1 if ( val < key )
__global__ void kerMarkReversePairs
(
KerIntArray keyArr,
int* valArr
)
{
// Iterate items
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
const int key = keyArr._arr[ idx ];
const int val = valArr[ idx ];
CudaAssert( ( key != val ) && "Invalid key-val pair in array!" );
if ( val < key )
{
keyArr._arr[ idx ] = -1;
}
}
return;
}
__global__ void kerGetCloneTriInfo
(
KerStarData starData,
KerIntArray facetStarArr,
int* facetTriArr,
int* triTetraMap,
LocTriIndex* tetraCloneTriArr
)
{
// Iterate opp-tetra facet items
for ( int facetIdx = getCurThreadIdx(); facetIdx < facetStarArr._num; facetIdx += getThreadNum() )
{
// From- data
const int fromTriIdx = facetTriArr[ facetIdx ];
const TriPositionEx fromTriPosEx = starData.globToTriPosEx( fromTriIdx );
const Triangle& fromTri = starData.triangleAt( fromTriPosEx );
const int fromStar = starData.triStarAt( fromTriPosEx );
const int v0 = fromTri._v[ 1 ];
const int v1 = fromTri._v[ 2 ];
// To- data
const int toStar = facetStarArr._arr[ facetIdx ];
const StarInfo toStarInfo = starData.getStarInfo( toStar );
////
// Check if to-star has from-triangle
////
int foundLocIdx = -1;
// Iterate triangles of to-star
for ( int locTriIdx = 0; locTriIdx < toStarInfo._locTriNum; ++locTriIdx )
{
const TriPositionEx triPosEx = toStarInfo.locToTriPosEx( locTriIdx );
const Triangle toTri = starData.triangleAt( triPosEx );
// Check if triangle matches
if ( toTri.hasVertex( fromStar ) & toTri.hasVertex( v0 ) & toTri.hasVertex( v1 ) )
{
const TriangleStatus status = starData.triStatusAt( triPosEx );
// Matched triangle *might* be free, check for it
// Highly unlikely, but stranger things have happened in this world!
if ( Free == status )
{
continue;
}
// Matches!
foundLocIdx = locTriIdx;
break;
}
}
CudaAssert( ( -1 != foundLocIdx ) && "Triangle not found in to-star!" );
////
// Set triIdx of tetra's clone AND point clone triangle to tetra
// Note: This is a non-coalesced read-write. Sorting and then writing
// coalesced was tried and found to be slightly slower than this.
////
const int ownTetIdx = triTetraMap[ fromTriIdx ]; CudaAssert( ownTetIdx >= 0 );
tetraCloneTriArr[ ownTetIdx ] = foundLocIdx;
const int toTriIdx = toStarInfo.toGlobalTriIdx( foundLocIdx );
triTetraMap[ toTriIdx ] = ownTetIdx;
}
return;
}
__forceinline__ __device__ bool isPairInHistory
(
KerHistoryData historyData,
int starNum,
int v0,
int v1
)
{
CudaAssert( ( v0 >= 0 ) && ( v0 < starNum ) );
// Iterate the two history arrays
for ( int hi = 0; hi < 2; ++hi )
{
const int histBeg = historyData._starVertMap[ hi ][ v0 ];
const int histEnd = ( ( v0 + 1 ) < starNum ) ? historyData._starVertMap[ hi ][ v0 + 1 ] : historyData._vertNum[ hi ];
CudaAssert( ( histBeg >= 0 ) && ( histEnd >= 0 ) && "*All* history map values must be valid!" );
// Search history for insertion
for ( int histIdx = histBeg; histIdx < histEnd; ++histIdx )
{
if ( v1 == historyData._vertArr[ hi ][ histIdx ] )
{
return true;
}
}
}
return false;
}
__global__ void kerMarkIfInsertionInHistory
(
KerHistoryData historyData,
KerIntArray starArr,
int* vertArr,
int starNum
)
{
// Iterate insertions
for ( int idx = getCurThreadIdx(); idx < starArr._num; idx += getThreadNum() )
{
const int star = starArr._arr[ idx ];
const int vert = vertArr[ idx ];
if ( isPairInHistory( historyData, starNum, star, vert ) )
{
vertArr[ idx ] = -1;
}
}
return;
}
__forceinline__ __device__ int getValidMapVal
(
int* arr,
int inIdx
)
{
// Check if simple map value
int val = arr[ inIdx ];
if ( -1 != val )
{
return val;
}
// Decrement back until we hit valid map value
int idx = inIdx;
while ( idx > 0 )
{
const int prevVal = arr[ --idx ];
if ( -1 != prevVal )
{
// Write back so it can be found easier by next search
arr[ inIdx ] = prevVal;
return prevVal;
}
}
// Write back so it can be found easier by next search
arr[ 0 ] = 0;
arr[ inIdx ] = 0;
return 0;
}
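// Worked example (illustrative values): for arr = { 0, 3, -1, -1, 7 },
// getValidMapVal( arr, 3 ) sees arr[3] == -1, walks back past arr[2] == -1 to
// arr[1] == 3, caches the result by writing arr[3] = 3 and returns 3, so later
// lookups of index 3 hit the cached value directly.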
__global__ void kerConvertMapToCount
(
KerIntArray inMap,
int* countArr,
int dataNum
)
{
const int starNum = inMap._num;
// Iterate array
for ( int idx = getCurThreadIdx(); idx < starNum; idx += getThreadNum() )
{
const int beg = inMap._arr[ idx ];
int end = ( ( idx + 1 ) < starNum ) ? inMap._arr[ idx + 1 ] : dataNum;
if ( ( -1 == beg ) || ( -1 == end ) )
{
end = beg;
}
countArr[ idx ] = ( end - beg );
}
return;
}
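// Worked example (illustrative values): for inMap = { 0, 3, -1, 5 } and dataNum = 7,
// countArr becomes { 3, 0, 0, 2 }. Index 1 yields 0 because its end value (-1) is
// collapsed onto its begin value; index 2 yields 0 because its own begin value is -1.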
__global__ void kerGetActiveTriPos
(
KerStarData starData,
KerIntArray activeStarArr,
KerIntArray activeTriMap,
KerTriPosArray activeTriPosArr,
KerShortArray activeTriInsNumArr
)
{
// Iterate active stars
for ( int idx = getCurThreadIdx(); idx < activeStarArr._num; idx += getThreadNum() )
{
// Get triangle map of star
const int triIdxBeg = activeTriMap._arr[ idx ];
const int triIdxEnd = ( idx + 1 < activeStarArr._num ) ? activeTriMap._arr[ idx + 1 ] : activeTriPosArr._num;
// No active triangles in star
if ( triIdxBeg == triIdxEnd )
{
continue;
}
const int star = activeStarArr._arr[ idx ];
const StarInfo starInfo = starData.getStarInfo( star );
int locTriIdx = 0;
// First triangle of star gets the proper info
activeTriPosArr._arr[ triIdxBeg + locTriIdx ] = ( starInfo._begIdx0 << 1 );
activeTriInsNumArr._arr[ triIdxBeg + locTriIdx ] = starData._insCountArr[ star ];
++locTriIdx;
const int size = min( triIdxEnd - triIdxBeg, starInfo._size0 );
////
// Write as int4 for better performance
////
for ( ; ( locTriIdx < size ) && ( ( ( triIdxBeg + locTriIdx ) & 3 ) > 0 ); ++locTriIdx )
{
activeTriPosArr._arr[ triIdxBeg + locTriIdx ] = - locTriIdx;
}
for ( ; locTriIdx + 3 < size; locTriIdx += 4 )
{
( ( int4* ) activeTriPosArr._arr )[ ( triIdxBeg + locTriIdx ) >> 2 ] = make_int4( -locTriIdx, -locTriIdx-1, -locTriIdx-2, -locTriIdx-3 );
}
for ( ; locTriIdx < size; ++locTriIdx )
{
activeTriPosArr._arr[ triIdxBeg + locTriIdx ] = - locTriIdx;
}
if ( ( triIdxBeg + locTriIdx ) < triIdxEnd )
{
// First triangle of the star in the second array also gets the proper info
TriPosition triPos = ( ( starInfo._begIdx1MinusSize0 + starInfo._size0 ) << 1 ) + 1 ;
activeTriPosArr._arr[ triIdxBeg + locTriIdx ] = triPos;
activeTriInsNumArr._arr[ triIdxBeg + locTriIdx ] = starData._insCountArr[ star ];
++locTriIdx;
// Iterate non-first triangles of star, give them local index (flipped)
for ( ; locTriIdx < triIdxEnd - triIdxBeg; ++locTriIdx )
activeTriPosArr._arr[ triIdxBeg + locTriIdx ] = - ( locTriIdx - starInfo._size0 );
}
}
return;
}
__global__ void kerGetActiveTriInsCount
(
KerStarData starData,
KerTriPosArray activeTriPosArr,
KerShortArray activeTriInsNumArr
)
{
// Iterate active triangles
for ( int idx = getCurThreadIdx(); idx < activeTriPosArr._num; idx += getThreadNum() )
{
TriPosition triFirstPos = activeTriPosArr._arr[ idx ];
// Not first triangle of star
if ( triFirstPos < 0 )
{
const int locTriIdx = - triFirstPos;
activeTriInsNumArr._arr[ idx ] = activeTriInsNumArr._arr[ idx - locTriIdx ];
activeTriPosArr._arr[ idx ] = activeTriPosArr._arr[ idx - locTriIdx ] + ( locTriIdx << 1 );
}
}
return;
}
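// Note (a hedged reading of the two kernels above): TriPosition appears to encode
// ( triangle index << 1 ) | arrayBit, where the low bit selects the first or second
// triangle array. For example, a star whose first triangle sits at _begIdx0 = 10 stores
// ( 10 << 1 ) = 20; a triangle at local offset 3 is written as -3 by kerGetActiveTriPos
// and rebuilt here as 20 + ( 3 << 1 ) = 26, i.e. index 13 in the same array.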
// Check if star-vert pair is ordered
// If not, order it and set a bit in vert to 1 (encoding)
__global__ void
kerOrderDrownedPairs
(
KerIntArray keyArr,
int* valArr,
int bitsPerIndex
)
{
const int bitsPerValue = 31 - bitsPerIndex;
const int ValMask = 1 << bitsPerValue;
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
int key = keyArr._arr[ idx ];
int val = valArr[ idx ];
int flipped = 0;
CudaAssert( ( key >= 0 ) && "Invalid key!" );
// Order pair so that (key < val)
if ( key > val )
{
cuSwap( key, val );
flipped = 1;
}
// Write back
keyArr._arr[ idx ] = ( ( key << bitsPerValue ) | ( val & ( ValMask - 1 ) ) ); // Append val to key
valArr[ idx ] = ( val << 1 ) | flipped; // Encode flipped status in val
}
return;
}
// Remove the encoding and put back original order
__global__ void
kerRestoreDrownedPairs
(
KerIntArray keyArr,
int* valArr,
int bitsPerIndex
)
{
const int bitsPerValue = 31 - bitsPerIndex;
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
int key = keyArr._arr[ idx ];
int val = valArr[ idx ];
const int flipped = ( val & 1 );
CudaAssert( ( key >= 0 ) && "Appended Key-Value is invalid!" );
// Restore key and val
key = ( key >> bitsPerValue );
val = ( val >> 1 );
// Restore original order
if ( 1 == flipped )
{
cuSwap( key, val );
}
// Write back original pair
keyArr._arr[ idx ] = key;
valArr[ idx ] = val;
}
return;
}
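// Worked example (illustrative values): with bitsPerIndex = 20, bitsPerValue = 11 and
// ValMask = 2048. For the pair ( key = 7, val = 2 ), kerOrderDrownedPairs swaps to
// ( 2, 7 ) with flipped = 1 and stores key = ( 2 << 11 ) | 7 = 4103, val = ( 7 << 1 ) | 1 = 15.
// kerRestoreDrownedPairs then recovers key = 4103 >> 11 = 2 and val = 15 >> 1 = 7,
// sees flipped == 1 and swaps back to the original ( 7, 2 ).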
__global__ void
kerMarkDuplicateDrownedPairs
(
KerIntArray keyArr,
int* valArr
)
{
for ( int idx = getCurThreadIdx(); idx < keyArr._num; idx += getThreadNum() )
{
if ( 0 == idx )
{
continue;
}
const int prevKey = keyArr._arr[ idx - 1 ];
const int curKey = keyArr._arr[ idx ];
// Duplicate pair, only *one* such drowned pair possible
if ( prevKey == curKey )
{
valArr[ idx - 1 ] = -1;
valArr[ idx ] = -1;
}
}
return;
}
////////////////////////////////////////////////////////////////////////////////
#include <type_traits> //std::remove_cv
namespace xlib {
#define Store_MACRO(CACHE_MOD, ptx_modifier) \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, ulonglong2> \
(ulonglong2* pointer, ulonglong2 value) { \
\
asm volatile("st."#ptx_modifier".v2.u64 [%0], {%1, %2};" \
: : "l"(pointer), "l"(value.x), "l"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, uint4>(uint4* pointer, uint4 value) { \
asm volatile("st."#ptx_modifier".v4.u32 [%0], {%1, %2, %3, %4};" \
: : "l"(pointer), "r"(value.x), "r"(value.y), \
"r"(value.z), "r"(value.w)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, uint2>(uint2* pointer, uint2 value) { \
asm volatile("st."#ptx_modifier".v2.u32 [%0], {%1, %2};" \
: : "l"(pointer), "r"(value.x), "r"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, ushort4>(ushort4* pointer, ushort4 value) { \
asm volatile("st."#ptx_modifier".v4.u16 [%0], {%1, %2, %3, %4};" \
: : "l"(pointer), "h"(value.x), "h"(value.y), \
"h"(value.z), "h"(value.w)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, ushort2>(ushort2* pointer, ushort2 value) { \
asm volatile("st."#ptx_modifier".v2.u16 [%0], {%1, %2};" \
: : "l"(pointer), "h"(value.x), "h"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, longlong2> \
(longlong2* pointer, longlong2 value) { \
\
asm volatile("st."#ptx_modifier".v2.s64 [%0], {%1, %2};" \
: : "l"(pointer), "l"(value.x), "l"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, int4>(int4* pointer, int4 value) { \
asm volatile("st."#ptx_modifier".v4.s32 [%0], {%1, %2, %3, %4};" \
: : "l"(pointer), "r"(value.x), "r"(value.y), \
"r"(value.z), "r"(value.w)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, int2>(int2* pointer, int2 value) { \
asm volatile("st."#ptx_modifier".v2.s32 [%0], {%1, %2};" \
: : "l"(pointer), "r"(value.x), "r"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, short4>(short4* pointer, short4 value) { \
asm volatile("st."#ptx_modifier".v4.s16 [%0], {%1, %2, %3, %4};" \
: : "l"(pointer), "h"(value.x), "h"(value.y), \
"h"(value.z), "h"(value.w)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, short2>(short2* pointer, short2 value) { \
asm volatile("st."#ptx_modifier".v2.s16 [%0], {%1, %2};" \
: : "l"(pointer), "h"(value.x), "h"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, unsigned long long> \
(unsigned long long* pointer, unsigned long long value) { \
\
asm volatile("st."#ptx_modifier".u64 [%0], %1;" \
: : "l"(pointer), "l"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, unsigned>(unsigned* pointer, unsigned value) { \
asm volatile("st."#ptx_modifier".u32 [%0], %1;" \
: : "l"(pointer), "r"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, unsigned short> \
(unsigned short* pointer, unsigned short value) { \
\
asm volatile("st."#ptx_modifier".u16 [%0], %1;" \
: : "l"(pointer), \
"h"(static_cast<unsigned short>(value))); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, long long int> \
(long long int* pointer, long long int value) { \
\
asm volatile("st."#ptx_modifier".s64 [%0], %1;" \
: : "l"(pointer), "l"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, int>(int* pointer, int value) { \
asm volatile("st."#ptx_modifier".s32 [%0], %1;" \
: : "l"(pointer), "r"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, short>(short* pointer, short value) { \
asm volatile("st."#ptx_modifier".s16 [%0], %1;" \
: : "l"(pointer), "h"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, char>(char* pointer, char value) { \
asm volatile("st."#ptx_modifier".s8 [%0], %1;" \
: : "l"(pointer), "h"(static_cast<short>(value))); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, char2>(char2* pointer, char2 value) { \
StoreSupport<CACHE_MOD>(reinterpret_cast<short*>(pointer), \
reinterpret_cast<short&>(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, char4>(char4* pointer, char4 value) { \
StoreSupport<CACHE_MOD>(reinterpret_cast<int*>(pointer), \
reinterpret_cast<int&>(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, unsigned char> \
(unsigned char* pointer, unsigned char value) { \
\
asm volatile("st."#ptx_modifier".u8 [%0], %1;" \
: : "l"(pointer), \
"h"(static_cast<unsigned short>(value))); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, uchar2>(uchar2* pointer, uchar2 value) { \
StoreSupport<CACHE_MOD>(reinterpret_cast<unsigned short*>(pointer), \
reinterpret_cast<unsigned short&>(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, uchar4>(uchar4* pointer, uchar4 value) { \
StoreSupport<CACHE_MOD>(reinterpret_cast<unsigned*>(pointer), \
reinterpret_cast<unsigned&>(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, double2>(double2* pointer, double2 value) { \
asm volatile("st."#ptx_modifier".v2.f64 [%0], {%1, %2};" \
: : "l"(pointer), "d"(value.x), "d"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, double>(double* pointer, double value) { \
asm volatile("st."#ptx_modifier".f64 [%0], %1;" \
: : "l"(pointer), "d"(value)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, float4>(float4* pointer, float4 value) { \
asm volatile("st."#ptx_modifier".v4.f32 [%0], {%1, %2, %3, %4};" \
: : "l"(pointer), "f"(value.x), "f"(value.y), \
"f"(value.z), "f"(value.w)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, float2>(float2* pointer, float2 value) { \
asm volatile("st."#ptx_modifier".v2.f32 [%0], {%1, %2};" \
: : "l"(pointer), "f"(value.x), "f"(value.y)); \
} \
\
template<> \
__device__ __forceinline__ \
void StoreSupport<CACHE_MOD, float>(float* pointer, float value) { \
asm volatile("st."#ptx_modifier".f32 [%0], %1;" \
: : "l"(pointer), "f"(value)); \
}
//==============================================================================
//==============================================================================
template<CacheModifier MODIFIER = DF>
struct ThreadStore;
template<CacheModifier MODIFIER>
struct ThreadStore {
template<typename T, typename R>
__device__ __forceinline__
static void op(T* pointer, R value) {
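// sizeof(T) != sizeof(T) is always false but depends on T, so this assertion
// only fires if the unspecialized fallback is actually instantiated, i.e. for a
// CacheModifier without a ThreadStore specialization.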
static_assert(sizeof(T) != sizeof(T), "NOT IMPLEMENTED");
}
};
template<>
struct ThreadStore<DF> {
template<typename T, typename R>
__device__ __forceinline__
static void op(T* pointer, R value) {
*pointer = value;
}
};
//==============================================================================
//==============================================================================
template<CacheModifier M, typename T>
__device__ __forceinline__ void StoreSupport(T* pointer, T value);
#define StoreStruct_MACRO(CACHE_MOD) \
\
template<> \
struct ThreadStore<CACHE_MOD> { \
template<typename T, typename R> \
__device__ __forceinline__ \
static void op(T* pointer, R value) { \
return StoreSupport<CACHE_MOD>( \
const_cast<typename std::remove_cv<T>::type*>(pointer), \
value); \
} \
};
StoreStruct_MACRO(WB)
StoreStruct_MACRO(CG)
StoreStruct_MACRO(CS)
StoreStruct_MACRO(CV)
Store_MACRO(WB, global.wb)
Store_MACRO(CG, global.cg)
Store_MACRO(CS, global.cs)
Store_MACRO(CV, global.volatile)
#undef StoreStruct_MACRO
#undef Store_MACRO
//==============================================================================
//==============================================================================
template<CacheModifier MODIFIER, typename T, typename R>
__device__ __forceinline__
void Store(T* pointer, R value) {
static_assert(std::is_same<typename std::remove_cv<T>::type,
typename std::remove_cv<R>::type>::value,
"Different Type: T != R");
ThreadStore<MODIFIER>::op(pointer, value);
}
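//==============================================================================
// Illustrative example (not part of the original header; the helper name is
// hypothetical): Store<MODIFIER> dispatches at compile time to the PTX
// specialization generated by the macros above. With CG the int4 case below
// resolves to "st.global.cg.v4.s32"; with the default DF it falls back to a
// plain C++ store.
__device__ __forceinline__
void store_int4_cg_example(int4* pointer, int4 value) {
Store<CG>(pointer, value); // cache-global vectorized 128-bit store
}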
} // namespace xlib
namespace nv {
template<typename value_type>
struct ReplaceOp {
constexpr static value_type IDENTITY{0};
__host__ __device__ value_type operator()(value_type new_value, value_type old_value)
{
return new_value;
}
};
template<typename Table>
__global__ void insert_kernel(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
size_t len) {
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
kv.first = keys[i];
kv.second = vals[i];
auto it = table->insert(kv);
assert(it != table->end() && "error: insert fails: table is full");
}
}
template<typename Table>
__global__ void insert_kernel_mask(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
const bool* const status,
size_t len) {
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
if(!status[i]){
kv.first = keys[i];
kv.second = vals[i];
auto it = table->insert(kv);
assert(it != table->end() && "error: insert fails: table is full");
}
}
}
template<typename Table>
__global__ void upsert_kernel(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if(it.Iterator != table->end()){
((it.Iterator).getter())->second = vals[i];
} else {
thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
kv.first = keys[i];
kv.second = vals[i];
auto it = table->insert(kv);
assert(it != table->end() && "error: insert fails: table is full");
}
}
}
template <typename Table>
__global__ void accum_kernel(
Table* table, const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals_or_deltas, const bool* exists,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
//thrust::pair<typename Table::key_type, typename Table::mapped_type> kv;
//kv.first = keys[i];
//kv.second = vals_or_deltas[i];
auto it = table->accum(keys[i], vals_or_deltas[i], exists[i]);
}
}
template<typename Table>
__global__ void set_kernel(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
size_t len){
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
assert(it.Iterator != table->end() && "error: can't find key");
((it.Iterator).getter())->second = vals[i];
}
}
template<typename Table>
__global__ void set_kernel_mask(Table* table,
const typename Table::key_type* const keys,
const typename Table::mapped_type* const vals,
const bool *status,
size_t len){
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
if (status[i]) {
auto it = table->find(keys[i]);
assert(it.Iterator != table->end() && "set_kernel_mask: can't find key, but should exist.");
((it.Iterator).getter())->second = vals[i];
}
}
}
template<typename Table, typename GradType, typename Optimizer>
__global__ void update_kernel(Table* table,
const typename Table::key_type* const keys,
const GradType* const gradients,
size_t len,
Optimizer& op){
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
assert(it.Iterator != table->end() && "error: can't find key");
op.update(((it.Iterator).getter())->second, gradients[i]);
}
}
template<typename Table>
__global__ void search_kernel(Table* table,
const typename Table::key_type * const keys,
typename Table::mapped_type * const vals,
bool* const status,
size_t len,
typename Table::mapped_type* const def_val,
bool full_size_default) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
if(it.Iterator != table->end()){
vals[i] = ((it.Iterator).getter())->second;
status[i] = true;
}
else{
vals[i] = full_size_default ? def_val[i] : def_val[0];
status[i] = false;
}
}
}
template<typename Table>
__global__ void get_status_kernel(Table* table,
const typename Table::key_type * const keys,
bool* const status,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
//assert(it != table->end() && "error: can't find key");
if(it.Iterator != table->end()){
status[i] = true;
}
else{
status[i] = false;
}
}
}
template<typename Table, typename counter_type>
__global__ void get_insert_kernel(Table* table,
const typename Table::key_type * const keys,
typename Table::mapped_type * const vals,
size_t len,
counter_type * d_counter) {
ReplaceOp<typename Table::mapped_type> op;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->get_insert(keys[i], op, d_counter);
vals[i] = it->second;
}
}
template<typename Table, typename KeyType>
__global__ void size_kernel(const Table* table,
const size_t hash_capacity,
size_t * table_size,
KeyType unused_key) {
/* Per block accumulator */
__shared__ size_t block_acc;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
/* Initialize */
if(threadIdx.x == 0){
block_acc = 0;
}
__syncthreads();
/* If the bucket mapped to this thread is empty, do nothing; otherwise atomically add to the per-block counter */
if(i < hash_capacity){
typename Table::value_type val = load_pair_vectorized(table->data() + i);
bool valid = table->get_valid(i);
if((val.first != unused_key) && valid){
atomicAdd(&block_acc, 1);
}
}
__syncthreads();
/* Atomically reduce block counter to global counter */
if(threadIdx.x == 0){
atomicAdd(table_size, block_acc);
}
}
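// Note: size_kernel reduces in two levels -- every thread atomically bumps the
// per-block shared counter block_acc, and only thread 0 of each block adds the
// block total to the global table_size -- so global atomic traffic is limited to
// one add per block.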
template<typename KeyType, typename ValType, typename Table>
__global__ void dump_kernel(KeyType* d_key,
ValType* d_val,
const Table* table,
const size_t offset,
const size_t search_length,
size_t * d_dump_counter,
KeyType unused_key){
// Per-block gathered keys, values and counter; the global counter decides where each block's shared-memory results are stored in global memory.
//__shared__ KeyType block_result_key[BLOCK_SIZE_];
//__shared__ ValType block_result_val[BLOCK_SIZE_];
extern __shared__ unsigned char s[];
KeyType * smem = (KeyType *)s;
KeyType * block_result_key = smem;
ValType * block_result_val = (ValType *) &(smem[blockDim.x]);
__shared__ size_t block_acc;
__shared__ size_t global_acc;
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
/* Initialize */
if(threadIdx.x == 0){
block_acc = 0;
}
__syncthreads();
// Each thread gathers the key and value from its assigned bucket and stores them into shared memory.
if(i < search_length){
typename Table::value_type val = load_pair_vectorized(table->data() + offset + i);
bool valid = table->get_valid(offset + i);
if((val.first != unused_key) && valid){
size_t local_index = atomicAdd(&block_acc, 1);
block_result_key[local_index] = val.first;
block_result_val[local_index] = val.second;
}
}
__syncthreads();
// Each block requests a unique region in the global memory buffer; this is where its shared-memory results are stored back.
if(threadIdx.x == 0){
global_acc = atomicAdd(d_dump_counter, block_acc);
}
__syncthreads();
// Each thread stores one bucket's data back to global memory; d_dump_counter records how many buckets were dumped in total.
if(threadIdx.x < block_acc){
d_key[global_acc + threadIdx.x] = block_result_key[threadIdx.x];
d_val[global_acc + threadIdx.x] = block_result_val[threadIdx.x];
}
}
template<typename Table>
__global__ void delete_kernel(Table* table,
const typename Table::key_type* const keys,
size_t len) {
const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < len) {
auto it = table->find(keys[i]);
assert(it.Iterator != table->end() && "error: can't find key");
table->set_valid(it.current_index, false);
}
}
template<typename KeyType, typename ValType, typename BaseValType, KeyType empty_key, size_t DIM, typename counter_type = unsigned long long int>
class HashTable {
public:
HashTable(size_t capacity, counter_type count = 0) {
//assert(capacity <= std::numeric_limits<ValType>::max() && "error: Table is too large for the value type");
cudaDeviceProp deviceProp;
table_ = new Table(capacity, std::numeric_limits<ValType>::max());
update_counter_ = 0;
get_counter_ = 0;
// Allocate device-side counter and copy user input to it
CUDA_CHECK(cudaMallocManaged((void **)&d_counter_, sizeof(*d_counter_)));
CUDA_CHECK(cudaMemcpy(d_counter_, &count, sizeof(*d_counter_), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaGetDeviceProperties(&deviceProp ,0));
shared_mem_size = deviceProp.sharedMemPerBlock;
}
~HashTable() {
delete table_;
// De-allocate device-side counter
CUDA_CHECK(cudaFree(d_counter_));
}
HashTable(const HashTable&) = delete;
HashTable& operator=(const HashTable&) = delete;
void insert(const KeyType* d_keys, const BaseValType* d_vals, size_t len, cudaStream_t stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
insert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, (const ValType*)d_vals, len);
}
void upsert(const KeyType* d_keys, const BaseValType* d_vals, size_t len, cudaStream_t stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
upsert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, (const ValType*)d_vals, len);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void get(const KeyType* d_keys, BaseValType* d_vals, bool* d_status, size_t len, BaseValType *d_def_val, cudaStream_t stream, bool full_size_default) const {
if (len == 0) {
return;
}
CUDA_CHECK(cudaMemset((void *)d_vals, 0, sizeof(ValType) * len));
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
search_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, (ValType*)d_vals, d_status, len, (ValType*)d_def_val, full_size_default);
}
void get_status(const KeyType* d_keys, bool* d_status, size_t len, cudaStream_t stream) const {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
get_status_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, d_status, len);
}
void set(const KeyType* d_keys, const BaseValType* d_vals, size_t len, cudaStream_t stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
set_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, (const ValType*)d_vals, len);
}
void accum(const KeyType* d_keys, const BaseValType* d_vals_or_deltas, const bool* d_exists, size_t len,
cudaStream_t stream) {
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
accum_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys,
(const ValType*)d_vals_or_deltas, d_exists, len);
}
size_t get_size(cudaStream_t stream) const{
/* Size variables on host and device, and the total capacity of the hash table */
size_t table_size;
size_t * d_table_size;
const size_t hash_capacity = table_-> size();
/* Compute grid_size, allocate/initialize the device variable, and launch the kernel */
const int grid_size = (hash_capacity - 1) / BLOCK_SIZE_ + 1;
CUDA_CHECK(cudaMallocManaged((void **)&d_table_size, sizeof(size_t)));
CUDA_CHECK(cudaMemset ( d_table_size, 0, sizeof(size_t)));
size_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, hash_capacity, d_table_size, empty_key);
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaMemcpy(&table_size, d_table_size, sizeof(size_t), cudaMemcpyDeviceToHost));
/* Copy result back and do clean up*/
CUDA_CHECK(cudaFree(d_table_size));
return table_size;
}
void dump(KeyType* d_key, BaseValType* d_val, const size_t offset, const size_t search_length, size_t * d_dump_counter, cudaStream_t stream) const{
//Before we call the kernel, set the global counter to 0
CUDA_CHECK(cudaMemset (d_dump_counter, 0, sizeof(size_t)));
// Compute block size from available shared memory and grid size from the search length.
size_t block_size = shared_mem_size * 0.5 / (sizeof(KeyType) + sizeof(ValType)) ;
block_size = block_size <= 1024 ? block_size : 1024;
assert(block_size > 0 && "nvhash: block_size <= 0, the KV size may be too large!");
size_t shared_size = sizeof(* d_key) * block_size + sizeof(ValType) * block_size;
const int grid_size = (search_length - 1) / (block_size) + 1;
dump_kernel<<<grid_size, block_size, shared_size, stream>>>(d_key, (ValType* )d_val, table_, offset, search_length, d_dump_counter, empty_key);
}
size_t get_capacity() const{
return (table_-> size());
}
counter_type get_value_head() const{
counter_type counter;
CUDA_CHECK(cudaMemcpy(&counter, d_counter_, sizeof(*d_counter_), cudaMemcpyDeviceToHost));
return counter;
}
void set_value_head(counter_type counter_value){
CUDA_CHECK(cudaMemcpy(d_counter_, &counter_value, sizeof(*d_counter_), cudaMemcpyHostToDevice));
}
counter_type add_value_head(counter_type counter_add){
counter_type counter;
CUDA_CHECK(cudaMemcpy(&counter, d_counter_, sizeof(*d_counter_), cudaMemcpyDeviceToHost));
counter += counter_add;
CUDA_CHECK(cudaMemcpy(d_counter_, &counter, sizeof(*d_counter_), cudaMemcpyHostToDevice));
return counter;
}
template<typename GradType, typename Optimizer>
void update(const KeyType* d_keys, const GradType* d_gradients, size_t len, cudaStream_t stream, Optimizer& op){
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
update_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, d_gradients, len, op);
}
void get_insert(const KeyType* d_keys, void* d_vals, size_t len, cudaStream_t stream){
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
get_insert_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, (ValType*)d_vals, len, d_counter_);
}
// Before any get API is called, call this to check and update counter
bool get_lock(){
counter_mtx_.lock();
bool ret_val;
if(update_counter_ > 0){
ret_val = false; // There are update APIs running, can't do get.
}
else{
get_counter_++;
ret_val = true; // There is no update API running, can do get, increase counter
}
counter_mtx_.unlock();
return ret_val;
}
// Before any update API is called, call this to check and update counter
bool update_lock(){
counter_mtx_.lock();
bool ret_val;
if(get_counter_ > 0){
ret_val = false; // There are get APIs running, can't do update
}
else{
update_counter_++;
ret_val = true; // There is no get API running, can do update, increase counter
}
counter_mtx_.unlock();
return ret_val;
}
// After each get API finish on this GPU's hashtable, decrease the counter
void get_release(){
counter_mtx_.lock();
get_counter_--; // one get API finish, dec counter
counter_mtx_.unlock();
}
void update_release(){
counter_mtx_.lock();
update_counter_--; // one update API finish, dec counter
counter_mtx_.unlock();
}
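// Usage sketch (illustrative; the variable names below are hypothetical): the
// counters above implement a simple readers-writers protocol. A caller spins
// until the matching *_lock() succeeds, runs the API, then releases it, e.g.
//   while (!table.get_lock()) { /* an update API is running, retry */ }
//   table.get(d_keys, d_vals, d_status, len, d_def_val, stream, false);
//   CUDA_CHECK(cudaStreamSynchronize(stream));
//   table.get_release();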
void clear(cudaStream_t stream){
table_-> clear_async(stream);
}
void remove(const KeyType* d_keys, size_t len, cudaStream_t stream){
if (len == 0) {
return;
}
const int grid_size = (len - 1) / BLOCK_SIZE_ + 1;
delete_kernel<<<grid_size, BLOCK_SIZE_, 0, stream>>>(table_, d_keys, len);
}
private:
static const int BLOCK_SIZE_ = 256;
using Table = concurrent_unordered_map<KeyType, ValType, empty_key, DIM>;
Table* table_;
// GPU-level lock and counters for get and update APIs
std::mutex counter_mtx_; // Lock that protect the counters
volatile size_t update_counter_; // How many update APIs are currently called on this GPU' hashtable
volatile size_t get_counter_; // How many get APIs are currently called on this GPU's hashtable
// Counter for value index
counter_type * d_counter_;
size_t shared_mem_size;
};
}
#endif
#pragma once
#include <gunrock/util/track_utils.cuh>
#include <gunrock/app/problem_base.cuh>
#include <gunrock/oprtr/1D_oprtr/for_all.cuh>
namespace gunrock {
namespace app {
namespace pr {
/**
* @brief Specifying parameters for PR Problem
* @param parameters The util::Parameter<...> structure holding all parameter info
* \return cudaError_t error message(s), if any
*/
cudaError_t UseParameters_problem(util::Parameters &parameters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(gunrock::app::UseParameters_problem(parameters));
GUARD_CU(parameters.Use<bool>(
"normalize",
util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
true, "Whether to normalize ranking values.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"compensate",
util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
true, "Whether to compensate for zero-degree vertices.", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"scale",
util::OPTIONAL_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
false, "Whether to scale the ranking values during computation.",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"delta",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
0.85, "Damping factor of PageRank.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<double>(
"threshold",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
0.01, "Error threshold of PageRank.", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int64_t>(
"max-iter",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
50, "Maximum number of PageRank iterations.", __FILE__, __LINE__));
return retval;
}
/**
* @brief PageRank Problem structure.
* @tparam _GraphT Type of the graph
* @tparam _ValueT Type of ranking values
* @tparam _FLAG Problem flags
*/
template <typename _GraphT, typename _ValueT = typename _GraphT::ValueT,
ProblemFlag _FLAG = Problem_None>
struct Problem : ProblemBase<_GraphT, _FLAG> {
typedef _GraphT GraphT;
static const ProblemFlag FLAG = _FLAG;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::SizeT SizeT;
typedef typename GraphT::CscT CscT;
typedef typename GraphT::CooT CooT;
typedef typename GraphT::GpT GpT;
typedef _ValueT ValueT;
typedef ProblemBase<GraphT, FLAG> BaseProblem;
typedef DataSliceBase<GraphT, FLAG> BaseDataSlice;
// Helper structures
/**
* @brief Data slice structure which contains PR problem specific data.
*/
struct DataSlice : BaseDataSlice {
// PR-specific storage arrays
util::Array1D<SizeT, ValueT> rank_curr; // Ping-pong ranking values
util::Array1D<SizeT, ValueT> rank_next; // Ping-pong ranking values
util::Array1D<SizeT, ValueT>
rank_temp; // Temp ranking values for neighbor reduce
util::Array1D<SizeT, ValueT> rank_temp2; // Another temp ranking values
util::Array1D<SizeT, SizeT> degrees; // Out-degree for each vertex
util::Array1D<SizeT, VertexT> node_ids;
util::Array1D<SizeT, VertexT> local_vertices;
util::Array1D<SizeT, VertexT> *remote_vertices_out;
util::Array1D<SizeT, VertexT> *remote_vertices_in;
SizeT org_nodes; // Number of vertices in the original graph
bool normalize; // Whether to normalize the ranking value
bool compensate; // Whether to compensate for zero-degree vertices
bool scale; // Whether to scale the ranking values during computation
bool pull; // Whether to use pull direction PR
ValueT threshold; // Threshold for ranking errors
ValueT delta; // Damping factor
SizeT max_iter; // Maximum number of PR iterations
ValueT init_value; // Initial ranking value
ValueT reset_value;
VertexT src_node; // Source vertex for personalized PageRank
bool to_continue;
SizeT num_updated_vertices;
bool final_event_set;
DataSlice *data_slices;
util::Array1D<int, SizeT> in_counters;
util::Array1D<int, SizeT> out_counters;
util::Array1D<uint64_t, char> cub_sort_storage;
util::Array1D<SizeT, VertexT> temp_vertex;
/*
* @brief Default constructor
*/
DataSlice()
: BaseDataSlice(),
org_nodes(0),
normalize(true),
compensate(true),
scale(false),
pull(false),
threshold(0),
delta(0),
init_value(0),
reset_value(0),
src_node(util::PreDefinedValues<VertexT>::InvalidValue),
to_continue(true),
max_iter(0),
num_updated_vertices(0),
final_event_set(false),
remote_vertices_in(NULL),
remote_vertices_out(NULL) {
rank_curr.SetName("rank_curr");
rank_next.SetName("rank_next");
rank_temp.SetName("rank_temp");
rank_temp2.SetName("rank_temp2");
degrees.SetName("degrees");
node_ids.SetName("node_ids");
local_vertices.SetName("local_vertices");
in_counters.SetName("in_counters");
out_counters.SetName("out_counters");
cub_sort_storage.SetName("cub_sort_storage");
temp_vertex.SetName("temp_vertex");
}
/*
* @brief Default destructor
*/
virtual ~DataSlice() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx));
GUARD_CU(rank_curr.Release(target));
GUARD_CU(rank_next.Release(target));
GUARD_CU(rank_temp.Release(target));
GUARD_CU(rank_temp2.Release(target));
GUARD_CU(degrees.Release(target));
GUARD_CU(node_ids.Release(target));
GUARD_CU(in_counters.Release(target));
GUARD_CU(out_counters.Release(target));
GUARD_CU(cub_sort_storage.Release(target));
GUARD_CU(temp_vertex.Release(target));
if (remote_vertices_in != NULL) {
for (int peer = 0; peer < this->num_gpus; peer++) {
GUARD_CU(remote_vertices_in[peer].Release(target));
}
delete[] remote_vertices_in;
remote_vertices_in = NULL;
}
if (remote_vertices_out != NULL) {
for (int peer = 0; peer < this->num_gpus; peer++) {
GUARD_CU(remote_vertices_out[peer].Release(target));
}
delete[] remote_vertices_out;
remote_vertices_out = NULL;
}
GUARD_CU(BaseDataSlice::Release(target));
return retval;
}
/**
* @brief initializing PR-specific data on each gpu
* @param sub_graph Sub graph on the GPU.
* @param[in] gpu_idx GPU device index
* @param[in] target Targeting device location
* @param[in] flag Problem flag containing options
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &sub_graph, SizeT org_nodes, int num_gpus = 1,
int gpu_idx = 0, util::Location target = util::DEVICE,
ProblemFlag flag = Problem_None) {
cudaError_t retval = cudaSuccess;
SizeT nodes = sub_graph.nodes;
SizeT edges = sub_graph.edges;
this->org_nodes = org_nodes;
util::PrintMsg("nodes = " + std::to_string(nodes));
GUARD_CU(BaseDataSlice::Init(sub_graph, num_gpus, gpu_idx, target, flag));
GUARD_CU(rank_curr.Allocate(nodes, target));
GUARD_CU(rank_next.Allocate(nodes, target));
if (pull) {
GUARD_CU(rank_temp.Allocate(edges, target));
GUARD_CU(rank_temp2.Allocate(nodes, target));
}
GUARD_CU(degrees.Allocate(nodes + 1, target));
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed.");
// Compute degrees
// auto &sub_graph = this -> sub_graph[0];
if (num_gpus == 1) {
GUARD_CU(local_vertices.Allocate(nodes, target));
GUARD_CU(local_vertices.ForAll(
[] __host__ __device__(VertexT * l_vertices, const SizeT &pos) {
l_vertices[pos] = pos;
},
nodes, target, this->stream));
} else {
GUARD_CU(out_counters.Allocate(num_gpus, util::HOST));
GUARD_CU(in_counters.Allocate(num_gpus, util::HOST));
remote_vertices_out = new util::Array1D<SizeT, VertexT>[num_gpus];
remote_vertices_in = new util::Array1D<SizeT, VertexT>[num_gpus];
for (int peer = 0; peer < num_gpus; peer++) {
out_counters[peer] = 0;
remote_vertices_out[peer].SetName("remote_vertices_out[]");
remote_vertices_in[peer].SetName("remote_vertices_in[]");
}
for (VertexT v = 0; v < nodes; v++)
out_counters[sub_graph.GpT::partition_table[v]]++;
for (int peer = 0; peer < num_gpus; peer++) {
GUARD_CU(remote_vertices_out[peer].Allocate(out_counters[peer],
util::HOST | target));
out_counters[peer] = 0;
}
for (VertexT v = 0; v < nodes; v++) {
int target = sub_graph.GpT::partition_table[v];
remote_vertices_out[target][out_counters[target]] = v;
out_counters[target]++;
}
for (int peer = 0; peer < num_gpus; peer++) {
GUARD_CU(remote_vertices_out[peer].Move(util::HOST, target));
}
GUARD_CU(local_vertices.SetPointer(
remote_vertices_out[0].GetPointer(util::HOST), out_counters[0],
util::HOST));
GUARD_CU(
local_vertices.SetPointer(remote_vertices_out[0].GetPointer(target),
out_counters[0], target));
}
if (pull) {
GUARD_CU(sub_graph.CscT::Move(util::HOST, target, this->stream));
} else {
GUARD_CU(sub_graph.CooT::Move(util::HOST, target, this->stream));
}
GUARD_CU2(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed.");
if (GraphT::FLAG & gunrock::graph::HAS_CSR) {
GUARD_CU(degrees.ForAll(
[sub_graph] __host__ __device__(SizeT * degrees, const SizeT &pos) {
degrees[pos] = sub_graph.GetNeighborListLength(pos);
},
sub_graph.nodes, target, this->stream));
} else if (GraphT::FLAG &
(gunrock::graph::HAS_COO | gunrock::graph::HAS_CSC)) {
bool pull = this->pull;
GUARD_CU(degrees.ForEach(
[] __host__ __device__(SizeT & degree) { degree = 0; }, nodes + 1,
target, this->stream));
GUARD_CU(degrees.ForAll(
[sub_graph, nodes, pull] __host__ __device__(SizeT * degrees,
const SizeT &e) {
VertexT src, dest;
if (pull) {
sub_graph.CscT::GetEdgeSrcDest(e, src, dest);
SizeT old_val = atomicAdd(degrees + dest, 1);
// if (util::isTracking(dest))
// printf("degree[%d] <- %d, edge %d : %d -> %d\n",
// dest, old_val + 1, e, src, dest);
} else {
sub_graph.CooT::GetEdgeSrcDest(e, src, dest);
SizeT old_val = atomicAdd(degrees + src, 1);
// if (util::isTracking(src))
// printf("degree[%d] <- %d, edge %d : %d -> %d\n",
// src, old_val + 1, e, src, dest);
}
},
sub_graph.edges, target, this->stream));
// GUARD_CU(oprtr::ForAll((VertexT*)NULL,
// [degrees]
// __host__ __device__ (VertexT *dummy, const SizeT &e)
//{
// printf("degree[42029] = %d\n", degrees[42029]);
//}, 1, target, this -> stream));
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] target Targeting device location
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
SizeT nodes = this->sub_graph->nodes;
// TODO: Move to EnactorSlice::Reset()
/*if (this -> num_gpus > 1)
for (int peer = 0; peer < this -> num_gpus; peer++)
{
if (retval = this -> keys_out[peer].Release()) return retval;
if (retval = this -> keys_in[0][peer].Release()) return retval;
if (retval = this -> keys_in[1][peer].Release()) return retval;
}*/
// Ensure data allocation is sufficient
GUARD_CU(rank_curr.EnsureSize_(nodes, target));
GUARD_CU(rank_next.EnsureSize_(nodes, target));
// GUARD_CU(degrees .EnsureSize_(nodes, target));
// Initial rank_next = 0
init_value = normalize ? (scale ? 1.0 : (1.0 / (ValueT)(org_nodes)))
: (1.0 - delta);
reset_value = normalize ? (scale ? (1.0 - delta)
: ((1.0 - delta) / (ValueT)(org_nodes)))
: (1.0 - delta);
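// Illustrative example: with normalize = true, scale = false, delta = 0.85 and
// org_nodes = 4, init_value = 1.0 / 4 = 0.25 and reset_value = 0.15 / 4 = 0.0375;
// without normalization both collapse to (1.0 - delta) = 0.15.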
bool &normalize = this->normalize;
ValueT &delta = this->delta;
GUARD_CU(rank_next.ForEach(
[normalize, delta] __host__ __device__(ValueT & rank) {
rank = normalize ? (ValueT)0.0 : (ValueT)(1.0 - delta);
},
nodes, target, this->stream));
ValueT &init_value = this->init_value;
GUARD_CU(rank_curr.ForAll(
degrees,
[init_value] __host__ __device__(ValueT * ranks, SizeT * degrees_,
const SizeT &v) {
SizeT degree = degrees_[v];
ranks[v] = (degree == 0) ? init_value : (init_value / degree);
// if (v == 42029)
// printf("rank[%d] = %f = %f / %d\n",
// v, ranks[v], init_value, degree);
},
nodes, target, this->stream));
this->to_continue = true;
this->final_event_set = false;
// this -> PR_queue_length = 1;
this->num_updated_vertices = 1;
return retval;
}
}; // DataSlice
// Members
// Set of data slices (one for each GPU)
util::Array1D<SizeT, DataSlice> *data_slices;
// whether to use the scaling feature
// bool scaled;
// Methods
/**
* @brief PRProblem default constructor
*/
Problem(util::Parameters &_parameters, ProblemFlag _flag = Problem_None)
: BaseProblem(_parameters, _flag), data_slices(NULL) {}
/**
* @brief PRProblem default destructor
*/
virtual ~Problem() { Release(); }
/*
* @brief Releasing allocated memory space
* @param[in] target The location to release memory from
* \return cudaError_t Error message(s), if any
*/
cudaError_t Release(util::Location target = util::LOCATION_ALL) {
cudaError_t retval = cudaSuccess;
if (data_slices == NULL) return retval;
for (int i = 0; i < this->num_gpus; i++)
GUARD_CU(data_slices[i].Release(target));
if ((target & util::HOST) != 0 &&
data_slices[0].GetPointer(util::DEVICE) == NULL) {
delete[] data_slices;
data_slices = NULL;
}
GUARD_CU(BaseProblem::Release(target));
return retval;
}
/**
* \addtogroup PublicInterface
* @{
*/
/**
* @brief Copy result ranking values and vertex orders to host-side vectors.
* @param[out] h_node_ids host vector to store node Vertex IDs.
* @param[out] h_rank host vector to store page rank values.
* @param[in] target where the results are stored
* \return cudaError_t Error message(s), if any
*/
cudaError_t Extract(VertexT *h_node_ids, ValueT *h_ranks,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
auto &data_slice = data_slices[0][0];
SizeT nodes = this->org_graph->nodes;
if (target == util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[0]));
data_slice.rank_curr.SetPointer(h_ranks, nodes, util::HOST);
data_slice.node_ids.SetPointer(h_node_ids, nodes, util::HOST);
GUARD_CU(data_slice.rank_curr.Move(util::DEVICE, util::HOST));
GUARD_CU(data_slice.node_ids.Move(util::DEVICE, util::HOST));
} else if (target == util::HOST) {
GUARD_CU(data_slice.rank_curr.ForEach(
h_ranks,
[] __host__ __device__(const ValueT &rank, ValueT &h_rank) {
h_rank = rank;
},
nodes, util::HOST));
GUARD_CU(data_slice.node_ids.ForEach(
h_node_ids,
[] __host__ __device__(const VertexT &node_id, VertexT &h_node_id) {
h_node_id = node_id;
},
nodes, util::HOST));
}
return retval;
}
/**
* @brief initialization function.
   * @param[in] graph  The graph that PageRank processes on
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Init(GraphT &graph, util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
GUARD_CU(BaseProblem::Init(graph, target));
data_slices = new util::Array1D<SizeT, DataSlice>[this->num_gpus];
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
data_slices[gpu].SetName("data_slices[" + std::to_string(gpu) + "]");
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
// GUARD_CU(this -> graph_slices[gpu] -> out_degrees .Release());
// GUARD_CU(this -> graph_slices[gpu] -> original_vertex.Release());
// GUARD_CU(this -> graph_slices[gpu] -> convertion_table.Release());
GUARD_CU(data_slices[gpu].Allocate(1, target | util::HOST));
auto &data_slice = data_slices[gpu][0];
data_slice.normalize = this->parameters.template Get<bool>("normalize");
data_slice.compensate = this->parameters.template Get<bool>("compensate");
data_slice.scale = this->parameters.template Get<bool>("scale");
data_slice.pull = this->parameters.template Get<bool>("pull");
data_slice.threshold = this->parameters.template Get<ValueT>("threshold");
data_slice.delta = this->parameters.template Get<ValueT>("delta");
data_slice.max_iter = this->parameters.template Get<SizeT>("max-iter");
GUARD_CU(data_slice.Init(this->sub_graphs[gpu], graph.nodes,
this->num_gpus, this->gpu_idx[gpu], target,
this->flag));
}
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
if (target & util::DEVICE) {
GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU2(cudaStreamSynchronize(data_slices[gpu]->stream),
"cudaStreamSynchronize failed");
}
}
return retval;
}
/**
* @brief Reset problem function. Must be called prior to each run.
* @param[in] src Source vertex to start.
   * @param[in] target Memory location to work on
* \return cudaError_t Error message(s), if any
*/
cudaError_t Reset(VertexT src = util::PreDefinedValues<VertexT>::InvalidValue,
util::Location target = util::DEVICE) {
cudaError_t retval = cudaSuccess;
for (int gpu = 0; gpu < this->num_gpus; gpu++) {
auto &data_slice = data_slices[gpu][0];
data_slice.src_node = src;
// Set device
if (target & util::DEVICE) GUARD_CU(util::SetDevice(this->gpu_idx[gpu]));
GUARD_CU(data_slice.Reset(target));
GUARD_CU(data_slices[gpu].Move(util::HOST, target));
if (gpu == 0 && this->num_gpus > 1) {
for (int peer = 1; peer < this->num_gpus; peer++) {
GUARD_CU(data_slice.remote_vertices_in[peer].Move(
util::HOST, target, data_slice.in_counters[peer]));
}
}
}
return retval;
}
/** @} */
}; // Problem
} // namespace pr
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
namespace tensorflow
{
using GPUDevice = Eigen::GpuDevice;
using namespace effectivetransformer;
namespace functor
{
template <typename T>
struct BertTransformerOpFunctor<GPUDevice, T>
{
typedef typename TransformerTFTraits<T>::DataType DataType_;
static Status Compute(OpKernelContext *context,
EncoderInitParam<DataType_ > param,
TransformerParam t_param)
{
const cudaStream_t &stream = context->eigen_device<GPUDevice>().stream();
param.stream = stream;
try
{
check_cuda_error(cublasSetStream(param.cublas_handle, stream));
/// 1. Set compute type
cudaDataType_t computeType, AType, BType, CType;
int cublasAlgo[3];
if (TransformerTFTraits<T>::OpType == OperationType::FP32) {
computeType = CUDA_R_32F;
AType = CUDA_R_32F;
BType = CUDA_R_32F;
CType = CUDA_R_32F;
cublasAlgo[0] = -1;
cublasAlgo[1] = -1;
cublasAlgo[2] = -1;
} else {
computeType = CUDA_R_16F;
AType = CUDA_R_16F;
BType = CUDA_R_16F;
CType = CUDA_R_16F;
cublasAlgo[0] = 99;
cublasAlgo[1] = 99;
cublasAlgo[2] = 99;
}
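        // Cast to cublasGemmAlgo_t below: -1 is CUBLAS_GEMM_DEFAULT and 99 is
        // CUBLAS_GEMM_DEFAULT_TENSOR_OP (use Tensor Cores on the FP16 path).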
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
/// 2. allocate buffer for transformer
Tensor buf_tensor;
int batch_size = t_param.batch_size_;
int head_num = t_param.head_num_;
int from_seq_len = t_param.from_seq_len_;
int size_per_head = t_param.size_per_head_;
int input_tensor_size = batch_size * head_num * from_seq_len * size_per_head;
int attn_tensor_size = batch_size * head_num * from_seq_len * from_seq_len;
        // buf_size must be in bytes: the scratch tensor is DT_UINT8 but it is
        // addressed below as 13 tensor-sized slots plus one attention-score slot of DataType_
        long long int buf_size = (long long int)(input_tensor_size * 13 + attn_tensor_size) * sizeof(DataType_);
        tensorflow::Status status = context->allocate_temp(DT_UINT8, TensorShape{buf_size}, &buf_tensor);
/// 3. assign intermediate pointer
DataType_* buf = reinterpret_cast<DataType_ *>(buf_tensor.flat<uint8>().data());
/// buffer for qkv
DataType_* query_buf_ = buf + 0 * input_tensor_size;
DataType_* key_buf_ = buf + 1 * input_tensor_size;
DataType_* value_buf_ = buf + 2 * input_tensor_size;
DataType_* query_ = buf + 3 * input_tensor_size;
DataType_* key_ = buf + 4 * input_tensor_size;
DataType_* value_ = buf + 5 * input_tensor_size;
/// buffer for self attention
DataType_* qk_buf_ = buf + 0 * input_tensor_size;
DataType_* transpose_dst_ = buf + std::max(attn_tensor_size, input_tensor_size);
/// buffer for output matmat
DataType_* attr_out_buf_ = buf + 0 * input_tensor_size;
DataType_* attr_matmul_buf_ = buf + 1 * input_tensor_size;
DataType_* inter_matmul_buf_ = buf + 2 * input_tensor_size;
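        /// Note: qk_buf_ and attr_out_buf_ deliberately alias the slots used above for
        /// query_buf_/key_buf_/value_buf_; by the time they are written, those earlier
        /// intermediates are no longer needed (inferred from the data flow below).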
/// 4. get valid word num
int valid_word_num = 0;
check_cuda_error(cudaMemcpyAsync(
&valid_word_num, param.valid_word_num, sizeof(int), cudaMemcpyDeviceToHost, param.stream));
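        // Note: this device-to-host copy targets pageable host memory (a stack variable),
        // so cudaMemcpyAsync returns only after the copy completes and valid_word_num is
        // ready for the host-side GEMM sizing below; an explicit
        // cudaStreamSynchronize(param.stream) would make that dependency more obvious.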
// std::cout << "valid_word_num : " << valid_word_num << std::endl;
// 5. input -> Q K V
{
int m = valid_word_num;
int k = t_param.head_num_ * t_param.size_per_head_;
int n = k;
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.attr_kernel_Q, AType, n,
param.from_tensor, BType, k,
&beta,
query_buf_, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[0])));
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.attr_kernel_K, AType, n,
param.to_tensor, BType, k,
&beta,
key_buf_, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[0])));
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.attr_kernel_V, AType, n,
param.to_tensor, BType, k,
&beta,
value_buf_, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[0])));
// check_cuda_error(cudaMemsetAsync(query_, 0, input_tensor_size * sizeof(DataType_), stream));
// check_cuda_error(cudaMemsetAsync(key_, 0, input_tensor_size * sizeof(DataType_), stream));
// check_cuda_error(cudaMemsetAsync(value_, 0, input_tensor_size * sizeof(DataType_), stream));
check_cuda_error(cudaMemsetAsync(query_, 0, 3 * input_tensor_size * sizeof(DataType_), stream));
/// add bias & add padding & transpose for self-attention
cuda::add_QKV_bias_padding_kernelLauncher<DataType_>(
query_buf_, param.attr_bias_Q,
key_buf_, param.attr_bias_K,
value_buf_, param.attr_bias_V,
query_, key_, value_,
valid_word_num, batch_size, from_seq_len, head_num, size_per_head,
param.batch_idx, param.word_idx, stream);
}
/// 6. self-attention
{
check_cuda_error(cublasGemmStridedBatchedEx(param.cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
from_seq_len, from_seq_len, size_per_head,
&alpha,
key_, AType, size_per_head, from_seq_len * size_per_head,
query_, BType, size_per_head, from_seq_len * size_per_head,
&beta,
qk_buf_, CType, from_seq_len, from_seq_len * from_seq_len,
batch_size * head_num,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[1])));
DataType_ scaler = 1 / sqrtf(size_per_head * 1.0f);
cuda::softmax_kernel_kernelLauncher<DataType_>(
qk_buf_, param.attr_mask, batch_size, head_num, from_seq_len, scaler, stream);
check_cuda_error(cublasGemmStridedBatchedEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, from_seq_len, from_seq_len,
&alpha,
value_, AType, size_per_head, from_seq_len * size_per_head,
qk_buf_, BType, from_seq_len, from_seq_len * from_seq_len,
&beta,
transpose_dst_, CType, size_per_head, from_seq_len * size_per_head,
batch_size * head_num,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[2])));
cuda::transpose_rm_padding_kernelLauncher<DataType_>(
transpose_dst_, attr_out_buf_,
valid_word_num, batch_size, from_seq_len, head_num, size_per_head,
param.batch_idx, param.word_idx, stream);
}
/// 7. matmat & layer norm
{
int m = valid_word_num;
int k = head_num * size_per_head;
int n = k;
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.attr_output_kernel, AType, n,
attr_out_buf_, BType, k,
&beta,
attr_matmul_buf_, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[0])));
add_bias_input_layernorm_kernelLauncher<DataType_>(attr_matmul_buf_,
param.from_tensor, param.attr_output_bias, param.attr_output_layernorm_gamma,
param.attr_output_layernorm_beta, m, n, param.stream);
n *= 4;
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.inter_kernel, AType, n,
attr_matmul_buf_, BType, k,
&beta,
inter_matmul_buf_, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[1])));
add_bias_act_kernelLauncher<DataType_>(inter_matmul_buf_, param.inter_bias, m, n, param.stream);
n = k;
k *= 4;
check_cuda_error(cublasGemmEx(param.cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
n, m, k,
&alpha,
param.output_kernel, AType, n,
inter_matmul_buf_, BType, k,
&beta,
param.transformer_out, CType, n,
computeType,
static_cast<cublasGemmAlgo_t>(cublasAlgo[2])));
add_bias_input_layernorm_kernelLauncher<DataType_>(
param.transformer_out, attr_matmul_buf_, param.output_bias,
param.output_layernorm_gamma,
param.output_layernorm_beta,
m, n, param.stream);
}
return Status::OK();
}
catch(std::runtime_error& error)
{
return errors::Internal(error.what());
}
catch(...)
{
return errors::Internal("Runtime error");
}
}
};
template struct functor::BertTransformerOpFunctor<GPUDevice, float>;
template struct functor::BertTransformerOpFunctor<GPUDevice, Eigen::half>;
/// ************************* Transformer input parser *************************
template <typename T>
struct BertTransformerInputOpFunctor<GPUDevice, T>
{
typedef typename TransformerTFTraits<T>::DataType DataType_;
static Status Compute(OpKernelContext *context,
EncoderInputInitParam<DataType_ > param)
{
const cudaStream_t &stream = context->eigen_device<GPUDevice>().stream();
param.stream = stream;
try
{
/// 1. allocate tf temp memory
Tensor buf;
long long int buf_size = param.batch_size_ * param.from_seq_len_ * 2;
        tensorflow::Status status = context->allocate_temp(DT_INT32, TensorShape{buf_size}, &buf);
        if(status != tensorflow::Status::OK()) {
          throw std::runtime_error("TF error: context->allocate_temp failed");
        }
        int* prefix_sum_buf_ = reinterpret_cast<int *>(buf.flat<int>().data());
        if(prefix_sum_buf_ == nullptr) {
          throw std::runtime_error(std::string("Tensorflow Allocator failed to allocate internal buffer."));
        }
/// 2. compute mask's prefix sum
int word_num = param.batch_size_ * param.from_seq_len_;
exclusiveScan_kernelLauncher(prefix_sum_buf_, param.mask, word_num, param.stream);
        /// 3. compress the input tensor: copy valid words to their packed positions
        ///    given by the prefix sum (this would be faster if done before the embedding;
        ///    thrust::copy_if is very slow here)
compressBertInput_kernelLauncher(
param.from_tensor, param.mask, prefix_sum_buf_,
param.to_tensor, param.batch_idx, param.word_idx,
param.batch_size_ ,param.from_seq_len_, param.head_num_ * param.size_per_head_, param.stream);
        /// 4. get valid word num
int valid_word_num = 0;
int last_mask = 0;
check_cuda_error(cudaMemcpyAsync(
&valid_word_num, prefix_sum_buf_ + word_num - 1, sizeof(int), cudaMemcpyDeviceToHost, param.stream));
check_cuda_error(cudaMemcpyAsync(
&last_mask, param.mask + word_num - 1, sizeof(int), cudaMemcpyDeviceToHost, param.stream));
check_cuda_error(cudaStreamSynchronize(param.stream));
if (last_mask == 1) {
          // the scan is exclusive, so the final word is not counted; add it back when the last mask element is 1
valid_word_num ++;
}
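        // Example: mask = [1,1,0,1] -> exclusive scan = [0,1,2,2]; the last scan element (2)
        // misses the final word because the scan is exclusive, so the trailing 1 brings the
        // valid word count to 3.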
check_cuda_error(cudaMemcpyAsync(
param.valid_word_num, &valid_word_num, sizeof(int), cudaMemcpyHostToDevice, param.stream));
// std::cout << "valid_word_num : " << valid_word_num << std::endl;
return Status::OK();
}
catch(std::runtime_error& error)
{
return errors::Internal(error.what());
}
catch(...)
{
return errors::Internal("Runtime error");
}
}
};
template struct functor::BertTransformerInputOpFunctor<GPUDevice, float>;
template struct functor::BertTransformerInputOpFunctor<GPUDevice, Eigen::half>;
/// ************************* Transformer output parser *************************
template <typename T>
struct BertTransformerOutputOpFunctor<GPUDevice, T>
{
typedef typename TransformerTFTraits<T>::DataType DataType_;
static Status Compute(OpKernelContext *context,
EncoderOutputInitParam<DataType_ > param)
{
const cudaStream_t &stream = context->eigen_device<GPUDevice>().stream();
param.stream = stream;
try
{
int valid_word_num = 0;
check_cuda_error(cudaMemcpyAsync(
&valid_word_num, param.valid_word_num, sizeof(int), cudaMemcpyDeviceToHost, param.stream));
int tensor_size = param.batch_size_ * param.head_num_ * param.from_seq_len_ * param.size_per_head_;
check_cuda_error(cudaMemsetAsync(param.to_tensor, 0, tensor_size * sizeof(DataType_), param.stream));
restoreBertOutput_kernelLauncher(
param.to_tensor,
param.from_tensor, param.batch_idx, param.word_idx,
valid_word_num, param.from_seq_len_, param.head_num_ * param.size_per_head_, param.stream);
return Status::OK();
}
catch(std::runtime_error& error)
{
return errors::Internal(error.what());
}
catch(...)
{
return errors::Internal("Runtime error");
}
}
};
template struct functor::BertTransformerOutputOpFunctor<GPUDevice, float>;
template struct functor::BertTransformerOutputOpFunctor<GPUDevice, Eigen::half>;
} //namespace functor
} //namespace tensorflow
#endif
// { ////////////////////////////////////////////////////////////////////////////////
// // printf("input_tensor_size : %d\n", input_tensor_size);
// printf("attn final : \n");
// printf("transpose valid_word_num : %d, batch_size : %d, from_seq_len : %d, head_num : %d, size_per_head : %d\n",
// valid_word_num, batch_size, from_seq_len, head_num, size_per_head);
// __half* tmp = new __half[input_tensor_size];
// check_cuda_error(cudaMemcpyAsync(
// tmp, attr_out_buf_, input_tensor_size * sizeof(__half), cudaMemcpyDeviceToHost, stream));
// int word = 1;
// std::vector<int> len = {16, 24, 33, 8};
// for (int bi = 0; bi < batch_size; bi++) {
// int b_off = 0;
// for (int bbi = 0; bbi < bi; bbi++)
// b_off += len[bbi];
// int offset = b_off * 768 + word * 768;
// printf("batch %d : %f %f %f ... %f %f %f\n", bi,
// (float)(tmp[offset + 0]),
// (float)(tmp[offset + 1]),
// (float)(tmp[offset + 2]),
// (float)(tmp[offset + 768 - 3]),
// (float)(tmp[offset + 768 - 2]),
// (float)(tmp[offset + 768 - 1]));
// }
// delete [] tmp;
// } ////////////////////////////////////////////////////////////////////////////////
|
the_stack
|
bert::bert (bool BERT_Large,
int num_gpu,
std::string dir,
int max_batchsize,
int max_seq_length) {
checkCudaErrors(cudaSetDevice(num_gpu));
handle = new global_handle(BERT_Large, dir, max_batchsize, max_seq_length);
init_ops();
}
void bert::init_ops(){
for(int i = 0; i < handle->num_hidden_layers; i++){
std::string num_layer = "_" + std::to_string(i) + "_";
op_LayerNorm* layernorm = new op_LayerNorm(num_layer + "attention_output_LayerNorm_gamma",
num_layer + "attention_output_LayerNorm_beta",
handle);
attention_layernorm.push_back(layernorm);
layernorm = new op_LayerNorm(num_layer + "output_LayerNorm_gamma",
num_layer + "output_LayerNorm_beta",
handle);
output_layernorm.push_back(layernorm);
op_SoftMax* Softmax = new op_SoftMax(handle);
softmax.push_back(Softmax);
op_Linear* linear = new op_Linear(num_layer + "attention_output_dense_kernel",
num_layer + "attention_output_dense_bias",
handle);
attention_linear.push_back(linear);
linear = new op_Linear(num_layer + "intermediate_dense_kernel",
num_layer + "intermediate_dense_bias",
handle);
intermediate_linear.push_back(linear);
linear = new op_Linear(num_layer + "output_dense_kernel",
num_layer + "output_dense_bias",
handle);
output_linear.push_back(linear);
Query_Key* qk = new Query_Key(handle);
query_key.push_back(qk);
Prob_Value* pv = new Prob_Value(handle);
prob_value.push_back(pv);
op_BatchedLinear* batchlinear = new op_BatchedLinear(
num_layer + "attention_self_query_kernel",
num_layer + "attention_self_query_bias",
num_layer + "attention_self_key_kernel",
num_layer + "attention_self_key_bias",
num_layer + "attention_self_value_kernel",
num_layer + "attention_self_value_bias",
handle);
batched_linear.push_back(batchlinear);
op_Gelu* op_gelu = new op_Gelu(handle);
gelu.push_back(op_gelu);
}
pooler_linear = new op_Linear( "pooler_dense_kernel",
"pooler_dense_bias",
handle);
// classify_linear = new op_Linear("classifier_kernel",
// "classifier_bias",
// handle);
// loss = new op_CrossEntropyLoss(handle);
// classify_softmax = new op_SoftMax(handle);
embedding = new Embedding(handle);
op_tanh = new op_Tanh(handle);
}
void bert::copy_inputs( int* &words,
int* &token_type,
int* &position,
int* &attention_mask){
size_t batchsize = handle->batchsize;
size_t seq_length = handle->seq_length;
int num_words = batchsize * seq_length;
int *word_gpu, *token_type_gpu, *positions_gpu, *mask_gpu;
int positions[num_words];
for( int i = 0; i < num_words; i++){
positions[i] = i % seq_length;
}
int* host_input_package;
checkCudaErrors(cudaMallocHost((void **)&host_input_package, 4*num_words*sizeof(int)));
memcpy(host_input_package, words, num_words*sizeof(int));
memcpy(host_input_package + num_words, token_type, num_words*sizeof(int));
memcpy(host_input_package + 2*num_words, positions, num_words*sizeof(int));
word_gpu = handle->global_malloc_manage_int.get_new_head_point(num_words);
token_type_gpu = handle->global_malloc_manage_int.get_new_head_point(num_words);
positions_gpu = handle->global_malloc_manage_int.get_new_head_point(num_words);
if(attention_mask != nullptr){
mask_gpu = handle->global_malloc_manage_int.get_new_head_point(num_words);
memcpy(host_input_package + 3*num_words, attention_mask, num_words*sizeof(int));
checkCudaErrors(cudaMemcpyAsync(word_gpu,
host_input_package,
4*num_words*sizeof(int),
cudaMemcpyHostToDevice));
attention_mask = mask_gpu;
}
else{
checkCudaErrors(cudaMemcpyAsync(word_gpu,
host_input_package,
3*num_words*sizeof(int),
cudaMemcpyHostToDevice));
}
cudaFreeHost(host_input_package);
words = word_gpu;
token_type = token_type_gpu;
position = positions_gpu;
}
void bert::BERT_Inference (
int* words,
int* token_types,
size_t batchsize,
size_t seq_length,
int* attention_mask){
size_t hidden_size = handle->hidden_size;
size_t total_length = batchsize * seq_length * hidden_size;
size_t num_words = batchsize * seq_length;
size_t num_attention_heads= handle->num_attention_heads;
size_t intermediate_size = handle->intermediate_size;
handle->set_scale(batchsize, seq_length);
handle->reset();
int* positions;
copy_inputs(words, token_types, positions, attention_mask);
float *embedding_out;
embedding->forward(embedding_out, words, token_types, positions);
float *tensor_layer = embedding_out, *temp;
for(int i = 0; i < handle->num_hidden_layers; i++){
handle->global_malloc_manage_float.record_layer_start();
// start of Attention
float *batched_gemm_out;
batched_linear[i]->forward(batched_gemm_out,
tensor_layer,
batchsize * seq_length,
hidden_size,
hidden_size);
float *query, *key, *val;
query = batched_gemm_out;
key = query + total_length;
val = key + total_length;
float *query_key_gemm;
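        // The 1.0/8.0 scale below is presumably 1/sqrt(size_per_head) with the usual
        // BERT head size of 64, hard-coded rather than computed from the handle.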
query_key[i]->forward(
query,
key,
1.0 / 8.0,
query_key_gemm);
softmax[i]->forward(handle,
query_key_gemm,
batchsize * num_attention_heads * seq_length,
seq_length,
attention_mask);
float* attention;
prob_value[i]->forward(
query_key_gemm,
val,
attention);
attention_linear[i]->forward(temp,
attention,
num_words,
hidden_size,
hidden_size);
attention = temp;
attention_layernorm[i]->forward(tensor_layer,
tensor_layer,
num_words,
hidden_size,
attention);
// End of Attention
// Start of Intermediate
float* intermediate_out;
intermediate_linear[i]->forward(intermediate_out,
tensor_layer,
num_words,
hidden_size,
intermediate_size);
gelu[i]->forward(intermediate_out, num_words * intermediate_size);
        // End of Intermediate
// Start of Output
float* output_out;
output_linear[i]->forward(output_out,
intermediate_out,
num_words,
intermediate_size,
hidden_size);
output_layernorm[i]->forward(tensor_layer,
tensor_layer,
num_words,
hidden_size,
output_out);
cudaEventRecord(handle->layer_compute_done, handle->cal_stream);
cudaEventSynchronize(handle->layer_compute_done);
handle->global_malloc_manage_float.reuse_layer_mem();
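        // Per-layer scratch reuse (inferred from the allocator API): record_layer_start()
        // marks the pool offset at the top of the loop, the event sync above waits for this
        // layer's kernels to finish, and reuse_layer_mem() rewinds the pool so the next
        // layer reuses the same device scratch space.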
// Layer End
}
// Pooler Start
float* first_token, *pooler_out;
copy_pooler(first_token, tensor_layer, handle);
pooler_linear->forward(pooler_out,
first_token,
batchsize,
hidden_size,
hidden_size);
op_tanh->forward(pooler_out, batchsize * hidden_size);
// Pooler End
ret.tensor = tensor_layer;
ret.pooled_output = pooler_out;
}
// float *bert::classify_inference(size_t num_classes) {
// float *classify_out;
// classify_linear->forward(classify_out,
// ret.pooled_output,
// handle->batchsize,
// handle->hidden_size,
// num_classes);
// //classify_softmax->forward(handle, classify_out, handle->batchsize, num_classes);
// return classify_out;
// }
// void bert::classify_inference_backward(int *classes, size_t num_classes) {
// int *calsses_gpu;
// calsses_gpu = handle->global_malloc_manage_int.get_new_head_point(handle->hidden_size);
// checkCudaErrors(cudaMemcpyAsync(calsses_gpu, classes, handle->hidden_size * sizeof(int), cudaMemcpyHostToDevice));
// float *dout_gpu;
// dout_gpu = handle->global_malloc_manage_float.get_new_head_point(1);
// float *dout = (float *) malloc(sizeof(float));
// dout[0] = 1.0;
// checkCudaErrors(cudaMemcpyAsync(dout_gpu, dout, sizeof(float), cudaMemcpyHostToDevice));
// loss->backward(dout_gpu, handle->batchsize, num_classes, calsses_gpu);
// // debug_tensor_gpu<float>(std::string("Grid CrossEntropyLoss_output"), loss->grad_input, n2, n2, n1);
// // debug_tensor_gpu<float>(std::string("classify_linear->stored_input"), classify_linear->stored_input, 768, 768, 2);
// classify_linear->backward(loss->grad_input, handle->batchsize,
// handle->hidden_size,
// num_classes);
// // debug_tensor_gpu<float>(std::string("grad_input"), classify_linear->grad_input, k, k, n);
// // debug_tensor_gpu<float>(std::string("grad_kernel"), grad_kernel, m, m, k);
// // debug_tensor_gpu<float>(std::string("grad_bias"), grad_bias, m, m);
// return;
// }
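// Usage sketch (hypothetical driver, not an official API; names and shapes follow the
// class above, and it assumes `ret` is an accessible member):
//   bert model(/*BERT_Large=*/false, /*num_gpu=*/0, "/path/to/weights/",
//              /*max_batchsize=*/8, /*max_seq_length=*/128);
//   // words, token_types, attention_mask: host int arrays of shape [batchsize, seq_length]
//   model.BERT_Inference(words, token_types, batchsize, seq_length, attention_mask);
//   // model.ret.pooled_output then points to device memory of shape [batchsize, hidden_size]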
|
the_stack
|
#include <cstdio>
#include <utility_kernels.h>
#include <optical_flow_kernels.h>
#define TWO_PI 6.28318530717958623199592694f
#define DC_THR 0.00001f
namespace vision {
// texture references for 2D float Gabor filter outs
// and previous scale optic flow field
texture<float2, 2, cudaReadModeElementType> d_Gabor_texture2;
texture<float2, 2, cudaReadModeElementType> d_o_prev_scale_texture;
// texture reference for consistency check in two-frame optical flow
texture<float2, 2, cudaReadModeElementType> d_frame2_flow_texture;
// IOC masks
__device__ __constant__ float d_FV_X1[] = {
1.0000000000000000f, 0.9238795325112867f, 0.7071067811865476f,
0.3826834323650898f, 0.0000000000000001f, -0.3826834323650897f,
-0.7071067811865475f, -0.9238795325112867f
};
__device__ __constant__ float d_FV_X2[] = {
0.0000000000000000f, 0.3826834323650898f, 0.7071067811865475f,
0.9238795325112867f, 1.0000000000000000f, 0.9238795325112867f,
0.7071067811865476f, 0.3826834323650899f
};
__device__ __constant__ float d_FV_2_X1[] = {
1.0000000000000000f, 0.8535533905932737f, 0.5000000000000001f,
0.1464466094067263f, 0.0000000000000000f, 0.1464466094067262f,
0.4999999999999999f, 0.8535533905932737f
};
__device__ __constant__ float d_FV_2_X2[] = {
0.0000000000000000f, 0.1464466094067262f, 0.4999999999999999f,
0.8535533905932737f, 1.0000000000000000f, 0.8535533905932737f,
0.5000000000000001f, 0.1464466094067263f
};
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// 3x3 2D median filter ignoring nans (unless more than half the data are nans)
#define s2(a, b) \
{ \
tmp = a; \
a = fminf(a, b); \
b = fmaxf(tmp, b); \
}
#define mn3(a, b, c) \
s2(a, b); \
s2(a, c);
#define mx3(a, b, c) \
s2(b, c); \
s2(a, c);
#define mnmx3(a, b, c) \
mx3(a, b, c); \
s2(a, b); // 3 exchanges
#define mnmx4(a, b, c, d) \
s2(a, b); \
s2(c, d); \
s2(a, c); \
s2(b, d); // 4 exchanges
#define mnmx5(a, b, c, d, e) \
s2(a, b); \
s2(c, d); \
mn3(a, c, e); \
mx3(b, d, e); // 6 exchanges
#define mnmx6(a, b, c, d, e, f) \
s2(a, d); \
s2(b, e); \
s2(c, f); \
mn3(a, b, c); \
mx3(d, e, f); // 7 exchanges
__global__ void nanmedfilt2_flow_GPU(float2 *d_Image_med, int n_rows,
int n_cols, int pitch, float2 unreliable) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < n_cols) & (y < n_rows)) { // are we in the image?
float tmp;
float bigNum = 1000000.0f;
int valid_count = 0;
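    // NaN handling (inferred): each non-finite sample is replaced by +/-1000000 with the
    // sign alternating, pushing invalid entries to both ends of the sorting network so the
    // middle element stays a real flow value whenever valid_count > 4 (a majority of the 9 taps).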
float xt = (float)x + 0.5f;
float yt = (float)y + 0.5f;
// pull top six from texture memory
float2 v[6];
v[0] = tex2D(d_o_prev_scale_texture, xt - 1.0f, yt - 1.0f);
if (isfinite(v[0].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[0].x = bigNum;
v[0].y = bigNum;
}
v[1] = tex2D(d_o_prev_scale_texture, xt, yt - 1.0f);
if (isfinite(v[1].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[1].x = bigNum;
v[1].y = bigNum;
}
v[2] = tex2D(d_o_prev_scale_texture, xt + 1.0f, yt - 1.0f);
if (isfinite(v[2].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[2].x = bigNum;
v[2].y = bigNum;
}
v[3] = tex2D(d_o_prev_scale_texture, xt - 1.0f, yt);
if (isfinite(v[3].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[3].x = bigNum;
v[3].y = bigNum;
}
v[4] = tex2D(d_o_prev_scale_texture, xt, yt);
if (isfinite(v[4].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[4].x = bigNum;
v[4].y = bigNum;
}
v[5] = tex2D(d_o_prev_scale_texture, xt + 1.0f, yt);
if (isfinite(v[5].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[5].x = bigNum;
v[5].y = bigNum;
}
// with each pass, remove min and max values and add new value
mnmx6(v[0].x, v[1].x, v[2].x, v[3].x, v[4].x, v[5].x);
mnmx6(v[0].y, v[1].y, v[2].y, v[3].y, v[4].y, v[5].y);
v[5] = tex2D(d_o_prev_scale_texture, xt - 1.0f, yt + 1.0f);
if (isfinite(v[5].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[5].x = bigNum;
v[5].y = bigNum;
}
mnmx5(v[1].x, v[2].x, v[3].x, v[4].x, v[5].x);
mnmx5(v[1].y, v[2].y, v[3].y, v[4].y, v[5].y);
v[5] = tex2D(d_o_prev_scale_texture, xt, yt + 1.0f);
if (isfinite(v[5].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[5].x = bigNum;
v[5].y = bigNum;
}
mnmx4(v[2].x, v[3].x, v[4].x, v[5].x);
mnmx4(v[2].y, v[3].y, v[4].y, v[5].y);
v[5] = tex2D(d_o_prev_scale_texture, xt + 1.0f, yt + 1.0f);
if (isfinite(v[5].x)) {
valid_count++;
} else {
bigNum = -bigNum;
v[5].x = bigNum;
v[5].y = bigNum;
}
mnmx3(v[3].x, v[4].x, v[5].x);
mnmx3(v[3].y, v[4].y, v[5].y);
// pick the middle one
*((float2 *)((char *)d_Image_med + y *pitch) + x) =
(valid_count > 4) ? v[4] : unreliable;
}
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// device component velocity function (for loop unrolling)
__device__ static void
component_velocity_two_frames(int o, const float2 *d_Gabor1, int x, int y,
float2 op, unsigned int &n_valids,
float &sFV_2_X1, float &sFV_2_X2, float &sFV_X1X2,
float &sFV_YX1, float &sFV_YX2) {
bool dc =
false; // set to true if one of the gabor responses is below dc_thres
float2 gabor1, gabor2;
float xt, yt;
float phase_diff;
xt = (float)x + op.x + 0.5f;
yt = (float)y + op.y + 0.5f;
gabor1 = *d_Gabor1;
gabor2 = tex2D(d_Gabor_texture2, xt, yt);
dc = dc | (fabsf(gabor1.x) < DC_THR) | (fabsf(gabor1.y) < DC_THR) |
(fabsf(gabor2.x) < DC_THR) | (fabsf(gabor2.y) < DC_THR);
phase_diff = -atan2(gabor2.x * gabor1.y - gabor1.x * gabor2.y,
gabor1.x * gabor2.x + gabor1.y * gabor2.y);
// component velocity
float beta = phase_diff;
// update IOC accumulators
float valid = (float)(!dc);
n_valids += (int)(valid);
sFV_2_X1 += d_FV_2_X1[o] * valid;
sFV_2_X2 += d_FV_2_X2[o] * valid;
sFV_X1X2 += d_FV_X1[o] * d_FV_X2[o] * valid;
sFV_YX1 += beta * d_FV_X1[o] * valid;
sFV_YX2 += beta * d_FV_X2[o] * valid;
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Core device optical flow computation
__device__ static void comp_optic_flow_two_frames(const char *d_Gabor1,
int d_Gabor1Pitch, float2 &op,
int x, int y, int height,
float unreliable) {
float Ox, Oy, sFV_YX1, sFV_YX2, sFV_2_X1, sFV_2_X2, sFV_X1X2;
sFV_YX1 = 0.0f;
sFV_YX2 = 0.0f;
sFV_2_X1 = 0.0f;
sFV_2_X2 = 0.0f;
sFV_X1X2 = 0.0f;
unsigned int n_valids = 0;
/******************************/
/* Component Velocity and MSE */
/******************************/
// unroll the loop
d_Gabor1 += y * d_Gabor1Pitch + x * sizeof(float2);
component_velocity_two_frames(0, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(1, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(2, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(3, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(4, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(5, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(6, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(7, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
/*******************************/
/* Intersection of Constraints */
/*******************************/
// Compute optic flow
float invden = 1.0f;
invden /= (sFV_2_X1 * sFV_2_X2 - sFV_X1X2 * sFV_X1X2) * TWO_PI * 0.25f;
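  // The accumulated sums are the normal equations of a least-squares fit
  //   beta_o ~ 2*pi*f0 * (Ox*cos(theta_o) + Oy*sin(theta_o)),
  // solved for (Ox, Oy) by Cramer's rule; the TWO_PI * 0.25f factor presumably converts
  // phase difference to pixel displacement for a peak filter frequency of 0.25 cycles/pixel.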
// Strictly speaking, the signs should be negated in the following (maybe the
// filter orientations should be negative?)
Ox = -(sFV_YX1 * sFV_2_X2 - sFV_YX2 * sFV_X1X2) * invden;
Oy = -(sFV_YX2 * sFV_2_X1 - sFV_YX1 * sFV_X1X2) * invden;
if (n_valids < 4) {
Ox = unreliable;
Oy = unreliable;
}
op.x += Ox; // add previous scale flow (doubled and interpolated)
op.y += Oy; // add previous scale flow (doubled and interpolated)
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Core device optical flow computation (four orientations)
__device__ static void comp_optic_flow_two_frames_four_orientations(
const char *d_Gabor1, int d_Gabor1Pitch, float2 &op, int x, int y,
int height, float unreliable) {
float Ox, Oy, sFV_YX1, sFV_YX2, sFV_2_X1, sFV_2_X2, sFV_X1X2;
sFV_YX1 = 0.0f;
sFV_YX2 = 0.0f;
sFV_2_X1 = 0.0f;
sFV_2_X2 = 0.0f;
sFV_X1X2 = 0.0f;
unsigned int n_valids = 0;
/******************************/
/* Component Velocity and MSE */
/******************************/
// unroll the loop
d_Gabor1 += y * d_Gabor1Pitch + x * sizeof(float2);
component_velocity_two_frames(0, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(2, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(4, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
y += height;
d_Gabor1 += height * d_Gabor1Pitch;
component_velocity_two_frames(6, (const float2 *)d_Gabor1, x, y, op, n_valids,
sFV_2_X1, sFV_2_X2, sFV_X1X2, sFV_YX1, sFV_YX2);
/*******************************/
/* Intersection of Constraints */
/*******************************/
// Compute optic flow
float invden = 1.0f;
invden /= (sFV_2_X1 * sFV_2_X2 - sFV_X1X2 * sFV_X1X2) * TWO_PI * 0.25f;
// Strictly speaking, the signs should be negated in the following (maybe the
// filter orientations should be negative?)
Ox = -(sFV_YX1 * sFV_2_X2 - sFV_YX2 * sFV_X1X2) * invden;
Oy = -(sFV_YX2 * sFV_2_X1 - sFV_YX1 * sFV_X1X2) * invden;
if (n_valids < 4) {
Ox = unreliable;
Oy = unreliable;
}
op.x += Ox; // add previous scale flow (doubled and interpolated)
op.y += Oy; // add previous scale flow (doubled and interpolated)
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Check if fetch location is within the image
__device__ static bool valid_fetch(float2 fetch, float width, float height) {
return ((fetch.x >= 0.0f) & (fetch.x <= (width - 1.0f)) & (fetch.y >= 0.0f) &
(fetch.y <= (height - 1.0f)));
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Two frame optical flow kernel
__global__ void optic_flow_two_frames_GPU(const char *d_Gabor1,
int d_Gabor1Pitch,
float2 *d_optic_flow, int width,
int height, int d_optic_flowPitch,
float unreliable, bool first_scale) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
// fetch and transform previous scale optic flow (or init to zero at first
// scale)
float2 O = first_scale ? make_float2(0.0f, 0.0f)
: tex2D(d_o_prev_scale_texture, x * 0.5f + 0.5f,
y * 0.5f + 0.5f);
O.x *= 2.0f;
O.y *= 2.0f;
// check if flow warping remains inside the image
// frame 2
float2 fetch = make_float2((float)x + O.x, (float)y + O.y);
if (valid_fetch(fetch, width, height)) {
comp_optic_flow_two_frames(d_Gabor1, d_Gabor1Pitch, O, x, y, height,
unreliable);
} else {
O.x += unreliable;
O.y += unreliable;
}
// Save optic flow
*((float2 *)((char *)d_optic_flow + y *d_optic_flowPitch) + x) = O;
}
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
// Two frame optical flow kernel
__global__ void optic_flow_two_frames_four_orientations_GPU(
const char *d_Gabor1, int d_Gabor1Pitch, float2 *d_optic_flow, int width,
int height, int d_optic_flowPitch, float unreliable, bool first_scale) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) { // are we in the image?
// fetch and transform previous scale optic flow (or init to zero at first
// scale)
float2 O = first_scale ? make_float2(0.0f, 0.0f)
: tex2D(d_o_prev_scale_texture, x * 0.5f + 0.5f,
y * 0.5f + 0.5f);
O.x *= 2.0f;
O.y *= 2.0f;
// check if flow warping remains inside the image
// frame 2
float2 fetch = make_float2((float)x + O.x, (float)y + O.y);
if (valid_fetch(fetch, width, height)) {
comp_optic_flow_two_frames_four_orientations(d_Gabor1, d_Gabor1Pitch, O,
x, y, height, unreliable);
} else {
O.x += unreliable;
O.y += unreliable;
}
// Save optic flow
*((float2 *)((char *)d_optic_flow + y *d_optic_flowPitch) + x) = O;
}
}
///////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////
__global__ void flow_consist_GPU(float2 *d_flow1, float cons_thres, int width,
int height, int pitch) {
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < width) & (y < height)) // are we in the image?
{
// Fetch frame 1 flow from global memory
d_flow1 = ((float2 *)((char *)d_flow1 + y * pitch) + x);
float2 flow1 = *d_flow1;
// Fetch frame 2 flow (at pos + frame 1 flow) from texture
float2 flow2 = tex2D(d_frame2_flow_texture, (float)x + flow1.x + 0.5f,
(float)y + flow1.y + 0.5f);
// Check error
float e_x = flow1.x + flow2.x;
float e_y = flow1.y + flow2.y;
float err = sqrtf(e_x * e_x + e_y * e_y);
*d_flow1 = (err < cons_thres) ? flow1 : make_float2(nanf(""), nanf(""));
}
}
////////////////////////
/// HOST FUNCTIONS ///
////////////////////////
void compute_optical_flow_two_frames(
std::vector<PitchFloat2Mem> &d_optic_flow_pyramid, char *d_TEMP,
int d_TEMPPitch, const std::vector<PitchFloat2Mem> &gabPyr1_v,
const std::vector<PitchFloat2Mem> &gabPyr2_v, int n_scales,
bool median_filter, std::vector<int> &n_rows, std::vector<int> &n_cols,
bool fourOrientations = false) {
int N_ORIENTS = fourOrientations ? 4 : 8;
// configuration
dim3 dimBlock_flow(16, 8, 1);
dim3 dimBlock_medfilt(16, 8, 1);
// Setup textures
cudaChannelFormatDesc channelFloat2 = cudaCreateChannelDesc<float2>();
// Bind textures to lowest scale arrays
const char *d_Gabor1 = (const char *)gabPyr1_v.at(0).ptr;
int d_Gabor1Pitch = (int)gabPyr1_v.at(0).pitch;
cudaBindTexture2D(0, &d_Gabor_texture2, gabPyr2_v.at(0).ptr, &channelFloat2,
n_cols[0], n_rows[0] * N_ORIENTS, gabPyr2_v.at(0).pitch);
// compute first scale optical flow
dim3 dimGrid_flow(iDivUp(n_cols[0], dimBlock_flow.x),
iDivUp(n_rows[0], dimBlock_flow.y), 1);
if (median_filter) {
if (fourOrientations)
      optic_flow_two_frames_four_orientations_GPU<<<dimGrid_flow, dimBlock_flow>>>(
          d_Gabor1, d_Gabor1Pitch, (float2 *)d_TEMP, n_cols[0], n_rows[0],
          d_TEMPPitch, NAN_FLOAT, true);
    else
      optic_flow_two_frames_GPU<<<dimGrid_flow, dimBlock_flow>>>(
          d_Gabor1, d_Gabor1Pitch, (float2 *)d_TEMP, n_cols[0], n_rows[0],
          d_TEMPPitch, NAN_FLOAT, true);
} else {
    if (fourOrientations)
      optic_flow_two_frames_four_orientations_GPU<<<dimGrid_flow, dimBlock_flow>>>(
          d_Gabor1, d_Gabor1Pitch, d_optic_flow_pyramid.at(0).ptr, n_cols[0],
          n_rows[0], (int)d_optic_flow_pyramid.at(0).pitch,
          (n_scales == 1) ? NAN_FLOAT : 0.0f, true);
    else
      optic_flow_two_frames_GPU<<<dimGrid_flow, dimBlock_flow>>>(
          d_Gabor1, d_Gabor1Pitch, d_optic_flow_pyramid.at(0).ptr, n_cols[0],
          n_rows[0], (int)d_optic_flow_pyramid.at(0).pitch,
          (n_scales == 1) ? NAN_FLOAT : 0.0f, true);
}
// Coarse-to-fine processing
for (int s = 1; s < n_scales; s++) {
if (median_filter) {
// 2D NaNMedianFilter the flow and turn NaNs into zeros
cudaBindTexture2D(0, &d_o_prev_scale_texture, d_TEMP, &channelFloat2,
n_cols[s - 1], n_rows[s - 1], d_TEMPPitch);
dim3 dimGrid_medfilt(iDivUp(n_cols[s - 1], dimBlock_medfilt.x),
iDivUp(n_rows[s - 1], dimBlock_medfilt.y), 1);
      nanmedfilt2_flow_GPU<<<dimGrid_medfilt, dimBlock_medfilt>>>(
          d_optic_flow_pyramid.at(s - 1).ptr, n_rows[s - 1], n_cols[s - 1],
          (int)d_optic_flow_pyramid.at(s - 1).pitch, make_float2(0.0f, 0.0f));
}
cudaBindTexture2D(0, &d_o_prev_scale_texture,
d_optic_flow_pyramid.at(s - 1).ptr, &channelFloat2,
n_cols[s - 1], n_rows[s - 1],
d_optic_flow_pyramid.at(s - 1).pitch);
// Bind textures to Gabor Pyramids
d_Gabor1 = (const char *)gabPyr1_v.at(s).ptr;
d_Gabor1Pitch = (int)gabPyr1_v.at(s).pitch;
cudaBindTexture2D(0, &d_Gabor_texture2, gabPyr2_v.at(s).ptr, &channelFloat2,
n_cols[s], n_rows[s] * N_ORIENTS, gabPyr2_v.at(s).pitch);
///////////////////////
// Update optic flow //
///////////////////////
dim3 dimGrid_flow(iDivUp(n_cols[s], dimBlock_flow.x),
iDivUp(n_rows[s], dimBlock_flow.y), 1);
if (median_filter) {
      if (fourOrientations)
        optic_flow_two_frames_four_orientations_GPU<<<dimGrid_flow, dimBlock_flow>>>(
            d_Gabor1, d_Gabor1Pitch, (float2 *)d_TEMP, n_cols[s], n_rows[s],
            d_TEMPPitch, NAN_FLOAT, false);
      else
        optic_flow_two_frames_GPU<<<dimGrid_flow, dimBlock_flow>>>(
            d_Gabor1, d_Gabor1Pitch, (float2 *)d_TEMP, n_cols[s], n_rows[s],
            d_TEMPPitch, NAN_FLOAT, false);
    } else {
      if (fourOrientations)
        optic_flow_two_frames_four_orientations_GPU<<<dimGrid_flow, dimBlock_flow>>>(
            d_Gabor1, d_Gabor1Pitch, d_optic_flow_pyramid.at(s).ptr, n_cols[s],
            n_rows[s], (int)d_optic_flow_pyramid.at(s).pitch,
            (s == (n_scales - 1)) ? NAN_FLOAT : 0.0f, false);
      else
        optic_flow_two_frames_GPU<<<dimGrid_flow, dimBlock_flow>>>(
            d_Gabor1, d_Gabor1Pitch, d_optic_flow_pyramid.at(s).ptr, n_cols[s],
            n_rows[s], (int)d_optic_flow_pyramid.at(s).pitch,
            (s == (n_scales - 1)) ? NAN_FLOAT : 0.0f, false);
}
} // for(int s=1;s<n_scales;s++)
if (median_filter) {
// Median filter final flow (now keeping NaNs)
cudaBindTexture2D(0, &d_o_prev_scale_texture, d_TEMP, &channelFloat2,
n_cols[n_scales - 1], n_rows[n_scales - 1], d_TEMPPitch);
dim3 dimGrid_medfilt(iDivUp(n_cols[n_scales - 1], dimBlock_medfilt.x),
iDivUp(n_rows[n_scales - 1], dimBlock_medfilt.y), 1);
    nanmedfilt2_flow_GPU<<<dimGrid_medfilt, dimBlock_medfilt>>>(
        d_optic_flow_pyramid.at(n_scales - 1).ptr, n_rows[n_scales - 1],
        n_cols[n_scales - 1], (int)d_optic_flow_pyramid.at(n_scales - 1).pitch,
        make_float2(NAN_FLOAT, NAN_FLOAT));
}
// printf("%s\n",cudaGetErrorString(cudaGetLastError()));
}
void compute_consistent_optical_flow_two_frames(
std::vector<PitchFloat2Mem> &d_optic_flow_pyramid, char *d_TEMP,
int d_TEMPPitch, const std::vector<PitchFloat2Mem> &gabPyr1_v,
const std::vector<PitchFloat2Mem> &gabPyr2_v,
cudaArray *d_frame2_flow_array, int n_scales, bool median_filter,
std::vector<int> &n_rows, std::vector<int> &n_cols, float cons_thres,
bool fourOrientations = false) {
// Compute flow 2 -> 1, situated in 2
compute_optical_flow_two_frames(d_optic_flow_pyramid, d_TEMP, d_TEMPPitch,
gabPyr2_v, gabPyr1_v, n_scales, median_filter,
n_rows, n_cols, fourOrientations);
// Copy to array for consistency check
cudaMemcpy2DToArray(d_frame2_flow_array, 0, 0,
d_optic_flow_pyramid.at(n_scales - 1).ptr,
d_optic_flow_pyramid.at(n_scales - 1).pitch,
n_cols[n_scales - 1] * sizeof(float2),
n_rows[n_scales - 1], cudaMemcpyDeviceToDevice);
// Compute flow 1 -> 2, situated in 1
compute_optical_flow_two_frames(d_optic_flow_pyramid, d_TEMP, d_TEMPPitch,
gabPyr1_v, gabPyr2_v, n_scales, median_filter,
n_rows, n_cols, fourOrientations);
// Consistency check
cudaChannelFormatDesc channelFloat2 = cudaCreateChannelDesc<float2>();
cudaBindTextureToArray(d_frame2_flow_texture, d_frame2_flow_array,
channelFloat2);
dim3 dimBlock_flow(16, 16, 1);
dim3 dimGrid_flow(iDivUp(n_cols[n_scales - 1], dimBlock_flow.x),
iDivUp(n_rows[n_scales - 1], dimBlock_flow.y), 1);
  flow_consist_GPU<<<dimGrid_flow, dimBlock_flow>>>(
      d_optic_flow_pyramid.at(n_scales - 1).ptr, cons_thres,
      n_cols[n_scales - 1], n_rows[n_scales - 1],
      (int)d_optic_flow_pyramid.at(n_scales - 1).pitch);
// flow_consist_GPU<<<dimGrid_flow,dimBlock_flow>>>(d_optic_flow_pyramid[n_scales-1],
// cons_thres, n_cols[n_scales-1], n_rows[n_scales-1],
// (int)d_optic_flow_pyramidPitch[n_scales-1]);
}
///////////////////////////////////////////////////////////////
// Calling function
///////////////////////////////////////////////////////////////
void computeOpticalFlowTwoFrames(
std::vector<PitchFloat2Mem> &d_optic_flow_pyramid, char *d_TEMP,
int d_TEMPPitch, const std::vector<PitchFloat2Mem> &gabPyr1_v,
const std::vector<PitchFloat2Mem> &gabPyr2_v,
cudaArray *d_frame2_flow_array, int n_scales, bool median_filter,
bool consistent, float cons_thres, std::vector<int> &n_rows,
std::vector<int> &n_cols, bool fourOrientations) {
d_Gabor_texture2.addressMode[0] = cudaAddressModeClamp;
d_Gabor_texture2.addressMode[1] = cudaAddressModeClamp;
d_Gabor_texture2.filterMode = cudaFilterModeLinear;
d_Gabor_texture2.normalized = false;
d_o_prev_scale_texture.addressMode[0] = cudaAddressModeClamp;
d_o_prev_scale_texture.addressMode[1] = cudaAddressModeClamp;
d_o_prev_scale_texture.filterMode = cudaFilterModeLinear;
d_o_prev_scale_texture.normalized = false;
d_frame2_flow_texture.addressMode[0] = cudaAddressModeClamp;
d_frame2_flow_texture.addressMode[1] = cudaAddressModeClamp;
d_frame2_flow_texture.filterMode = cudaFilterModeLinear;
d_frame2_flow_texture.normalized = false;
if (consistent)
compute_consistent_optical_flow_two_frames(
d_optic_flow_pyramid, d_TEMP, d_TEMPPitch, gabPyr1_v, gabPyr2_v,
d_frame2_flow_array, n_scales, median_filter, n_rows, n_cols,
cons_thres, fourOrientations);
else
compute_optical_flow_two_frames(
d_optic_flow_pyramid, d_TEMP, d_TEMPPitch, gabPyr1_v, gabPyr2_v,
n_scales, median_filter, n_rows, n_cols, fourOrientations);
}
} // end namespace vision
|
the_stack
|
std::map<CUdevice, int> nervana_sm_counts_;
std::map<std::string, CUfunction> nervana_kernels_;
std::vector<CUmodule> nervana_modules_;
//for when we need to modify the above data structures
std::mutex nervana_load_kernels_mutex_;
std::mutex nervana_sm_count_mutex_;
extern "C" bool nervana_loadKernels(const char* const base_path_cstr) {
std::lock_guard<std::mutex> lock(nervana_load_kernels_mutex_);
//better would be a vector<string>, but there is a bug in nvcc that prevents this
// (bug report filed)
std::string names[36] = {
"hgemm_nn_vec_128x128",
"hgemm_nn_128x128",
"hgemm_nt_vec_128x128",
"hgemm_nt_128x128",
"hgemm_tn_vec_128x128",
"hgemm_tn_128x128",
"hgemm_nn_vec_128x64",
"hgemm_nn_128x64",
"hgemm_tn_vec_128x64",
"hgemm_tn_128x64",
"hgemm_nn_vec_128x32",
"hgemm_nn_128x32",
"hgemm_tn_vec_128x32",
"hgemm_tn_128x32",
"hgemm_nn_32x128",
"hgemm_nn_vec_32x128",
"hgemm_nt_32x128",
"hgemm_nt_vec_32x128",
"sgemm_nn_vec_128x128",
"sgemm_nn_128x128",
"sgemm_nt_vec_128x128",
"sgemm_nt_128x128",
"sgemm_tn_vec_128x128",
"sgemm_tn_128x128",
"sgemm_nn_vec_128x64",
"sgemm_nn_128x64",
"sgemm_tn_vec_128x64",
"sgemm_tn_128x64",
"sgemm_nn_vec_128x32",
"sgemm_nn_128x32",
"sgemm_tn_vec_128x32",
"sgemm_tn_128x32",
"sgemm_nn_32x128",
"sgemm_nn_vec_32x128",
"sgemm_nt_32x128",
"sgemm_nt_vec_32x128"
};
std::string base_path(base_path_cstr);
for (auto kernel : names) {
if (nervana_kernels_.count(kernel) > 0)
continue;
CUmodule module;
std::string path = base_path + kernel + std::string(".cubin");
CUresult res = cuModuleLoad(&module, path.c_str());
if (res != CUDA_SUCCESS) {
std::cerr << "Failed to load: " << kernel << " " << res << std::endl;
return false;
}
nervana_modules_.push_back(module);
CUfunction function;
res = cuModuleGetFunction(&function, module, kernel.c_str());
if (res != CUDA_SUCCESS) {
std::cerr << "Failed to extract: " << kernel << " " << res << std::endl;
return false;
}
nervana_kernels_.insert(std::make_pair(kernel, function));
}
return true;
}
extern "C" bool nervana_unloadKernels() {
std::lock_guard<std::mutex> lock(nervana_load_kernels_mutex_);
while(nervana_modules_.size() > 0) {
auto module = nervana_modules_.back();
CUresult res = cuModuleUnload(module);
nervana_modules_.pop_back();
if (res != CUDA_SUCCESS)
return false;
}
nervana_kernels_.clear();
return true;
}
extern "C" size_t nervana_randStateSizeBytes() {
return 2048 * 32 * sizeof(int);
}
std::tuple<int, int, int> get_grid_dimensions(int grid, int m, int n, int sm_count, const std::string& trans)
{
    // default to the largest tile so an out-of-range `grid` value cannot leave these uninitialized
    int sizeA = 128, sizeB = 128, threads = 256;
if (grid >= 0) {
if (grid == 0) {
sizeA = 32;
sizeB = 128;
threads = 128;
} else if (grid == 1) {
sizeA = 128;
sizeB = 32;
threads = 128;
} else if (grid == 2) {
sizeA = 128;
sizeB = 64;
threads = 128;
} else if (grid == 3) {
sizeA = 128;
sizeB = 128;
threads = 256;
}
} else {
int sh = min(m, n);
int size;
if (sh < 384 - 16) {
int sh128 = sh % 128;
if (sh128 > 0 && sh128 < 112) {
if (sh128 > 48 && sh128 <= 64) {
int sh64 = sh / 64;
int wide = max(m, n);
sh64 *= (wide / 128 + (wide % 128 != 0)) / sm_count;
if (sh64 > 1) {
size = 64;
}
else {
size = 32;
}
}
else {
size = 32;
}
}
else {
size = 128;
}
} else {
size = 128;
}
if (m >= n) {
if (trans == "nt") {
size = 128;
}
sizeA = 128;
sizeB = size;
} else {
if (trans == "tn") {
size = 128;
} else if (size == 64) {
//temporary until kernels exist
size = 32;
}
sizeA = size;
sizeB = 128;
}
threads = (sizeA == 128 && sizeB == 128) ? 256 : 128;
}
return std::make_tuple(sizeA, sizeB, threads);
}
extern "C" bool nervana_sgemm(float *A, float *B, float *C,
bool a_t, bool b_t,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta,
unsigned int *rand_state,
bool stochastic_round, bool apply_relu,
CUstream stream, int grid
)
{
int sm_count;
{
std::lock_guard<std::mutex> lock(nervana_sm_count_mutex_);
CUdevice device;
CUresult res = cuCtxGetDevice(&device);
if (res != CUDA_SUCCESS) {
return false;
}
auto count = nervana_sm_counts_.find(device);
if (count != nervana_sm_counts_.end()) {
sm_count = count->second;
}
else {
int pi;
res = cuDeviceGetAttribute(&pi, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device);
if (res != CUDA_SUCCESS) {
return false;
}
sm_count = pi;
nervana_sm_counts_[device] = pi;
}
}
std::string name = "sgemm_";
std::string trans;
trans += a_t ? 't' : 'n';
trans += b_t ? 't' : 'n';
name += trans;
int sizeA, sizeB, threads;
std::tie(sizeA, sizeB, threads) = get_grid_dimensions(grid, m, n, sm_count, trans);
int k_vec = (sizeA == 32 || sizeB == 32) ? 4 : 16;
if ( (trans == "tn" && m % 4 == 0 && n % 4 == 0) ||
(trans == "nn" && k % k_vec == 0 && n % 4 == 0) ||
(trans == "nt" && k % k_vec == 0)) {
name += "_vec";
}
int gridA = m / sizeA + (m % sizeA != 0);
int gridB = n / sizeB + (n % sizeB != 0);
std::stringstream ss;
ss << "_" << sizeA << "x" << sizeB;
name += ss.str();
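    // Example (grid = -1): m = n = k = 1024 with trans == "nt" selects the 128x128 tile,
    // so the name becomes "sgemm_nt_vec_128x128" (k % 16 == 0 enables the vector kernel),
    // gridA = gridB = 8 and threads = 256.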
int flags = 0;
flags |= (stochastic_round << 0);
flags |= (apply_relu << 1);
CUresult res;
if (a_t)
lda *= (8 * sizeof(float));
if (!b_t)
ldb *= (8 * sizeof(float));
int zero = 0;
int one = 1;
void *args[16] = {&C, &A, &B, &alpha, &beta, &flags, &lda, &ldb, &ldc, &m, &n, &k,
&zero, &zero, &zero, &one};
res = cuLaunchKernel(nervana_kernels_[name],
1, gridA, gridB,
threads, 1, 1,
0,
stream, args, NULL);
if (res != CUDA_SUCCESS) {
std::cerr << "Error launching kernel " << name << " " << res << std::endl;
return false;
}
return true;
}
extern "C" bool nervana_hgemm(short *A, short *B, short *C,
bool a_t, bool b_t,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta,
unsigned int *rand_state,
bool stochastic_round, bool apply_relu,
CUstream stream, int grid
)
{
int sm_count;
{
std::lock_guard<std::mutex> lock(nervana_sm_count_mutex_);
CUdevice device;
CUresult res = cuCtxGetDevice(&device);
if (res != CUDA_SUCCESS) {
return false;
}
auto count = nervana_sm_counts_.find(device);
if (count != nervana_sm_counts_.end()) {
sm_count = count->second;
}
else {
int pi;
res = cuDeviceGetAttribute(&pi, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device);
if (res != CUDA_SUCCESS) {
return false;
}
sm_count = pi;
nervana_sm_counts_[device] = pi;
}
}
std::string name = "hgemm_";
std::string trans;
trans += a_t ? 't' : 'n';
trans += b_t ? 't' : 'n';
name += trans;
int sizeA, sizeB, threads;
std::tie(sizeA, sizeB, threads) = get_grid_dimensions(grid, m, n, sm_count, trans);
int k_vec = (sizeA == 32 || sizeB == 32) ? 4 : 16;
if ( (trans == "tn" && m % 4 == 0 && n % 4 == 0) ||
(trans == "nn" && k % k_vec == 0 && n % 4 == 0) ||
(trans == "nt" && k % k_vec == 0)) {
name += "_vec";
}
int gridA = m / sizeA + (m % sizeA != 0);
int gridB = n / sizeB + (n % sizeB != 0);
std::stringstream ss;
ss << "_" << sizeA << "x" << sizeB;
name += ss.str();
int flags = 0;
flags |= (stochastic_round << 0);
flags |= (apply_relu << 1);
CUresult res;
if (a_t)
lda *= (8 * sizeof(short));
if (!b_t)
ldb *= (8 * sizeof(short));
int zero = 0;
int one = 1;
void *args[16] = {&C, &A, &B, &alpha, &beta, &flags, &lda, &ldb, &ldc, &m, &n, &k,
&zero, &zero, &zero, &one};
res = cuLaunchKernel(nervana_kernels_[name],
1, gridA, gridB,
threads, 1, 1,
0,
stream, args, NULL);
if (res != CUDA_SUCCESS) {
std::cerr << "Error launching kernel " << name << " " << res << std::endl;
return false;
}
return true;
}
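// Usage sketch (hypothetical call; only the entry points defined above are assumed):
//   nervana_loadKernels("/path/to/cubins/");
//   nervana_sgemm(A, B, C, /*a_t=*/false, /*b_t=*/false, m, n, k, lda, ldb, ldc,
//                 1.0f, 0.0f, /*rand_state=*/nullptr, /*stochastic_round=*/false,
//                 /*apply_relu=*/false, stream, /*grid=*/-1);
//   nervana_unloadKernels();
// grid = -1 lets get_grid_dimensions() pick the tile size from m, n and the SM count.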
|
the_stack
|
#include <ATen/cuda/CUDAContext.h>
#include <pybind11/pybind11.h>
#include <torch/extension.h>
#include <unordered_map>
namespace py = pybind11;
namespace minkowski {
namespace detail {
template <typename src_type, typename dst_type>
__global__ void cuda_copy_n(src_type const *src, uint32_t N, dst_type *dst) {
CUDA_KERNEL_LOOP(index, N) { dst[index] = src[index]; }
}
template <typename coordinate_type, typename coordinate_field_type,
template <typename C> class TemplatedAllocator>
struct insert_and_map_functor<coordinate_type, coordinate_field_type,
TemplatedAllocator, CoordinateMapGPU> {
std::pair<at::Tensor, at::Tensor> operator()(
coordinate_map_key_type &map_key, at::Tensor const &th_coordinate,
CoordinateMapManager<coordinate_type, coordinate_field_type,
TemplatedAllocator, CoordinateMapGPU> &manager) {
uint32_t const N = th_coordinate.size(0);
uint32_t const coordinate_size = th_coordinate.size(1);
coordinate_type *p_coordinate = th_coordinate.data_ptr<coordinate_type>();
auto coordinate_map = CoordinateMapGPU<coordinate_type, TemplatedAllocator>(
N, coordinate_size, manager.m_gpu_default_occupancy, map_key.first);
LOG_DEBUG("inserting", N,
"coordinates with coordinate_size:", coordinate_size);
auto input_coordinate_range =
coordinate_range<coordinate_type>(N, coordinate_size, p_coordinate);
LOG_DEBUG("insert_and_map");
auto map_inverse_map = coordinate_map.template insert_and_map<true>(
input_coordinate_range.begin(), input_coordinate_range.end());
LOG_DEBUG("mapping size:", map_inverse_map.first.size());
// insert moves map
manager.insert(map_key, coordinate_map);
auto const &mapping = map_inverse_map.first;
auto const &inverse_mapping = map_inverse_map.second;
// return tensors
// TODO int64_t
LOG_DEBUG("Reserve mapping torch output tensors.");
at::Tensor th_mapping = torch::empty(
{(int64_t)mapping.size()},
th_coordinate.options().requires_grad(false).dtype(torch::kInt64));
at::Tensor th_inverse_mapping = torch::empty(
{(int64_t)inverse_mapping.size()},
th_coordinate.options().requires_grad(false).dtype(torch::kInt64));
auto const num_blocks =
(mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
LOG_DEBUG("cuda_copy_n with num_blocks:", num_blocks,
"mapping.size():", mapping.size());
detail::cuda_copy_n<default_types::index_type, int64_t>
<<<num_blocks, CUDA_NUM_THREADS>>>(mapping.cbegin(), mapping.size(),
th_mapping.data_ptr<int64_t>());
auto const num_inv_blocks =
(inverse_mapping.size() + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
LOG_DEBUG("cuda_copy_n with num_inv_blocks:", num_inv_blocks,
"inverse_mapping.size():", inverse_mapping.size());
if (inverse_mapping.size() > 0) {
detail::cuda_copy_n<default_types::index_type, int64_t>
<<<num_inv_blocks, CUDA_NUM_THREADS>>>(
inverse_mapping.cbegin(), inverse_mapping.size(),
th_inverse_mapping.data_ptr<int64_t>());
CUDA_CHECK(cudaStreamSynchronize(0));
}
LOG_DEBUG("End of insert_map_functor");
// return std::make_pair(std::move(th_mapping),
// std::move(th_inverse_mapping));
return std::make_pair(th_mapping, th_inverse_mapping);
}
};
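// The insert_and_map functor above returns two int64 index tensors. By MinkowskiEngine
// convention (inferred from usage), `mapping` holds, for each unique coordinate, the input
// row it was taken from, and `inverse_mapping` holds, for each input row, the index of its
// unique coordinate, so both can be used directly for torch tensor indexing.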
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct stride_map2tensor_functor<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
using gpu_kernel_map_type =
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>;
std::pair<at::Tensor, at::Tensor>
operator()(gpu_kernel_map_type const &stride_kernel_map) {
ASSERT(stride_kernel_map.in_maps.size(0) ==
stride_kernel_map.out_maps.size(0),
"Invalid kernel map");
auto curr_device = at::cuda::current_device();
auto options = torch::TensorOptions({at::kCUDA, curr_device})
.dtype(torch::kLong)
.requires_grad(false);
auto const out_size = stride_kernel_map.size();
// return tensors
LOG_DEBUG("Reserve mapping torch output tensors with size:", out_size);
at::Tensor th_in_map = torch::empty({(int64_t)out_size}, options);
at::Tensor th_out_map = torch::empty({(int64_t)out_size}, options);
auto const num_blocks =
(out_size + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
LOG_DEBUG("cuda_copy_n with num_blocks:", num_blocks,
"mapping size:", out_size);
detail::cuda_copy_n<default_types::index_type, int64_t>
<<<num_blocks, CUDA_NUM_THREADS>>>(stride_kernel_map.in_maps.begin(),
out_size,
th_in_map.data_ptr<int64_t>());
detail::cuda_copy_n<default_types::index_type, int64_t>
<<<num_blocks, CUDA_NUM_THREADS>>>(stride_kernel_map.out_maps.begin(),
out_size,
th_out_map.data_ptr<int64_t>());
return std::make_pair(std::move(th_in_map), std::move(th_out_map));
}
};
template <typename coordinate_type, typename coordinate_field_type,
template <typename C> class TemplatedAllocator>
struct insert_field_functor<
coordinate_type, coordinate_field_type, TemplatedAllocator,
CoordinateMapGPU,
CoordinateFieldMapGPU<coordinate_field_type, coordinate_type,
TemplatedAllocator>> {
void operator()(
coordinate_map_key_type &map_key, at::Tensor const &th_coordinate,
CoordinateMapManager<coordinate_type, coordinate_field_type,
TemplatedAllocator, CoordinateMapGPU> &manager) {
LOG_DEBUG("insert field");
uint32_t const N = th_coordinate.size(0);
uint32_t const coordinate_size = th_coordinate.size(1);
coordinate_field_type *p_coordinate =
th_coordinate.data_ptr<coordinate_field_type>();
auto map = CoordinateFieldMapGPU<coordinate_field_type, coordinate_type,
TemplatedAllocator>(N, coordinate_size,
map_key.first);
map.insert(p_coordinate, p_coordinate + N * coordinate_size);
LOG_DEBUG("insert map with tensor_stride", map_key.first);
manager.insert_field_map(map_key, map);
}
};
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct kernel_map_functor<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>
operator()(
CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map,
CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map,
CUDAKernelMapMode::Mode kernel_map_mode,
cpu_kernel_region<coordinate_type> &kernel) {
LOG_DEBUG("cpu_kernel_region initialized with volume", kernel.volume());
kernel.to_gpu();
auto gpu_kernel = gpu_kernel_region<coordinate_type>(kernel);
LOG_DEBUG("gpu_kernel_region initialization");
return in_map.kernel_map(out_map, gpu_kernel, kernel_map_mode,
CUDA_NUM_THREADS);
}
};
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct stride_map_functor<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>
operator()(
CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &in_map,
CoordinateMapGPU<coordinate_type, TemplatedAllocator> const &out_map,
default_types::stride_type const &stride) {
return in_map.stride_map(out_map, stride, CUDA_NUM_THREADS);
}
};
// a partial specialization functor for kernel map in/out swap
template <>
struct swap_in_out_map_functor<gpu_kernel_map<
default_types::index_type, detail::default_allocator<char>>> {
using gpu_kernel_map_type = gpu_kernel_map<default_types::index_type,
detail::default_allocator<char>>;
gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) {
auto swapped_kernel_map = kernel_map.swap();
LOG_DEBUG("Transposed kernel map in_maps:",
swapped_kernel_map.out_maps.begin() -
swapped_kernel_map.in_maps.begin());
return std::move(swapped_kernel_map);
}
};
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct empty_map_functor<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
using gpu_kernel_map_type =
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>;
gpu_kernel_map_type operator()() { return gpu_kernel_map_type{}; }
};
template <>
struct swap_in_out_map_functor<
gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>> {
using gpu_kernel_map_type =
gpu_kernel_map<default_types::index_type, detail::c10_allocator<char>>;
gpu_kernel_map_type operator()(gpu_kernel_map_type const &kernel_map) {
auto swapped_kernel_map = kernel_map.swap();
LOG_DEBUG("Transposed kernel map in_maps:",
swapped_kernel_map.out_maps.begin() -
swapped_kernel_map.in_maps.begin());
return std::move(swapped_kernel_map);
}
};
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct kernel_map_to_tensors<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
using index_type = default_types::index_type;
std::unordered_map<int64_t, at::Tensor> operator()(
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> const
&kernel_map) {
auto curr_device = at::cuda::current_device();
auto options = torch::TensorOptions({at::kCUDA, curr_device})
.dtype(torch::kInt)
.requires_grad(false);
std::unordered_map<int64_t, at::Tensor> kernel_map_th;
if (kernel_map.size() > 0)
for (auto it = kernel_map.key_cbegin(); it != kernel_map.key_cend();
++it) {
auto const &key = it->first;
long const N = kernel_map.size(key);
at::Tensor curr_map = torch::empty({2, N}, options);
int32_t *p_map = curr_map.data_ptr<int32_t>();
CUDA_CHECK(cudaMemcpy(p_map, kernel_map.in_maps.begin(key),
sizeof(int32_t) * N, cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(p_map + N, kernel_map.out_maps.begin(key),
sizeof(int32_t) * N, cudaMemcpyDeviceToDevice));
kernel_map_th[key] = std::move(curr_map);
}
return kernel_map_th;
}
};
namespace detail {
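// strided_copy gathers one element every `stride_size` entries of `src` into a
// dense `dst` array (dst[x] = src[x * stride_size]); below it is used to pull
// the leading (batch) coordinate out of the row-major coordinate storage.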
template <typename dst_type, typename src_type, typename size_type>
__global__ void strided_copy(dst_type *__restrict__ dst, //
size_type const num_threads, //
src_type const *__restrict__ src, //
size_type const stride_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
dst[x] = src[x * stride_size];
}
}
} // namespace detail
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
struct origin_map_functor<
coordinate_type, TemplatedAllocator, CoordinateMapGPU,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>>> {
std::pair<at::Tensor, std::vector<at::Tensor>> operator()(
CoordinateMapGPU<coordinate_type, TemplatedAllocator> const
&origin_coordinate_map,
gpu_kernel_map<default_types::index_type, TemplatedAllocator<char>> const
&origin_map) {
auto curr_device = at::cuda::current_device();
auto options = torch::TensorOptions({at::kCUDA, curr_device})
.dtype(torch::kLong)
.requires_grad(false);
auto const out_size = origin_coordinate_map.size();
auto const coordinate_size = origin_coordinate_map.coordinate_size();
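// Each coordinate row is laid out as [batch_index, x, y, ...], so a strided
// copy with stride == coordinate_size extracts the batch column.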
at::Tensor batch_indices = torch::empty({out_size}, options);
int64_t *d_batch_indices = batch_indices.data_ptr<int64_t>();
LOG_DEBUG("manager origin map strided_copy");
// GPU batch indices are sorted
detail::strided_copy<int64_t, default_types::dcoordinate_type,
default_types::size_type>
<<<GET_BLOCKS(out_size, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
d_batch_indices, out_size,
origin_coordinate_map.const_coordinate_data(), coordinate_size);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("manager batch copy");
std::vector<int64_t> vec_batch_indices(out_size);
CUDA_CHECK(cudaMemcpy(vec_batch_indices.data(), d_batch_indices,
out_size * sizeof(int64_t), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaStreamSynchronize(0));
#ifdef DEBUG
LOG_DEBUG("Batch indices:", vec_batch_indices);
#endif
// gpu origin() sorts the batch indices
auto const max_batch_index = vec_batch_indices[out_size - 1];
std::vector<at::Tensor> in_maps;
default_types::index_type current_batch_row_index = 0;
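// Walk batch ids 0..max_batch_index; for ids present in the (sorted) batch list
// emit that batch's row indices from the origin map, otherwise emit an empty tensor.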
for (default_types::index_type i = 0; i < (max_batch_index + 1);) {
if (vec_batch_indices[current_batch_row_index] == i) {
auto p_curr_map = origin_map.in_maps.begin(current_batch_row_index);
auto const curr_size = origin_map.size(current_batch_row_index);
at::Tensor row_indices = torch::empty({curr_size}, options);
int64_t *d_row_indices = row_indices.data_ptr<int64_t>();
LOG_DEBUG("manager batch copy", i);
detail::strided_copy<int64_t, default_types::index_type,
default_types::size_type>
<<<GET_BLOCKS(curr_size, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
d_row_indices, curr_size, p_curr_map, 1);
in_maps.push_back(std::move(row_indices));
// if there is a match, move the index.
++current_batch_row_index;
if (current_batch_row_index >= out_size) {
// Should not happen, but for safety
break;
}
} else {
at::Tensor row_indices = torch::empty({0}, options);
in_maps.push_back(std::move(row_indices));
}
++i;
}
CUDA_CHECK(cudaStreamSynchronize(0));
return std::make_pair(batch_indices, in_maps);
}
};
} // namespace detail
template class CoordinateMapManager<
default_types::dcoordinate_type, default_types::ccoordinate_type,
detail::default_allocator, CoordinateMapGPU>;
template class CoordinateMapManager<default_types::dcoordinate_type,
default_types::ccoordinate_type,
detail::c10_allocator, CoordinateMapGPU>;
} // end namespace minkowski
|
the_stack
|
/**
 * \file
* cub::BlockPartitionTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide list partitioning.
*/
#pragma once
#include <iterator>
#include "scan_tiles_types.cuh"
#include "../../thread/thread_operators.cuh"
#include "../../block/block_load.cuh"
#include "../../block/block_store.cuh"
#include "../../block/block_scan.cuh"
#include "../../grid/grid_queue.cuh"
#include "../../util_vector.cuh"
#include "../../util_namespace.cuh"
/// Optional outer namespace(s)
CUB_NS_PREFIX
/// CUB namespace
namespace cub {
/******************************************************************************
* Tuning policy types
******************************************************************************/
/**
* Tuning policy for BlockPartitionTiles
*/
template <
int _PARTITIONS,
int _BLOCK_THREADS,
int _ITEMS_PER_THREAD,
PtxLoadModifier _LOAD_MODIFIER,
BlockScanAlgorithm _SCAN_ALGORITHM>
struct BlockPartitionTilesPolicy
{
enum
{
PARTITIONS = _PARTITIONS,
BLOCK_THREADS = _BLOCK_THREADS,
ITEMS_PER_THREAD = _ITEMS_PER_THREAD,
};
static const PtxLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER;
static const BlockScanAlgorithm SCAN_ALGORITHM = _SCAN_ALGORITHM;
};
/**
* Tuple type for scanning partition membership flags
*/
template <
typename SizeT,
int PARTITIONS>
struct PartitionScanTuple;
/**
* Tuple type for scanning partition membership flags (specialized for 1 output partition)
*/
template <typename SizeT>
struct PartitionScanTuple<SizeT, 1> : VectorHelper<SizeT, 1>::Type
{
__device__ __forceinline__ PartitionScanTuple operator+(const PartitionScanTuple &other)
{
PartitionScanTuple retval;
retval.x = this->x + other.x;
return retval;
}
template <typename PredicateOp, typename T>
__device__ __forceinline__ void SetFlags(PredicateOp pred_op, T val)
{
this->x = pred_op(val);
}
template <typename PredicateOp, typename T, typename OutputIteratorRA>
__device__ __forceinline__ void Scatter(PredicateOp pred_op, T val, OutputIteratorRA d_out, SizeT num_items)
{
if (pred_op(val))
d_out[this->x - 1] = val;
}
};
/**
* Tuple type for scanning partition membership flags (specialized for 2 output partitions)
*/
template <typename SizeT>
struct PartitionScanTuple<SizeT, 2> : VectorHelper<SizeT, 2>::Type
{
__device__ __forceinline__ PartitionScanTuple operator+(const PartitionScanTuple &other)
{
PartitionScanTuple retval;
retval.x = this->x + other.x;
retval.y = this->y + other.y;
return retval;
}
template <typename PredicateOp, typename T>
__device__ __forceinline__ void SetFlags(PredicateOp pred_op, T val)
{
bool pred = pred_op(val);
this->x = pred;
this->y = !pred;
}
template <typename PredicateOp, typename T, typename OutputIteratorRA>
__device__ __forceinline__ void Scatter(PredicateOp pred_op, T val, OutputIteratorRA d_out, SizeT num_items)
{
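// After the inclusive scan, x is the 1-based rank among selected items and y
// the 1-based rank among rejected ones: selected items pack forward from
// offset x-1, rejected items pack backward from offset num_items - y.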
SizeT scatter_offset = (pred_op(val)) ?
this->x - 1 :
num_items - this->y;
d_out[scatter_offset] = val;
}
};
/******************************************************************************
* Thread block abstractions
******************************************************************************/
/**
* \brief BlockPartitionTiles implements a stateful abstraction of CUDA thread blocks for participating in device-wide list partitioning.
*
* Implements a single-pass "domino" strategy with adaptive prefix lookback.
*/
template <
typename BlockPartitionTilesPolicy, ///< Tuning policy
typename InputIteratorRA, ///< Input iterator type
typename OutputIteratorRA, ///< Output iterator type
typename PredicateOp, ///< Partition predicate functor type
typename SizeT> ///< Offset integer type
struct BlockPartitionTiles
{
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
// Constants
enum
{
PARTITIONS = BlockPartitionTilesPolicy::PARTITIONS,
BLOCK_THREADS = BlockPartitionTilesPolicy::BLOCK_THREADS,
ITEMS_PER_THREAD = BlockPartitionTilesPolicy::ITEMS_PER_THREAD,
TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD,
};
// Load modifier
static const PtxLoadModifier LOAD_MODIFIER = BlockPartitionTilesPolicy::LOAD_MODIFIER;
// Data type of input iterator
typedef typename std::iterator_traits<InputIteratorRA>::value_type T;
// Tuple type for scanning partition membership flags
typedef PartitionScanTuple<SizeT, PARTITIONS> PartitionScanTuple;
// Tile status descriptor type
typedef ScanTileDescriptor<PartitionScanTuple> ScanTileDescriptorT;
// Block scan type for scanning membership flag scan_tuples
typedef BlockScan<
PartitionScanTuple,
BlockPartitionTilesPolicy::BLOCK_THREADS,
BlockPartitionTilesPolicy::SCAN_ALGORITHM> BlockScanT;
// Callback type for obtaining inter-tile prefix during block scan
typedef DeviceScanBlockPrefixOp<PartitionScanTuple, Sum> InterblockPrefixOp;
// Shared memory type for this threadblock
struct TempStorage
{
typename InterblockPrefixOp::TempStorage prefix; // Smem needed for cooperative prefix callback
typename BlockScanT::TempStorage scan; // Smem needed for tile scanning
SizeT tile_idx; // Shared tile index
};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
TempStorage &temp_storage; ///< Reference to temp_storage
InputIteratorRA d_in; ///< Input data
OutputIteratorRA d_out; ///< Output data
ScanTileDescriptorT *d_tile_status; ///< Global list of tile status
PredicateOp pred_op; ///< Unary predicate operator indicating membership in the first partition
SizeT num_items; ///< Total number of input items
//---------------------------------------------------------------------
// Constructor
//---------------------------------------------------------------------
// Constructor
__device__ __forceinline__
BlockPartitionTiles(
TempStorage &temp_storage, ///< Reference to temp_storage
InputIteratorRA d_in, ///< Input data
OutputIteratorRA d_out, ///< Output data
ScanTileDescriptorT *d_tile_status, ///< Global list of tile status
PredicateOp pred_op, ///< Unary predicate operator indicating membership in the first partition
SizeT num_items) ///< Total number of input items
:
temp_storage(temp_storage),
d_in(d_in),
d_out(d_out),
d_tile_status(d_tile_status),
pred_op(pred_op),
num_items(num_items)
{}
//---------------------------------------------------------------------
// Domino scan
//---------------------------------------------------------------------
/**
* Process a tile of input
*/
template <bool FULL_TILE>
__device__ __forceinline__ void ConsumeTile(
int tile_idx, ///< Tile index
SizeT block_offset, ///< Tile offset
PartitionScanTuple &partition_ends) ///< Running total
{
T items[ITEMS_PER_THREAD];
PartitionScanTuple scan_tuples[ITEMS_PER_THREAD];
// Load items
int valid_items = num_items - block_offset;
if (FULL_TILE)
LoadStriped<LOAD_MODIFIER, BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items);
else
LoadStriped<LOAD_MODIFIER, BLOCK_THREADS>(threadIdx.x, d_in + block_offset, items, valid_items);
// Prevent hoisting
// __syncthreads();
// __threadfence_block();
// Set partition membership flags in scan_tuples
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
scan_tuples[ITEM].SetFlags(pred_op, items[ITEM]);
}
// Perform inclusive scan over scan_tuples
PartitionScanTuple block_aggregate;
if (tile_idx == 0)
{
BlockScanT(temp_storage.scan).InclusiveScan(scan_tuples, scan_tuples, Sum(), block_aggregate);
partition_ends = block_aggregate;
// Update tile status if there are successor tiles
if (FULL_TILE && (threadIdx.x == 0))
ScanTileDescriptorT::SetPrefix(d_tile_status, block_aggregate);
}
else
{
InterblockPrefixOp prefix_op(d_tile_status, temp_storage.prefix, Sum(), tile_idx);
BlockScanT(temp_storage.scan).InclusiveScan(scan_tuples, scan_tuples, Sum(), block_aggregate, prefix_op);
partition_ends = prefix_op.inclusive_prefix;
}
// Scatter items
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
{
// Scatter if not out-of-bounds
if (FULL_TILE || (threadIdx.x + (ITEM * BLOCK_THREADS) < valid_items))
{
scan_tuples[ITEM].Scatter(pred_op, items[ITEM], d_out, num_items);
}
}
}
/**
* Dequeue and scan tiles of items as part of a domino scan
*/
__device__ __forceinline__ void ConsumeTiles(
GridQueue<int> queue, ///< [in] Queue descriptor for assigning tiles of work to thread blocks
SizeT num_tiles, ///< [in] Total number of input tiles
PartitionScanTuple &partition_ends, ///< [out] Running partition end offsets
bool &is_last_tile) ///< [out] Whether or not this block handled the last tile (i.e., partition_ends is valid for the entire input)
{
#if CUB_PTX_ARCH < 200
// No concurrent kernels allowed and blocks are launched in increasing order, so just assign one tile per block (up to 65K blocks)
int tile_idx = blockIdx.x;
SizeT block_offset = SizeT(TILE_ITEMS) * tile_idx;
if (block_offset + TILE_ITEMS <= num_items)
{
ConsumeTile<true>(tile_idx, block_offset, partition_ends);
}
else if (block_offset < num_items)
{
ConsumeTile<false>(tile_idx, block_offset, partition_ends);
}
is_last_tile = (tile_idx == num_tiles - 1);
#else
// Get first tile
if (threadIdx.x == 0)
temp_storage.tile_idx = queue.Drain(1);
__syncthreads();
int tile_idx = temp_storage.tile_idx;
SizeT block_offset = SizeT(TILE_ITEMS) * tile_idx;
while (block_offset + TILE_ITEMS <= num_items)
{
// Consume full tile
ConsumeTile<true>(tile_idx, block_offset, partition_ends);
is_last_tile = (tile_idx == num_tiles - 1);
// Get next tile
if (threadIdx.x == 0)
temp_storage.tile_idx = queue.Drain(1);
__syncthreads();
tile_idx = temp_storage.tile_idx;
block_offset = SizeT(TILE_ITEMS) * tile_idx;
}
// Consume a partially-full tile
if (block_offset < num_items)
{
ConsumeTile<false>(tile_idx, block_offset, partition_ends);
is_last_tile = (tile_idx == num_tiles - 1);
}
#endif
}
};
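// ---------------------------------------------------------------------------
// A minimal usage sketch (not part of the original header): a hypothetical
// kernel showing how BlockPartitionTiles is typically instantiated and driven
// by a GridQueue. The policy values (128 threads, 4 items per thread,
// LOAD_DEFAULT, BLOCK_SCAN_RAKING) and the name ExamplePartitionKernel are
// illustrative assumptions, not part of this library's public interface.
template <
    typename InputIteratorRA,
    typename OutputIteratorRA,
    typename PredicateOp,
    typename SizeT>
__global__ void ExamplePartitionKernel(
    InputIteratorRA d_in, ///< Input data
    OutputIteratorRA d_out, ///< Output data
    ScanTileDescriptor<PartitionScanTuple<SizeT, 2> > *d_tile_status, ///< Global tile status
    PredicateOp pred_op, ///< Partition predicate
    SizeT num_items, ///< Total number of input items
    SizeT num_tiles, ///< Total number of input tiles
    GridQueue<int> queue) ///< Queue assigning tiles to thread blocks
{
    // Tuning policy: two output partitions, 128 threads, 4 items per thread (assumed values)
    typedef BlockPartitionTilesPolicy<2, 128, 4, LOAD_DEFAULT, BLOCK_SCAN_RAKING> Policy;
    typedef BlockPartitionTiles<Policy, InputIteratorRA, OutputIteratorRA, PredicateOp, SizeT> BlockPartitionTilesT;
    // Shared memory for the block abstraction
    __shared__ typename BlockPartitionTilesT::TempStorage temp_storage;
    PartitionScanTuple<SizeT, 2> partition_ends;
    bool is_last_tile;
    // Consume tiles until the queue is drained
    BlockPartitionTilesT(temp_storage, d_in, d_out, d_tile_status, pred_op, num_items)
        .ConsumeTiles(queue, num_tiles, partition_ends, is_last_tile);
}
// ---------------------------------------------------------------------------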
} // CUB namespace
CUB_NS_POSTFIX // Optional outer namespace(s)
|
the_stack
|
template <typename scalar_t>
__device__ scalar_t modulated_deform_conv3d_im2col_trilinear(
const scalar_t *bottom_data, const int data_width,const int data_length,
const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l)
{
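// Trilinear interpolation: sample bottom_data at the fractional position
// (h, w, l) by gathering the 8 neighbouring voxels (zero outside the valid
// range) and blending them with the weights computed below.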
int h_low = floor(h);
int w_low = floor(w);
int l_low = floor(l);
int h_high = h_low + 1;
int w_high = w_low + 1;
int l_high = l_low + 1;
scalar_t lh = h - h_low;//dh
scalar_t lw = w - w_low;//dw
scalar_t ll = l - l_low;//dl
scalar_t hh = 1 - lh, hw = 1 - lw, hl = 1 - ll; //1-dh 1-dw 1-dl
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0 && l_low >= 0)
v1 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_low >=0 && l_high<= length -1)
v2 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_high];
scalar_t v3 = 0;
if (h_low >= 0 && w_high <= width - 1 && l_low >= 0)
v3 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_low];
scalar_t v4 = 0;
if (h_low >= 0 && w_high <= width - 1 && l_high<= length -1)
v4 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_high];
scalar_t v5 = 0;
if (h_high <= height -1 && w_low >= 0 && l_low >= 0)
v5 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_low];
scalar_t v6 = 0;
if (h_high <= height -1 && w_low >= 0 && l_high<= length -1)
v6 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_high];
scalar_t v7 = 0;
if (h_high <= height -1 && w_high <= width - 1 && l_low >= 0)
v7 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_low];
scalar_t v8 = 0;
if (h_high <= height -1 && w_high <= width - 1 && l_high<= length -1)
v8 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_high];
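// Corner weights: each w_i is the product of (1-d) or d along h, w and l,
// matching the corner sampled into the corresponding v_i above.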
scalar_t w1 = hh * hw *hl, w2 = hh *hw *ll, w3 = hh * lw*hl, w4 = hh * lw* ll;
scalar_t w5 = lh * hw *hl, w6 = lh *hw *ll, w7 = lh * lw*hl, w8 = lh * lw* ll;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8);
return val;
}
template <typename scalar_t>
__global__ void modulated_deform_conv3d_im2col_gpu_kernel(
const int n,const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int length,
const int kernel_h, const int kernel_w, const int kernel_l,
const int pad_h, const int pad_w, const int pad_l,
const int stride_h, const int stride_w, const int stride_l,
const int dilation_h, const int dilation_w, const int dilation_l,
const int channel_per_deformable_group,
const int batch_size,const int num_channels, const int deformable_group,
const int height_col, const int width_col, const int length_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index: linear index into the output (column) matrix
const int l_col = index % length_col;
const int w_col = (index / length_col) % width_col;
const int h_col = (index / length_col / width_col ) % height_col;
const int b_col = (index / length_col / width_col / height_col) % batch_size;
const int c_im = (index / length_col/ width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w* kernel_l;
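// im2col layout: every input channel expands into kernel_h*kernel_w*kernel_l
// rows of data_col; each thread fills those rows for one output location.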
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
const int l_in = l_col * stride_l - pad_l;
scalar_t *data_col_ptr = data_col+(c_col*batch_size + b_col) *height_col*width_col*length_col
+h_col*width_col*length_col
+w_col*length_col
+l_col;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width * length;
const scalar_t *data_offset_ptr = data_offset
+ (b_col * deformable_group + deformable_group_index)
* 3 * kernel_h * kernel_w * kernel_l * height_col * width_col * length_col;
const scalar_t *data_mask_ptr = data_mask
+ (b_col * deformable_group + deformable_group_index)
* kernel_h * kernel_w *kernel_l * height_col * width_col * length_col;
for (int i = 0; i < kernel_h; ++i)
for (int j = 0; j < kernel_w; ++j)
for (int k = 0; k < kernel_l; ++k){
int f=i*kernel_w*kernel_l + j*kernel_l+k;
const int data_offset_h_ptr = (3*f) * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col;
const int data_offset_w_ptr = (3*f+1) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col;
const int data_offset_l_ptr = (3*f+2) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col;
const int data_mask_hwl_ptr = f * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t offset_l = data_offset_ptr[data_offset_l_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hwl_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
const scalar_t l_im = l_in + k * dilation_l + offset_l;
if (h_im > -1 && w_im > -1 && l_im > -1 && h_im < height && w_im < width && l_im < length)
{
val = modulated_deform_conv3d_im2col_trilinear(data_im_ptr, width, length, height, width, length, h_im, w_im,l_im);
}
*data_col_ptr = val*mask;
data_col_ptr += batch_size * height_col * width_col* length_col;
}
}
}
void modulated_deform_conv3d_im2col_cuda(
at::Tensor data_im, at::Tensor data_offset,at::Tensor data_mask,
const int batch_size,const int channels,
const int height_im, const int width_im, const int length_im,
const int height_col, const int width_col, const int length_col,
const int kernel_h, const int kernel_w,const int kernel_l,
const int pad_h, const int pad_w, const int pad_l,
const int stride_h, const int stride_w, const int stride_l,
const int dilation_h, const int dilation_w, const int dilation_l,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col * length_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deform_conv3d_im2col_cuda", ([&] {
const scalar_t *data_im_ = data_im.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *data_col_ = data_col.data<scalar_t>();
modulated_deform_conv3d_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, data_mask_,
height_im, width_im, length_im,
kernel_h, kernel_w, kernel_l,
pad_h, pad_w, pad_l,
stride_h, stride_w, stride_l,
dilation_h, dilation_w, dilation_l,
channel_per_deformable_group,batch_size,
channels, deformable_group,
height_col, width_col, length_col,data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deform_conv3d_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
int modulated_deform_conv3d_forward_cuda(
at::Tensor input, at::Tensor weight, at::Tensor bias,
at::Tensor offset, at::Tensor mask, at::Tensor output,
const int kernel_h, const int kernel_w, const int kernel_l,
const int stride_h, const int stride_w, const int stride_l,
const int pad_h, const int pad_w, const int pad_l,
const int dilation_h,const int dilation_w, const int dilation_l,
const int group, const int deformable_group,const int in_step,const bool with_bias) {
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous");
TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous");
TORCH_CHECK(mask.is_contiguous(), "mask tensor has to be contiguous");
TORCH_CHECK(output.is_contiguous(), "output tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int length = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int kernel_l_ = weight.size(4);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l)
AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).",
kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int length_out =
(length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1;
const int step=GET_STEP(batch,in_step);
output = output.view({batch,channels_out,height_out, width_out,length_out});
output.zero_();
at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l,
step * height_out * width_out * length_out},input.options());
input = input.view({batch / step, step, channels,height, width,length});
offset =offset.view({batch / step, step,deformable_group*3* kernel_h * kernel_w * kernel_l,
height_out, width_out,length_out});
mask =mask.view({batch / step, step,deformable_group* kernel_h * kernel_w * kernel_l,
height_out, width_out,length_out});
//divide into group
output = output.view({batch/step,group,output.size(1)/group,step,output.size(2),output.size(3),output.size(4)});
weight = weight.view({group, weight.size(0) / group, weight.size(1),
weight.size(2), weight.size(3),weight.size(4)});
for (int b = 0; b < batch/step; b++) {
columns.fill_(0);
modulated_deform_conv3d_im2col_cuda(
input[b], offset[b], mask[b],step, channels,
height, width,length,
height_out,width_out, length_out,
kernel_h, kernel_w,kernel_l,
pad_h, pad_w, pad_l,
stride_h, stride_w, stride_l,
dilation_h, dilation_w, dilation_l,
deformable_group, columns);
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
for (int g = 0; g < group; g++) {
output[b][g] = output[b][g].flatten(1)
.addmm_(weight[g].flatten(1), columns[g]).view_as(output[b][g]);
}
columns = columns.view({columns.size(0)*columns.size(1), columns.size(2)});
}
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4),weight.size(5)});
output = output.view({output.size(0), output.size(1) * output.size(2),
output.size(3), output.size(4),output.size(5),output.size(6)});
output = output.view({batch / step, channels_out, step, height_out, width_out,length_out});
output.transpose_(1, 2);
output = output.contiguous().view({batch , channels_out, height_out, width_out,length_out});
if (with_bias)
output += bias.view({1, bias.size(0), 1, 1, 1});
input=input.view({batch,channels,height,width,length});
offset=offset.view({batch,deformable_group * 3 *kernel_h*kernel_w*kernel_l,height_out,width_out,length_out});
mask=mask.view({batch,deformable_group *kernel_h*kernel_w*kernel_l,height_out,width_out,length_out});
return 0;
}
template <typename scalar_t>
__global__ void modulated_deform_conv3d_gradient_gpu_kernel(
const int n,const scalar_t *grad_col, const scalar_t *data_input,
const scalar_t *data_offset, const scalar_t *data_mask,scalar_t * columns,
const int channels_input,
const int height_input, const int width_input, const int length_input,
const int kernel_h, const int kernel_w, const int kernel_l,
const int pad_h, const int pad_w, const int pad_l,
const int stride_h, const int stride_w, const int stride_l,
const int dilation_h, const int dilation_w, const int dilation_l,
const int channel_per_deformable_group,const int step,
const int offset_channels, const int deformable_group,
const int height_col, const int width_col, const int length_col,
scalar_t * grad_input,scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
int single_col_len=length_col * width_col * height_col;
int f = (index /step/ single_col_len )%(kernel_h * kernel_w * kernel_l);
int i=(f / kernel_l / kernel_w) % kernel_h;
int j=(f / kernel_l) %kernel_w;
int k=f % kernel_l;
int bpos=(index%(step*single_col_len))/(single_col_len);
int lpos_col = (index % (single_col_len)) % length_col;
int wpos_col = ((index % (single_col_len)) / length_col) % width_col;
int hpos_col = ((index % (single_col_len)) / length_col / width_col) % height_col;
int cpos_col = (index / step / single_col_len);
int cpos_in=cpos_col/kernel_h/kernel_w/kernel_l;
int offset_group_index=cpos_in/(channels_input/deformable_group);
//printf("index %d cpos_col %d hpos_col %d wpos_col %d \n",index,cpos_col,hpos_col,wpos_col);
int offset_base_ptr=bpos*(deformable_group * 3 * kernel_h * kernel_w * kernel_l*single_col_len)
+offset_group_index*channel_per_deformable_group*single_col_len
+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col;
int offset_h_ptr=offset_base_ptr+3*f*single_col_len;
int offset_w_ptr=offset_base_ptr+(3*f+1)*single_col_len;
int offset_l_ptr=offset_base_ptr+(3*f+2)*single_col_len;
int mask_hwl_ptr=bpos*(deformable_group * kernel_h * kernel_w * kernel_l*single_col_len)
+offset_group_index*kernel_h*kernel_w*kernel_l*height_col*width_col*length_col
+f*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col;
scalar_t offset_h=data_offset[offset_h_ptr];
scalar_t offset_w=data_offset[offset_w_ptr];
scalar_t offset_l=data_offset[offset_l_ptr];
int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h;
int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w;
int lpos_in = lpos_col * stride_l - pad_l + (k) * dilation_l;
auto real_offset_h=hpos_in+offset_h;
auto real_offset_w=wpos_in+offset_w;
auto real_offset_l=lpos_in+offset_l;
int h_low = floor(real_offset_h);
int w_low = floor(real_offset_w);
int l_low = floor(real_offset_l);
int h_high = h_low + 1;
int w_high = w_low + 1;
int l_high = l_low + 1;
scalar_t dh = real_offset_h - h_low;
scalar_t dw = real_offset_w - w_low;
scalar_t dl = real_offset_l - l_low;
scalar_t w1 = (1-dh) *(1- dw)*(1-dl), w2 =(1- dh) *(1- dw)*dl, w3 = (1-dh)*dw*(1-dl), w4 = (1-dh) * dw*dl;
scalar_t w5 = dh *(1- dw)*(1-dl), w6 =dh*(1- dw)*dl, w7 = dh*dw*(1-dl), w8 = dh*dw*dl;
auto dval=data_mask[mask_hwl_ptr]*grad_col[index];
int data_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input;
int grad_input_base_ptr=(bpos*channels_input+cpos_in)*height_input*width_input*length_input;
bool h_low_flag=h_low >= 0 && h_low <= height_input -1;
bool w_low_flag=w_low >= 0 && w_low <= width_input - 1;
bool l_low_flag=l_low >= 0 && l_low <= length_input -1;
bool h_high_flag=h_high >= 0 && h_high <= height_input -1 && abs(dh)>EPS;
bool w_high_flag=w_high >= 0 && w_high <= width_input - 1 && abs(dw)>EPS;
bool l_high_flag=l_high >= 0 && l_high <= length_input -1 && abs(dl)>EPS;
scalar_t v1 = static_cast<scalar_t>(0);
if (h_low_flag && w_low_flag && l_low_flag ){
v1 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low];
atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_low,w1*dval);
}
scalar_t v2 = static_cast<scalar_t>(0);
if (h_low_flag && w_low_flag && l_high_flag ){
v2 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high];
atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_low* length_input+l_high,w2*dval);
}
scalar_t v3 = static_cast<scalar_t>(0);
if (h_low_flag && w_high_flag && l_low_flag ){
v3 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low];
atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_low,w3*dval);
}
scalar_t v4 = static_cast<scalar_t>(0);
if (h_low_flag && w_high_flag && l_high_flag ){
v4 = data_input[data_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high];
atomicAdd(grad_input+grad_input_base_ptr +h_low * width_input*length_input + w_high* length_input+l_high,w4*dval);
}
scalar_t v5 = static_cast<scalar_t>(0);
if (h_high_flag && w_low_flag && l_low_flag ){
v5 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low];
atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_low,w5*dval);
}
scalar_t v6 = static_cast<scalar_t>(0);
if (h_high_flag && w_low_flag && l_high_flag ){
v6 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high];
atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_low* length_input+l_high,w6*dval);
}
scalar_t v7 = static_cast<scalar_t>(0);
if (h_high_flag && w_high_flag && l_low_flag ){
v7 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low];
atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_low,w7*dval);
}
scalar_t v8 = static_cast<scalar_t>(0);
if (h_high_flag && w_high_flag && l_high_flag ){
v8 = data_input[data_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high];
atomicAdd(grad_input+grad_input_base_ptr +h_high * width_input*length_input + w_high* length_input+l_high,w8*dval);
}
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8);
scalar_t col=val*data_mask[mask_hwl_ptr];//
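// Gradients w.r.t. the sampling offsets: differentiate the trilinear weights
// w1..w8 with respect to dh, dw and dl, weight the corner values v1..v8
// accordingly, and scale by dval = mask * grad_col.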
atomicAdd(grad_offset + offset_h_ptr,
(-1*(1-dw)*(1-dl)*v1-1*(1-dw)*dl*v2-1*dw*(1-dl)*v3-1*dw*dl*v4+(1-dw)*(1-dl)*v5+(1-dw)*dl*v6+dw*(1-dl)*v7+dw*dl*v8)*dval);
atomicAdd(grad_offset + offset_w_ptr,
(-1*(1-dh)*(1-dl)*v1-1*(1-dh)*dl*v2+(1-dh)*(1-dl)*v3+(1-dh)*dl*v4-1*dh*(1-dl)*v5-1*dh*dl*v6+dh*(1-dl)*v7+dh*dl*v8)*dval);
atomicAdd(grad_offset + offset_l_ptr,
(-1*(1-dh)*(1-dw)*v1+(1-dh)*(1-dw)*v2-1*(1-dh)*dw*v3+(1-dh)*dw*v4-1*dh*(1-dw)*v5+dh*(1-dw)*v6-1*dh*dw*v7+dh*dw*v8)*dval);
atomicAdd(grad_mask + mask_hwl_ptr,val*grad_col[index]);
columns[index]=col;
}
}
// Computes gradients w.r.t. offset, mask, and input (and rebuilds columns)
void modulated_deform_conv3d_gradient_cuda(
at::Tensor grad_col, at::Tensor data_input,
at::Tensor data_offset, at::Tensor data_mask, at::Tensor columns,
const int channels, const int height_input, const int width_input, const int length_input,
const int height_col, const int width_col, const int length_col,
const int kernel_h, const int kernel_w, const int kernel_l,
const int pad_h, const int pad_w, const int pad_l,
const int stride_h, const int stride_w, const int stride_l,
const int dilation_h, const int dilation_w, const int dilation_l,
const int step,const int deformable_group,
at::Tensor grad_input, at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels =channels*height_col * width_col * length_col * kernel_h * kernel_w * kernel_l * step;
const int channel_per_deformable_group =3 * kernel_h * kernel_w * kernel_l;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_col.scalar_type(), "modulated_deform_conv3d_gradient_cuda", ([&] {
const scalar_t *grad_col_ = grad_col.data<scalar_t>();
const scalar_t *data_input_ = data_input.data<scalar_t>();
const scalar_t *data_offset_ = data_offset.data<scalar_t>();
const scalar_t *data_mask_ = data_mask.data<scalar_t>();
scalar_t *columns_ = columns.data<scalar_t>();
scalar_t *grad_input_ = grad_input.data<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data<scalar_t>();
modulated_deform_conv3d_gradient_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, grad_col_, data_input_, data_offset_, data_mask_,columns_,
channels, height_input, width_input, length_input,
kernel_h, kernel_w, kernel_l,
pad_h, pad_w, pad_l,
stride_h, stride_w, stride_l,
dilation_h, dilation_w, dilation_l,
channel_per_deformable_group,step,
channel_per_deformable_group * deformable_group,
deformable_group, height_col, width_col, length_col,
grad_input_,grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
int modulated_deform_conv3d_backward_cuda(
at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset,at::Tensor mask,
at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias,
at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output,
const int kernel_h,const int kernel_w,const int kernel_l,
const int stride_h,const int stride_w,const int stride_l,
const int pad_h,const int pad_w,const int pad_l,
const int dilation_h,const int dilation_w,const int dilation_l,
const int group,const int deformable_group,const int in_step,const bool with_bias) {
TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous");
TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous");
TORCH_CHECK(bias.is_contiguous(), "bias tensor has to be contiguous");
TORCH_CHECK(offset.is_contiguous(), "offset tensor has to be contiguous");
TORCH_CHECK(mask.is_contiguous(), "mask tensor has to be contiguous");
TORCH_CHECK(grad_input.is_contiguous(), "grad_input tensor has to be contiguous");
TORCH_CHECK(grad_weight.is_contiguous(), "grad_weight tensor has to be contiguous");
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias tensor has to be contiguous");
TORCH_CHECK(grad_offset.is_contiguous(), "grad_offset tensor has to be contiguous");
TORCH_CHECK(grad_mask.is_contiguous(), "grad_mask tensor has to be contiguous");
TORCH_CHECK(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
const int batch = input.size(0);
const int channels = input.size(1);
const int height = input.size(2);
const int width = input.size(3);
const int length = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_h_ = weight.size(2);
const int kernel_w_ = weight.size(3);
const int kernel_l_ = weight.size(4);
if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l)
AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).",
kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l);
if (channels != channels_kernel * group)
AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).",
channels, channels_kernel * group);
const int height_out =
(height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out =
(width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
const int length_out =
(length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1;
const int step=GET_STEP(batch,in_step);
at::Tensor ones = at::ones({step,height_out, width_out, length_out}, input.options());
at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l,
step*height_out * width_out * length_out},input.options());
at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w * kernel_l,
step*height_out * width_out * length_out},input.options());
grad_output=grad_output.view({batch/step,step,channels_out,height_out,width_out,length_out});
grad_output.transpose_(1, 2);
grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group,
grad_output.size(2), grad_output.size(3),grad_output.size(4),grad_output.size(5)});
input=input.view({batch/step,step,channels,height,width,length});
grad_input = grad_input.view({batch/step,step, channels, height, width,length});
offset=offset.view({batch/step,step,
deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out});
grad_offset=grad_offset.view({batch/step,step,
deformable_group * 3 * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out});
mask=mask.view({batch/step,step,
deformable_group * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out});
grad_mask=grad_mask.view({batch/step,step,
deformable_group * kernel_h * kernel_w * kernel_l,height_out,width_out,length_out});
for (int b = 0; b < batch/step; b++) {
// divide into groups
grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)});
weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3), weight.size(4)});
for (int g = 0; g < group; g++) {
grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1),
grad_output[b][g].flatten(1), 0.0f, 1.0f);
}
grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)});
weight = weight.view({weight.size(0) * weight.size(1), weight.size(2),
weight.size(3), weight.size(4), weight.size(5)});
//print_tensor_size("grad_columns size",grad_columns);
//print_tensor_size("grad_mask[b] size",grad_mask[b]);
columns.fill_(0);
modulated_deform_conv3d_gradient_cuda(
grad_columns, input[b], offset[b], mask[b], columns,
channels, height, width, length,
height_out, width_out, length_out,
kernel_h, kernel_w, kernel_l,
pad_h, pad_w, pad_l,
stride_h, stride_w, stride_l,
dilation_h, dilation_w, dilation_l,
step,deformable_group,
grad_input[b],grad_offset[b],grad_mask[b]);
columns = columns.view({group, columns.size(0) / group, columns.size(1)});
grad_weight = grad_weight.view({group, grad_weight.size(0) / group,
grad_weight.size(1), grad_weight.size(2),
grad_weight.size(3),grad_weight.size(4)});
if (with_bias)
grad_bias = grad_bias.view({group, grad_bias.size(0) / group});
for (int g = 0; g < group; g++) {
grad_weight[g] =grad_weight[g].flatten(1)
.addmm_(grad_output[b][g].flatten(1),columns[g].transpose(0, 1),1.0f,1.0f)
.view_as(grad_weight[g]);
if (with_bias) {
at::Tensor temp=grad_bias[g].view({-1, 1});
temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f);
grad_bias[g] =temp.view(-1);
}
}
columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)});
grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1),
grad_weight.size(2), grad_weight.size(3),
grad_weight.size(4), grad_weight.size(5)});
if (with_bias)
grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)});
}
grad_output = grad_output.view({grad_output.size(0) ,grad_output.size(1)*grad_output.size(2),
grad_output.size(3),grad_output.size(4),
grad_output.size(5),grad_output.size(6)});
//grad_output=grad_output.view({batch/step,channels_kernel,step,height_out,width_out,length_out});
grad_output.transpose_(1, 2);
grad_output =grad_output.view({batch,channels_out,height_out,width_out,length_out});
input=input.view({batch,channels,height,width,length});
grad_input = grad_input.view({batch, channels, height, width,length});
offset=offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l,
height_out,width_out,length_out});
grad_offset=grad_offset.view({batch,deformable_group * 3 * kernel_h * kernel_w *kernel_l,
height_out,width_out,length_out});
mask=mask.view({batch,deformable_group * kernel_h * kernel_w *kernel_l,
height_out,width_out,length_out});
grad_mask=grad_mask.view({batch,deformable_group * kernel_h * kernel_w *kernel_l,
height_out,width_out,length_out});
return 0;
}
|
the_stack
|
#include "lite/kernels/cuda/fc_compute.h"
#include <string>
#include "lite/backends/cuda/cuda_utils.h"
#include "lite/core/op_registry.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace cuda {
template <typename T>
struct FcTypeTraits;
template <>
struct FcTypeTraits<float> {
typedef float4 Type;
};
template <typename T>
__global__ void AddBiasV2(const int num, const T* bias, T* data, int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const T bias_ptr = bias[bias_idx];
const T in_ptr = data[index];
T packed_val;
packed_val.x = in_ptr.x + bias_ptr.x;
packed_val.y = in_ptr.y + bias_ptr.y;
data[index] = packed_val;
}
}
template <>
__global__ void AddBiasV2(const int num,
const half2* bias,
half2* data,
int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const half2 bias_ptr = bias[bias_idx];
const half2 in_ptr = data[index];
#if __CUDA_ARCH__ >= 530
data[index] = __hadd2(in_ptr, bias_ptr);
#else
half2 packed_val;
packed_val.x = __hadd(in_ptr.x, bias_ptr.x);
packed_val.y = __hadd(in_ptr.y, bias_ptr.y);
data[index] = packed_val;
#endif
}
}
template <typename T>
__global__ void AddBiasReluV2(const int num, const T* bias, T* data, int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const T bias_ptr = bias[bias_idx];
const T in_ptr = data[index];
T packed_val;
packed_val.x = fmaxf(0.f, in_ptr.x + bias_ptr.x);
packed_val.y = fmaxf(0.f, in_ptr.y + bias_ptr.y);
data[index] = packed_val;
}
}
template <>
__global__ void AddBiasReluV2(const int num,
const half2* bias,
half2* data,
int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const half2 bias_ptr = bias[bias_idx];
const half2 in_ptr = data[index];
#if __CUDA_ARCH__ >= 530
data[index] = __hmul2(__hgt2(in_ptr + bias_ptr, __float2half2_rn(0.f)),
in_ptr + bias_ptr);
#else
const float2 bias = __half22float2(bias_ptr);
const float2 in = __half22float2(in_ptr);
data[index] = __floats2half2_rn(
bias.x + in.x > 0.0f ? static_cast<float>(bias.x + in.x) : 0.0f,
bias.y + in.y > 0.0f ? static_cast<float>(bias.y + in.y) : 0.0f);
#endif
}
}
template <typename T>
__global__ void AddBiasV4(const int num, const T* bias, T* data, int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const T bias_ptr = bias[bias_idx];
const T in_ptr = data[index];
T packed_val;
packed_val.x = in_ptr.x + bias_ptr.x;
packed_val.y = in_ptr.y + bias_ptr.y;
packed_val.z = in_ptr.z + bias_ptr.z;
packed_val.w = in_ptr.w + bias_ptr.w;
data[index] = packed_val;
}
}
template <typename T>
__global__ void AddBiasReluV4(const int num, const T* bias, T* data, int K) {
CUDA_KERNEL_LOOP(index, num) {
int bias_idx = index % K;
const T bias_ptr = bias[bias_idx];
const T in_ptr = data[index];
T packed_val;
packed_val.x = fmaxf(0.f, in_ptr.x + bias_ptr.x);
packed_val.y = fmaxf(0.f, in_ptr.y + bias_ptr.y);
packed_val.z = fmaxf(0.f, in_ptr.z + bias_ptr.z);
packed_val.w = fmaxf(0.f, in_ptr.w + bias_ptr.w);
data[index] = packed_val;
}
}
template <typename T>
__global__ void AddBias(const int num, const T* bias, T* data) {
int offset = blockIdx.x * num;
for (int i = threadIdx.x; i < num; i += blockDim.x) {
T temp;
#if __CUDA_ARCH__ >= 350
temp = __ldg(data + offset + i) + __ldg(bias + i);
#else
temp = data[offset + i] + bias[i];
#endif
data[offset + i] = temp;
}
}
template <>
__global__ void AddBias(const int num, const half* bias, half* data) {
int offset = blockIdx.x * num;
for (int i = threadIdx.x; i < num; i += blockDim.x) {
half temp;
#if __CUDA_ARCH__ >= 350
temp = __hadd(__ldg(data + offset + i), __ldg(bias + i));
#else
temp = __hadd(data[offset + i], bias[i]);
#endif
data[offset + i] = temp;
}
}
template <typename T>
__global__ void AddBiasRelu(const int num, const T* bias, T* data) {
int offset = blockIdx.x * num;
for (int i = threadIdx.x; i < num; i += blockDim.x) {
T temp;
#if __CUDA_ARCH__ >= 350
temp = __ldg(data + offset + i) + __ldg(bias + i);
#else
temp = data[offset + i] + bias[i];
#endif
data[offset + i] = static_cast<int>(temp > 0) * temp;
}
}
template <>
__global__ void AddBiasRelu<half>(const int num, const half* bias, half* data) {
int offset = blockIdx.x * num;
for (int i = threadIdx.x; i < num; i += blockDim.x) {
half temp;
#if __CUDA_ARCH__ >= 350
temp = __hadd(__ldg(data + offset + i), __ldg(bias + i));
#else
temp = __hadd(data[offset + i], bias[i]);
#endif
#if __CUDA_ARCH__ >= 530
data[offset + i] =
__hgt(temp, __float2half(0.0f)) ? temp : __float2half(0.0f);
#else
data[offset + i] =
__float2half(__half2float(temp) > 0.f ? __half2float(temp) : 0.f);
#endif
}
}
template <typename T, PrecisionType PType>
void FcCompute<T, PType>::PrepareForRun() {
gemm_impl_.reset(new lite::cuda::math::Gemm<T, T>);
}
template <typename T, PrecisionType PType>
void FcCompute<T, PType>::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->template Param<param_t>();
const auto* x_data = param.input->template data<T>();
const auto* w_data = param.w->template data<T>();
const auto* b_data = param.bias ? param.bias->template data<T>() : nullptr;
auto out_vec = param.output->dims().Vectorize();
out_vec.back() = param.w->dims()[1];
param.output->Resize(out_vec);
auto* out_data = param.output->template mutable_data<T>(TARGET(kCUDA));
int in_num_col_dims = param.in_num_col_dims;
int M = static_cast<int>(
param.input->dims().Slice(0, param.in_num_col_dims).production());
int K = static_cast<int>(
param.input->dims()
.Slice(param.in_num_col_dims, param.input->dims().size())
.production());
int K2 = static_cast<int>(param.w->dims()[0]);
int N = static_cast<int>(param.w->dims()[1]);
CHECK_EQ(K, K2) << "x_w must be equal with y_h";
CHECK(gemm_impl_->init(false, false, M, N, K, &context));
gemm_impl_->run(1.0f, 0.0f, x_data, w_data, out_data, &context);
if (b_data == nullptr) {
return;
}
std::string activation_type = param.activation_type;
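// When N is a multiple of 4, the bias row and output can be reinterpreted as
// the packed vector type (float4 for float) so each thread adds four bias
// values per iteration; otherwise fall back to the scalar kernels launched
// with one block per output row.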
if (N % 4 == 0) {
const int threads = 256;
const int num = M * N / 4;
const int blocks = (num + threads - 1) / threads;
typedef typename FcTypeTraits<T>::Type trans_type;
const auto* bias_ptr_v4 = reinterpret_cast<const trans_type*>(b_data);
auto* data_ptr_v4 = reinterpret_cast<trans_type*>(out_data);
if (activation_type == "relu") {
AddBiasReluV4<trans_type><<<blocks, threads, 0, stream>>>(
num, bias_ptr_v4, data_ptr_v4, N / 4);
} else if (activation_type == "") {
AddBiasV4<trans_type><<<blocks, threads, 0, stream>>>(
num, bias_ptr_v4, data_ptr_v4, N / 4);
} else {
LOG(FATAL) << "not supported activation type: " << activation_type;
}
} else {
const int threads = 256;
const int blocks = M;
if (activation_type == "relu") {
AddBiasRelu<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
} else if (activation_type == "") {
AddBias<T><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
} else {
LOG(FATAL) << "not supported activation type: " << activation_type;
}
}
}
template <>
void FcCompute<half, PRECISION(kFP16)>::Run() {
auto& context = this->ctx_->template As<CUDAContext>();
auto stream = context.exec_stream();
auto& param = this->template Param<param_t>();
const auto* x_data = param.input->template data<half>();
const auto* w_data = param.w->template data<half>();
const auto* b_data = param.bias ? param.bias->template data<half>() : nullptr;
auto out_vec = param.output->dims().Vectorize();
out_vec.back() = param.w->dims()[1];
param.output->Resize(out_vec);
auto* out_data = param.output->template mutable_data<half>(TARGET(kCUDA));
int in_num_col_dims = param.in_num_col_dims;
int M = static_cast<int>(
param.input->dims().Slice(0, param.in_num_col_dims).production());
int K = static_cast<int>(
param.input->dims()
.Slice(param.in_num_col_dims, param.input->dims().size())
.production());
int K2 = static_cast<int>(param.w->dims()[0]);
int N = static_cast<int>(param.w->dims()[1]);
CHECK_EQ(K, K2) << "x_w must be equal with y_h";
CHECK(gemm_impl_->init(false, false, M, N, K, &context));
gemm_impl_->run(1.0f, 0.0f, x_data, w_data, out_data, &context);
if (b_data == nullptr) {
return;
}
std::string activation_type = param.activation_type;
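// fp16 path: when N is even, pack two half values per half2 so the bias add
// uses the vectorized half2 intrinsics; otherwise use the scalar half kernels.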
if (N % 2 == 0) {
const int threads = 256;
const int num = M * N / 2;
const int blocks = (num + threads - 1) / threads;
const auto* bias_ptr_v2 = reinterpret_cast<const half2*>(b_data);
auto* data_ptr_v2 = reinterpret_cast<half2*>(out_data);
if (activation_type == "relu") {
AddBiasReluV2<half2><<<blocks, threads, 0, stream>>>(
num, bias_ptr_v2, data_ptr_v2, N / 2);
} else if (activation_type == "") {
AddBiasV2<half2><<<blocks, threads, 0, stream>>>(
num, bias_ptr_v2, data_ptr_v2, N / 2);
} else {
LOG(FATAL) << "not supported activation type: " << activation_type;
}
} else {
const int threads = 256;
const int blocks = M;
if (activation_type == "relu") {
AddBiasRelu<half><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
} else if (activation_type == "") {
AddBias<half><<<blocks, threads, 0, stream>>>(N, b_data, out_data);
} else {
LOG(FATAL) << "not supported activation type: " << activation_type;
}
}
}
} // namespace cuda
} // namespace kernels
} // namespace lite
} // namespace paddle
using FcFp32 = paddle::lite::kernels::cuda::FcCompute<float, PRECISION(kFloat)>;
using FcFp16 = paddle::lite::kernels::cuda::FcCompute<half, PRECISION(kFP16)>;
REGISTER_LITE_KERNEL(fc, kCUDA, kFloat, kNCHW, FcFp32, def)
.BindInput("Input", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA))})
.Finalize();
REGISTER_LITE_KERNEL(fc, kCUDA, kFP16, kNCHW, FcFp16, def)
.BindInput("Input",
{LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("Bias", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindInput("W", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFP16))})
.Finalize();
|
the_stack
|
#include <cub/cub.cuh>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <crc/crc.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <nvbio/sufsort/sufsort.h>
#include <nvbio/sufsort/sufsort_utils.h>
#include <nvbio/basic/exceptions.h>
#include <nvbio/basic/timer.h>
#include <nvbio/strings/string_set.h>
#include <nvbio/basic/cuda/arch.h>
#include <nvbio/basic/cuda/ldg.h>
#include <nvbio/io/fmindex/fmindex.h>
#include <nvbio/io/sequence/sequence.h>
#include <nvbio/basic/dna.h>
#include <nvbio/fmindex/bwt.h>
#include <thrust/device_vector.h>
namespace nvbio {
namespace sufsort {
template <uint32 SYMBOL_SIZE, typename offset_type>
void make_test_string_set(
const uint64 N_strings,
const uint32 N,
thrust::host_vector<uint32>& h_string,
thrust::host_vector<offset_type>& h_offsets)
{
for (uint64 i = 0; i < N_strings; ++i)
h_offsets[i] = offset_type( uint64(N)*i );
LCG_random rand;
for (uint64 i = 0; i < h_string.size(); ++i)
h_string[i] = rand.next();
h_offsets[N_strings] = N*N_strings;
}
struct SuffixHandler
{
void process(
const uint32 n_suffixes,
const uint32* suffix_array,
const uint32* string_ids,
const uint32* cum_lengths)
{
output.resize( n_suffixes );
thrust::copy(
thrust::device_ptr<const uint32>( suffix_array ),
thrust::device_ptr<const uint32>( suffix_array ) + n_suffixes,
output.begin() );
}
thrust::device_vector<uint32> output;
};
} // namespace sufsort
int sufsort_test(int argc, char* argv[])
{
enum Test
{
kGPU_SA = 1u,
kGPU_BWT = 2u,
kCPU_BWT = 4u,
kGPU_BWT_FUNCTIONAL = 8u,
kGPU_BWT_GENOME = 16u,
kGPU_BWT_SET = 32u,
kCPU_BWT_SET = 64u,
kGPU_SA_SET = 128u,
};
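// Annotation: the Test values above are bit flags OR-ed into TEST_MASK (declared
// just below, with all tests enabled by default). For example, passing
// "-tests gpu-sa:gpu-bwt" on the command line selects kGPU_SA | kGPU_BWT only;
// see the parsing loop later in this function.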
uint32 TEST_MASK = 0xFFFFFFFFu;
uint32 gpu_bwt_size = 50u;
uint32 cpu_bwt_size = 100u;
#ifdef _OPENMP
uint32 threads = omp_get_num_procs();
#else
uint32 threads = 1;
#endif
bool store_output = true;
const char* index_name = "data/human.NCBI36/Homo_sapiens.NCBI36.53.dna.toplevel.fa";
BWTParams params;
for (int i = 0; i < argc; ++i)
{
if (strcmp( argv[i], "-v" ) == 0 ||
strcmp( argv[i], "-verbosity" ) == 0 ||
strcmp( argv[i], "--verbosity" ) == 0)
{
set_verbosity( Verbosity( atoi( argv[++i] ) ) );
}
else if (strcmp( argv[i], "-cpu-mem" ) == 0)
{
params.host_memory = atoi( argv[++i] ) * uint64(1024u*1024u);
}
else if (strcmp( argv[i], "-gpu-mem" ) == 0)
{
params.device_memory = atoi( argv[++i] ) * uint64(1024u*1024u);
}
else if (strcmp( argv[i], "-cpu-bwt-size" ) == 0)
{
cpu_bwt_size = atoi( argv[++i] );
}
else if (strcmp( argv[i], "-gpu-bwt-size" ) == 0)
{
gpu_bwt_size = atoi( argv[++i] );
}
else if (strcmp( argv[i], "-threads" ) == 0)
{
threads = atoi( argv[++i] );
}
else if (strcmp( argv[i], "-no-output" ) == 0)
{
store_output = false;
}
else if ((strcmp( argv[i], "-genome" ) == 0) ||
(strcmp( argv[i], "-index" ) == 0))
{
index_name = argv[++i];
}
else if (strcmp( argv[i], "-tests" ) == 0)
{
const std::string tests_string( argv[++i] );
char temp[256];
const char* begin = tests_string.c_str();
const char* end = begin;
TEST_MASK = 0u;
while (1)
{
while (*end != ':' && *end != '\0')
{
temp[end - begin] = *end;
end++;
}
temp[end - begin] = '\0';
if (strcmp( temp, "gpu-sa" ) == 0)
TEST_MASK |= kGPU_SA;
else if (strcmp( temp, "gpu-bwt" ) == 0)
TEST_MASK |= kGPU_BWT;
else if (strcmp( temp, "gpu-bwt-func" ) == 0)
TEST_MASK |= kGPU_BWT_FUNCTIONAL;
else if (strcmp( temp, "gpu-bwt-genome" ) == 0)
TEST_MASK |= kGPU_BWT_GENOME;
else if (strcmp( temp, "cpu-bwt" ) == 0)
TEST_MASK |= kCPU_BWT;
else if (strcmp( temp, "gpu-set-bwt" ) == 0)
TEST_MASK |= kGPU_BWT_SET;
else if (strcmp( temp, "cpu-set-bwt" ) == 0)
TEST_MASK |= kCPU_BWT_SET;
if (*end == '\0')
break;
++end; begin = end;
}
}
}
#ifdef _OPENMP
// Now set the number of threads
omp_set_num_threads( threads );
#endif
log_info(stderr, "nvbio/sufsort test... started (%u threads)\n", threads);
#pragma omp parallel
{
log_info(stderr, " running on multiple threads\n");
}
const uint32 N = 100;
const uint32 SYMBOL_SIZE = 2;
const uint32 SYMBOLS_PER_WORD = (8u*sizeof(uint32)) / SYMBOL_SIZE;
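// Annotation: with SYMBOL_SIZE = 2 bits (DNA), SYMBOLS_PER_WORD = 32 / 2 = 16,
// so N_symbols symbols pack into ceil(N_symbols / 16) uint32 words.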
if (TEST_MASK & kGPU_SA)
{
typedef uint32 index_type;
typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,index_type> packed_stream_type;
const index_type N_symbols = 8u*1024u*1024u;
const index_type N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
log_info(stderr, " gpu sa test\n");
log_info(stderr, " %5.1f M symbols\n", (1.0e-6f*float(N_symbols)));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
thrust::host_vector<uint32> h_string( N_words );
LCG_random rand;
for (index_type i = 0; i < N_words; ++i)
h_string[i] = rand.next();
for (uint32 lcp = 100; lcp <= 100000; lcp *= 10)
{
// insert some long common prefixes
for (uint32 i = 50; i < 50 + lcp; ++i)
h_string[i] = 0;
thrust::device_vector<uint32> d_string( h_string );
thrust::device_vector<uint32> d_sa( N_symbols+1 );
cudaDeviceSynchronize();
packed_stream_type d_packed_string( nvbio::plain_view( d_string ) );
log_info(stderr, "\n sa... started (LCP: %u)\n", lcp*16u);
Timer timer;
timer.start();
cuda::suffix_sort(
N_symbols,
d_packed_string,
d_sa.begin(),
&params );
cudaDeviceSynchronize();
timer.stop();
log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds()));
if (1)
{
log_info(stderr, " sa-is... started\n");
timer.start();
std::vector<int32> sa_ref( N_symbols+1 );
gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa_ref[0] );
timer.stop();
log_info(stderr, " sa-is... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds()));
thrust::host_vector<uint32> h_sa( d_sa );
for (uint32 i = 0; i < N_symbols; ++i)
{
const uint32 s = h_sa[i];
const uint32 r = sa_ref[i];
if (s != r)
{
log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s);
return 0u;
}
}
}
}
FILE* file = fopen("./data/howto", "r" );
if (file == NULL)
log_warning(stderr, " unable to open \"howto\" file\n");
else
{
log_info(stderr, "\n loading \"howto\" text benchmark\n");
fseek( file, 0, SEEK_END );
const uint32 N_symbols = uint32( ftell( file ) );
thrust::host_vector<uint8> h_text( N_symbols );
rewind( file );
fread( &h_text[0], 1, N_symbols, file );
fclose( file );
thrust::device_vector<uint8> d_text( h_text );
thrust::device_vector<uint32> d_sa( N_symbols+1 );
cudaDeviceSynchronize();
log_info(stderr, " sa... started (%u bytes)\n", N_symbols);
Timer timer;
timer.start();
cuda::suffix_sort(
N_symbols,
d_text.begin(),
d_sa.begin(),
&params );
cudaDeviceSynchronize();
timer.stop();
log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds()));
if (1)
{
log_info(stderr, " sa-is... started\n");
timer.start();
std::vector<int32> sa_ref( N_symbols+1 );
sa_ref[0] = N_symbols;
saisxx( nvbio::plain_view( h_text ), &sa_ref[0] + 1, int32(N_symbols), 256 );
timer.stop();
log_info(stderr, " sa-is... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds()));
thrust::host_vector<uint32> h_sa( d_sa );
for (uint32 i = 0; i < N_symbols; ++i)
{
const uint32 s = h_sa[i];
const uint32 r = sa_ref[i];
if (s != r)
{
log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s);
return 0u;
}
}
}
}
}
if (TEST_MASK & kGPU_SA_SET)
{
typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type;
typedef ConcatenatedStringSet<packed_stream_type,uint32*> string_set;
const uint32 N_strings = 1024*1024;
const uint32 N_tests = 10;
const uint32 N_words = uint32((uint64(N_strings)*N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD);
thrust::host_vector<uint32> h_string( N_words );
thrust::host_vector<uint32> h_offsets( N_strings+1 );
sufsort::make_test_string_set<SYMBOL_SIZE>(
N_strings,
N,
h_string,
h_offsets );
thrust::device_vector<uint32> d_string( h_string );
thrust::device_vector<uint32> d_offsets( h_offsets );
packed_stream_type d_packed_string( nvbio::plain_view( d_string ) );
string_set d_string_set(
N_strings,
d_packed_string,
nvbio::plain_view( d_offsets ) );
cudaDeviceSynchronize();
log_info(stderr, " gpu SA test\n");
log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings)));
log_info(stderr, " %5.1f M suffixes\n", (1.0e-6f*float(N_strings*(N+1))));
log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2)));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
// copy a sparse string set into a packed concatenated one
{
sufsort::SuffixHandler suffix_handler;
Timer timer;
timer.start();
// sort the suffixes
for (uint32 i = 0; i < N_tests; ++i)
cuda::suffix_sort( d_string_set, suffix_handler, &params );
cudaDeviceSynchronize();
timer.stop();
log_info(stderr, " sorting time: %.2fs\n", timer.seconds()/float(N_tests));
log_info(stderr, " %5.1f M strings/s\n", (1.0e-6f*float(N_strings)) * (float(N_tests)/timer.seconds()));
log_info(stderr, " %5.1f M suffixes/s\n", (1.0e-6f*float(N_strings*(N+1))) * (float(N_tests)/timer.seconds()));
log_info(stderr, " %5.1f G symbols/s\n", (1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2)) * (float(N_tests)/timer.seconds()));
}
}
if (TEST_MASK & kGPU_BWT_FUNCTIONAL)
{
typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint32> packed_stream_type;
const uint32 N_words = 8;
const uint32 N_symbols = N_words * SYMBOLS_PER_WORD - 13u;
char char_string[N_symbols+1];
log_info(stderr, " gpu bwt test\n");
thrust::host_vector<uint32> h_string( N_words );
thrust::host_vector<uint32> h_bwt( N_words+1 );
thrust::host_vector<uint32> h_bwt_ref( N_words+1 );
uint32 primary_ref;
LCG_random rand;
for (uint32 i = 0; i < N_words; ++i)
h_string[i] = rand.next();
dna_to_string(
packed_stream_type( nvbio::plain_view( h_string ) ),
N_symbols,
char_string );
log_info(stderr, " str : %s\n", char_string );
{
// generate the SA using SA-IS
int32 sa[N_symbols+1];
gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa[0] );
// generate the BWT from the SA
primary_ref = gen_bwt_from_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), sa, packed_stream_type( nvbio::plain_view( h_bwt_ref ) ) );
dna_to_string(
packed_stream_type( nvbio::plain_view( h_bwt_ref ) ),
N_symbols,
char_string );
log_info(stderr, " primary : %u\n", primary_ref );
log_info(stderr, " bwt : %s\n", char_string );
}
thrust::device_vector<uint32> d_string( h_string );
thrust::device_vector<uint32> d_bwt( N_words+1 );
cudaDeviceSynchronize();
packed_stream_type d_packed_string( nvbio::plain_view( d_string ) );
packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) );
log_info(stderr, " bwt... started\n");
Timer timer;
timer.start();
const uint32 primary = cuda::bwt(
N_symbols,
d_packed_string,
d_packed_bwt,
&params );
timer.stop();
log_info(stderr, " bwt... done: %.2fs\n", timer.seconds());
h_bwt = d_bwt;
{
// check whether the results match our expectations
packed_stream_type h_packed_bwt_ref( nvbio::plain_view( h_bwt_ref ) );
packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt ) );
bool check = (primary_ref == primary);
for (uint32 i = 0; i < N_symbols; ++i)
{
if (h_packed_bwt[i] != h_packed_bwt_ref[i])
check = false;
}
if (check == false)
{
dna_to_string(
packed_stream_type( nvbio::plain_view( h_bwt ) ),
N_symbols,
char_string );
log_error(stderr, "mismatching results!\n" );
log_error(stderr, " primary : %u\n", primary );
log_error(stderr, " bwt : %s\n", char_string );
return 0u;
}
}
}
if (TEST_MASK & kGPU_BWT_GENOME)
{
// load a genome
io::SequenceDataHost h_ref;
io::FMIndexDataHost h_fmi;
if (io::load_sequence_file( DNA, &h_ref, index_name ) == false)
return 0;
if (h_fmi.load( index_name, io::FMIndexData::FORWARD ) == false)
return 0;
// copy it to the gpu
io::SequenceDataDevice d_ref( h_ref );
io::FMIndexDataDevice d_fmi( h_fmi, 0u );
typedef io::SequenceDataAccess<DNA,io::ConstSequenceDataView> const_reference_access_type;
typedef io::SequenceDataEdit<DNA,io::SequenceDataView> reference_access_type;
typedef const_reference_access_type::sequence_stream_type const_packed_stream_type;
typedef reference_access_type::sequence_stream_type packed_stream_type;
const uint32 N_symbols = d_ref.bps();
const uint32 N_words = d_ref.words();
log_info(stderr, " gpu bwt test\n");
log_info(stderr, " %5.1f G symbols\n", (1.0e-6f*float(N_symbols)));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
thrust::device_vector<uint32> d_bwt_storage( N_words+1 );
const const_reference_access_type d_ref_access( d_ref );
const_packed_stream_type d_packed_string( d_ref_access.sequence_stream() );
packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt_storage ) );
const uint32 primary_ref = cuda::find_primary( N_symbols, d_packed_string );
log_info(stderr, " primary: %u\n", primary_ref);
{
const const_reference_access_type h_ref_access( h_ref );
const_packed_stream_type h_packed_string( h_ref_access.sequence_stream() );
const uint32 crc = crcCalc( h_packed_string, N_symbols );
log_info(stderr, " crc : %u\n", crc);
}
log_info(stderr, " bwt... started\n");
Timer timer;
timer.start();
const uint32 primary = cuda::bwt(
N_symbols,
d_packed_string,
d_packed_bwt,
&params );
timer.stop();
log_info(stderr, " bwt... done: %.2fs\n", timer.seconds());
bool check = primary == primary_ref;
if (check == false)
{
log_error(stderr, "mismatching results!\n" );
log_error(stderr, " primary : %u\n", primary );
return 0u;
}
log_info(stderr, " testing correctness... started\n");
thrust::host_vector<uint32> h_bwt_storage( d_bwt_storage );
const const_packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt_storage ) );
const io::FMIndexData::bwt_stream_type h_ref_bwt( h_fmi.bwt_iterator() );
for (uint32 i = 0; i < N_symbols; ++i)
{
const uint8 c0 = h_ref_bwt[i];
const uint8 c1 = h_packed_bwt[i];
if (c0 != c1)
{
log_error(stderr, "mismatching results!\n" );
log_error(stderr, " at %u, expected %c, got %c\n", i, dna_to_char(c0), dna_to_char(c1) );
return 0u;
}
}
log_info(stderr, " testing correctness... done\n");
{
const uint32 crc = crcCalc( h_packed_bwt, N_symbols );
log_info(stderr, " crc: %u\n", crc);
}
}
if (TEST_MASK & kGPU_BWT)
{
typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type;
const uint64 N_symbols = 4llu*1024u*1024u*1024u - 1u;
const uint64 N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD;
log_info(stderr, " gpu bwt test\n");
log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(N_symbols)));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
thrust::host_vector<uint32> h_string( N_words );
LCG_random rand;
for (uint64 i = 0; i < N_words; ++i)
h_string[i] = rand.next();
// insert some long common prefixes
for (uint32 i = 50; i < 100; ++i)
h_string[i] = 0;
thrust::device_vector<uint32> d_string( h_string );
thrust::device_vector<uint32> d_bwt( N_words );
cudaDeviceSynchronize();
packed_stream_type d_packed_string( nvbio::plain_view( d_string ) );
packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) );
log_info(stderr, " bwt... started\n");
Timer timer;
timer.start();
cuda::bwt(
N_symbols,
d_packed_string,
d_packed_bwt,
&params );
timer.stop();
log_info(stderr, " bwt... done: %.2fs\n", timer.seconds());
}
if (TEST_MASK & kGPU_BWT_SET)
{
typedef uint32 word_type;
typedef cuda::load_pointer<word_type,cuda::LOAD_DEFAULT> storage_type;
typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type;
typedef PackedStream<storage_type,uint8,SYMBOL_SIZE,true,uint64> mod_packed_stream_type;
typedef ConcatenatedStringSet<mod_packed_stream_type,uint64*> string_set;
const uint32 N_strings = gpu_bwt_size*1000*1000;
const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD );
const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD );
log_info(stderr, " gpu set-bwt test\n");
log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings)));
log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1))));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
thrust::host_vector<uint32> h_string( N_words );
thrust::host_vector<uint64> h_offsets( N_strings+1 );
sufsort::make_test_string_set<SYMBOL_SIZE>(
N_strings,
N,
h_string,
h_offsets );
thrust::device_vector<uint32> d_string( h_string );
thrust::device_vector<uint64> d_offsets( h_offsets );
cudaDeviceSynchronize();
mod_packed_stream_type d_packed_string( storage_type( (word_type*)nvbio::plain_view( d_string ) ) );
string_set d_string_set(
N_strings,
d_packed_string,
nvbio::plain_view( d_offsets ) );
log_info(stderr, " bwt... started\n");
Timer timer;
if (store_output)
{
thrust::device_vector<uint32> d_bwt( N_bwt_words );
packed_stream_type d_packed_bwt( (word_type*)nvbio::plain_view( d_bwt ) );
DeviceBWTHandler<packed_stream_type> output_handler( d_packed_bwt );
timer.start();
cuda::bwt<SYMBOL_SIZE,true>(
d_string_set,
output_handler,
&params );
timer.stop();
}
else
{
DiscardBWTHandler output_handler;
timer.start();
cuda::bwt<SYMBOL_SIZE,true>(
d_string_set,
output_handler,
&params );
timer.stop();
}
log_info(stderr, " bwt... done: %.2fs\n", timer.seconds());
}
if (TEST_MASK & kCPU_BWT_SET)
{
typedef uint32 word_type;
typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type;
typedef ConcatenatedStringSet<packed_stream_type,uint64*> string_set;
const uint32 N_strings = cpu_bwt_size*1000*1000;
const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD );
const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD );
log_info(stderr, " cpu set-bwt test\n");
log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings)));
log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1))));
log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024));
thrust::host_vector<uint32> h_string( N_words );
thrust::host_vector<uint64> h_offsets( N_strings+1 );
sufsort::make_test_string_set<SYMBOL_SIZE>(
N_strings,
N,
h_string,
h_offsets );
packed_stream_type h_packed_string( (word_type*)nvbio::plain_view( h_string ) );
string_set h_string_set(
N_strings,
h_packed_string,
nvbio::plain_view( h_offsets ) );
log_info(stderr, " bwt... started\n");
Timer timer;
if (store_output)
{
thrust::host_vector<uint32> h_bwt( N_bwt_words );
packed_stream_type h_packed_bwt( (word_type*)nvbio::plain_view( h_bwt ) );
HostBWTHandler<packed_stream_type> output_handler( h_packed_bwt );
timer.start();
large_bwt<SYMBOL_SIZE,true>(
h_string_set,
output_handler,
&params );
timer.stop();
}
else
{
DiscardBWTHandler output_handler;
timer.start();
large_bwt<SYMBOL_SIZE,true>(
h_string_set,
output_handler,
&params );
timer.stop();
}
log_info(stderr, " bwt... done: %.2fs\n", timer.seconds());
}
log_info(stderr, "nvbio/sufsort test... done\n");
return 0;
}
} // namespace nvbio
using namespace nvbio;
int main(int argc, char* argv[])
{
crcInit();
int cuda_device = -1;
int device_count;
cudaGetDeviceCount(&device_count);
log_verbose(stderr, " cuda devices : %d\n", device_count);
int arg = 1;
if (argc > 1)
{
if (strcmp( argv[arg], "-device" ) == 0)
{
cuda_device = atoi(argv[++arg]);
++arg;
}
}
// inspect and select cuda devices
if (device_count)
{
if (cuda_device == -1)
{
int best_device = 0;
cudaDeviceProp best_device_prop;
cudaGetDeviceProperties( &best_device_prop, best_device );
for (int device = 0; device < device_count; ++device)
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, device );
log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor);
log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount);
log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000);
log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f);
if (device_prop.major >= best_device_prop.major &&
device_prop.minor >= best_device_prop.minor)
{
best_device_prop = device_prop;
best_device = device;
}
}
cuda_device = best_device;
}
log_verbose(stderr, " chosen device %d\n", cuda_device);
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties( &device_prop, cuda_device );
log_verbose(stderr, " device name : %s\n", device_prop.name);
log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor);
}
cudaSetDevice( cuda_device );
}
// allocate some heap
cudaDeviceSetLimit( cudaLimitMallocHeapSize, 128*1024*1024 );
argc = argc >= arg ? argc-arg : 0;
try
{
nvbio::sufsort_test( argc, argv+arg );
}
catch (nvbio::cuda_error e)
{
log_error(stderr, "caught a nvbio::cuda_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::bad_alloc e)
{
log_error(stderr, "caught a nvbio::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::logic_error e)
{
log_error(stderr, "caught a nvbio::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (nvbio::runtime_error e)
{
log_error(stderr, "caught a nvbio::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::bad_alloc e)
{
log_error(stderr, "caught a std::bad_alloc exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::logic_error e)
{
log_error(stderr, "caught a std::logic_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (std::runtime_error e)
{
log_error(stderr, "caught a std::runtime_error exception:\n");
log_error(stderr, " %s\n", e.what());
}
catch (...)
{
log_error(stderr,"unknown exception caught!\n");
exit(1);
}
cudaDeviceReset();
return 0;
}
|
the_stack
|
#pragma once
#include <gunrock/graph/gp.cuh>
#include <gunrock/oprtr/advance/advance_base.cuh>
#include <gunrock/app/enactor_helper.cuh>
#include <gunrock/app/enactor_kernel.cuh>
namespace gunrock {
namespace app {
using IterationFlag = uint32_t;
enum : IterationFlag {
Use_SubQ = 0x01,
Use_FullQ = 0x02,
Push = 0x10,
Pull = 0x20,
Update_Predecessors = 0x100,
Unified_Receive = 0x200,
Use_Double_Buffer = 0x400,
Skip_Makeout_Selection = 0x800,
Skip_PreScan = 0x1000,
Iteration_Default = Use_FullQ | Push,
};
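// Annotation (not in the original header): IterationFlag values are combined with
// bitwise OR and passed as the _FLAG template argument of IterationLoopBase below.
// For instance, a push-based traversal that also tracks predecessors might use
//   Use_FullQ | Push | Update_Predecessors
// while the default is Use_FullQ | Push.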
template <typename EnactorT, bool valid>
struct BoolSwitch {
static cudaError_t UpdatePreds(EnactorT *enactor, int gpu_num,
typename EnactorT::SizeT num_elements) {
return cudaSuccess;
}
};
template <typename EnactorT>
struct BoolSwitch<EnactorT, true> {
static cudaError_t UpdatePreds(EnactorT *enactor, int gpu_num,
typename EnactorT::SizeT num_elements) {
typedef typename EnactorT::VertexT VertexT;
typedef typename EnactorT::SizeT SizeT;
cudaError_t retval = cudaSuccess;
int k = gpu_num * enactor->num_gpus;
if (enactor->flag & Size_Check) k += enactor->num_gpus;
// int selector = frontier_attribute->selector;
int block_size = 256;
int grid_size = num_elements / block_size;
if ((num_elements % block_size) != 0) grid_size++;
if (grid_size > 512) grid_size = 512;
auto &enactor_slice = enactor->enactor_slices[k];
auto &data_slice = enactor->problem->data_slices[gpu_num][0];
auto &frontier = enactor_slice.frontier;
auto &stream = enactor_slice.stream;
auto &sub_graph = enactor->problem->sub_graphs[gpu_num];
CopyPreds_Kernel<VertexT, SizeT><<<grid_size, block_size, 0, stream>>>(
num_elements, frontier.V_Q()->GetPointer(util::DEVICE),
data_slice.preds.GetPointer(util::DEVICE),
data_slice.temp_preds.GetPointer(util::DEVICE));
UpdatePreds_Kernel<VertexT, SizeT><<<grid_size, block_size, 0, stream>>>(
num_elements, sub_graph.nodes, frontier.V_Q()->GetPointer(util::DEVICE),
sub_graph.original_vertex.GetPointer(util::DEVICE),
data_slice.temp_preds.GetPointer(util::DEVICE),
data_slice.preds.GetPointer(util::DEVICE));
return retval;
}
};
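// Annotation: BoolSwitch is a compile-time switch. The primary template turns
// UpdatePreds() into a no-op; the <EnactorT, true> specialization above launches
// the CopyPreds/UpdatePreds kernels. IterationLoopBase::UpdatePreds() dispatches
// via BoolSwitch<Enactor, (FLAG & Update_Predecessors) != 0>, so the kernels are
// only launched for iteration types that set Update_Predecessors.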
/*
* @brief IterationLoopBase data structure.
* @tparam Iteration_Flag
* @tparam Enactor
*/
template <typename _Enactor, IterationFlag _FLAG = Iteration_Default>
struct IterationLoopBase {
public:
typedef _Enactor Enactor;
typedef typename Enactor::SizeT SizeT;
typedef typename Enactor::ValueT ValueT;
typedef typename Enactor::VertexT VertexT;
typedef typename Enactor::Problem Problem;
typedef typename Problem::DataSlice DataSlice;
static const IterationFlag FLAG = _FLAG;
Enactor *enactor;
int gpu_num;
IterationFlag flag;
IterationLoopBase() : enactor(NULL), gpu_num(0) {}
cudaError_t Init(Enactor *enactor, int gpu_num) {
this->enactor = enactor;
this->gpu_num = gpu_num;
return cudaSuccess;
}
cudaError_t Core(int peer_) { return cudaSuccess; }
cudaError_t Gather(int peer_) { return cudaSuccess; }
bool Stop_Condition(int gpu_num = 0) { return All_Done(enactor[0], gpu_num); }
cudaError_t Change() {
auto &enactor_stats =
enactor->enactor_slices[gpu_num * enactor->num_gpus].enactor_stats;
enactor_stats.iteration++;
return enactor_stats.retval;
}
cudaError_t UpdatePreds(SizeT num_elements) {
cudaError_t retval = cudaSuccess;
if (num_elements == 0) return retval;
retval =
BoolSwitch<Enactor, (FLAG & Update_Predecessors) != 0>::UpdatePreds(
enactor, gpu_num, num_elements);
return retval;
}
cudaError_t Check_Queue_Size(int peer_) {
int k = gpu_num * enactor->num_gpus + peer_;
auto &enactor_slice = enactor->enactor_slices[k];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &frontier = enactor_slice.frontier;
bool over_sized = false;
// int selector = frontier_attribute->selector;
auto iteration = enactor_stats.iteration;
auto request_length = frontier.output_length[0] + 2;
auto &retval = enactor_stats.retval;
if (enactor->flag & Debug) {
printf("%d\t %lld\t %d\t queue_size = %lld, output_length = %lld\n",
gpu_num, (long long)iteration, peer_,
(long long)(frontier.Next_V_Q()->GetSize()),
(long long)request_length);
fflush(stdout);
}
retval = CheckSize<SizeT, VertexT>(true, "queue3", request_length,
frontier.Next_V_Q(), over_sized, gpu_num,
iteration, peer_, false);
if (retval) return retval;
retval = CheckSize<SizeT, VertexT>(true, "queue3", request_length,
frontier.V_Q(), over_sized, gpu_num,
iteration, peer_, true);
if (retval) return retval;
// TODO
// if (enactor -> problem -> use_double_buffer)
//{
// if (enactor_stats->retval =
// Check_Size</*true,*/ SizeT, Value> (
// true, "queue3", request_length,
// &frontier_queue->values[selector^1], over_sized, thread_num,
// iteration, peer_, false)) return;
// if (enactor_stats->retval =
// Check_Size</*true,*/ SizeT, Value> (
// true, "queue3", request_length,
// &frontier_queue->values[selector ], over_sized, thread_num,
// iteration, peer_, true )) return;
//}
return retval;
}
/*
* @brief Make_Output function.
* @tparam NUM_VERTEX_ASSOCIATES
* @tparam NUM_VALUE__ASSOCIATES
*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES>
cudaError_t MakeOutput(SizeT num_elements) {
auto &mgpu_slice = enactor->mgpu_slices[gpu_num];
int num_gpus = enactor->num_gpus;
auto &enactor_slice =
enactor->enactor_slices[gpu_num * num_gpus +
((enactor->flag & Size_Check) ? 0 : num_gpus)];
auto &enactor_stats = enactor_slice.enactor_stats;
auto &retval = enactor_stats.retval;
auto &stream = enactor_slice.stream;
auto &frontier = enactor_slice.frontier;
auto &graph = enactor->problem->sub_graphs[gpu_num];
if (num_gpus < 2) return retval;
if (num_elements == 0) {
for (int peer_ = 0; peer_ < num_gpus; peer_++) {
mgpu_slice.out_length[peer_] = 0;
}
return retval;
}
bool over_sized = false, keys_over_sized = false;
// int selector = frontier_attribute->selector;
// printf("%d Make_Output begin, num_elements = %d, size_check = %s\n",
// data_slice -> gpu_idx, num_elements, enactor->size_check ? "true" :
// "false");
// fflush(stdout);
SizeT size_multi = 0;
if (FLAG & Push) size_multi += 1;
if (FLAG & Pull) size_multi += 1;
for (int peer_ = 0; peer_ < num_gpus; peer_++) {
if (retval = CheckSize<SizeT, VertexT>(
enactor->flag & Size_Check, "keys_out", num_elements * size_multi,
(peer_ == 0) ? enactor_slice.frontier.Next_V_Q()
: &(mgpu_slice.keys_out[peer_]),
keys_over_sized, gpu_num, enactor_stats.iteration, peer_, false))
break;
// if (keys_over_sized)
mgpu_slice.keys_outs[peer_] =
(peer_ == 0)
? enactor_slice.frontier.Next_V_Q()->GetPointer(util::DEVICE)
: mgpu_slice.keys_out[peer_].GetPointer(util::DEVICE);
if (peer_ == 0) continue;
over_sized = false;
// for (i = 0; i< NUM_VERTEX_ASSOCIATES; i++)
//{
if (retval = CheckSize<SizeT, VertexT>(
enactor->flag & Size_Check, "vertex_associate_outs",
num_elements * NUM_VERTEX_ASSOCIATES * size_multi,
&mgpu_slice.vertex_associate_out[peer_], over_sized, gpu_num,
enactor_stats.iteration, peer_, false))
break;
// if (over_sized)
mgpu_slice.vertex_associate_outs[peer_] =
mgpu_slice.vertex_associate_out[peer_].GetPointer(util::DEVICE);
//}
// if (enactor_stats->retval) break;
// if (over_sized)
// data_slice->vertex_associate_outs[peer_].Move(
// util::HOST, util::DEVICE, NUM_VERTEX_ASSOCIATES, 0, stream);
over_sized = false;
// for (i=0;i<NUM_VALUE__ASSOCIATES;i++)
//{
if (retval = CheckSize<SizeT, ValueT>(
enactor->flag & Size_Check, "value__associate_outs",
num_elements * NUM_VALUE__ASSOCIATES * size_multi,
&mgpu_slice.value__associate_out[peer_], over_sized, gpu_num,
enactor_stats.iteration, peer_, false))
break;
// if (over_sized)
mgpu_slice.value__associate_outs[peer_] =
mgpu_slice.value__associate_out[peer_].GetPointer(util::DEVICE);
//}
// if (enactor_stats->retval) break;
// if (over_sized)
// data_slice->value__associate_outs[peer_].Move(
// util::HOST, util::DEVICE, NUM_VALUE__ASSOCIATES, 0, stream);
if (FLAG & Skip_Makeout_Selection) break;
}
if (retval) return retval;
if (FLAG & Skip_Makeout_Selection) {
if (NUM_VALUE__ASSOCIATES == 0 && NUM_VERTEX_ASSOCIATES == 0) {
// util::MemsetCopyVectorKernel<<<120, 512, 0, stream>>>(
// data_slice -> keys_out[1].GetPointer(util::DEVICE),
// frontier_queue -> keys[frontier_attribute ->
// selector].GetPointer(util::DEVICE), num_elements);
mgpu_slice.keys_out[1].ForEach(
frontier.V_Q()[0],
[] __host__ __device__(VertexT & key_out, const VertexT &key_in) {
key_out = key_in;
},
num_elements, util::DEVICE, stream);
for (int peer_ = 0; peer_ < num_gpus; peer_++)
mgpu_slice.out_length[peer_] = num_elements;
if (retval = util::GRError(cudaStreamSynchronize(stream),
"cudaStreamSynchronize failed", __FILE__,
__LINE__))
return retval;
return retval;
} else {
for (int peer_ = 2; peer_ < num_gpus; peer_++) {
mgpu_slice.keys_out[peer_].SetPointer(
mgpu_slice.keys_out[1].GetPointer(util::DEVICE),
mgpu_slice.keys_out[1].GetSize(), util::DEVICE);
mgpu_slice.keys_outs[peer_] =
mgpu_slice.keys_out[peer_].GetPointer(util::DEVICE);
mgpu_slice.vertex_associate_out[peer_].SetPointer(
mgpu_slice.vertex_associate_out[1].GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_out[1].GetSize(), util::DEVICE);
mgpu_slice.vertex_associate_outs[peer_] =
mgpu_slice.vertex_associate_out[peer_].GetPointer(util::DEVICE);
mgpu_slice.value__associate_out[peer_].SetPointer(
mgpu_slice.value__associate_out[1].GetPointer(util::DEVICE),
mgpu_slice.value__associate_out[1].GetSize(), util::DEVICE);
mgpu_slice.value__associate_outs[peer_] =
mgpu_slice.value__associate_out[peer_].GetPointer(util::DEVICE);
}
}
}
// printf("%d Make_Out 1\n", data_slice -> gpu_idx);
// fflush(stdout);
// if (keys_over_sized)
mgpu_slice.keys_outs.Move(util::HOST, util::DEVICE, num_gpus, 0, stream);
mgpu_slice.vertex_associate_outs.Move(util::HOST, util::DEVICE, num_gpus, 0,
stream);
mgpu_slice.value__associate_outs.Move(util::HOST, util::DEVICE, num_gpus, 0,
stream);
// util::cpu_mt::PrintGPUArray<SizeT, VertexId>("PreMakeOut",
// frontier_queue -> keys[frontier_attribute ->
// selector].GetPointer(util::DEVICE), num_elements, data_slice ->
// gpu_idx, enactor_stats -> iteration, -1, stream);
int block_size = 512;
int grid_size = num_elements / block_size / 2 + 1;
if (grid_size > 480) grid_size = 480;
// printf("%d Make_Out 2, num_blocks = %d, num_threads = %d\n",
// data_slice -> gpu_idx, num_blocks, AdvanceKernelPolicy::THREADS);
// fflush(stdout);
if ((FLAG & Skip_Makeout_Selection) == 0) {
for (int i = 0; i < num_gpus; i++) mgpu_slice.out_length[i] = 1;
mgpu_slice.out_length.Move(util::HOST, util::DEVICE, num_gpus, 0, stream);
// printf("Make_Output direction = %s %s\n", FORWARD ? "FORWARD" : "",
// BACKWARD ? "BACKWARD" : "");
/*printf("num_blocks = %d, num_threads = %d, stream = %p, "
"num_elements = %d, num_gpus = %d, out_length = %p, (%d)"
"keys_in = %p (%d), partition_table = %p (%d), convertion_table = %d
(%d), " "vertex_associate_orgs = %p (%d), value__associate_orgs = %p
(%d), " "keys_outs = %p (%d), vertex_associate_outs = %p (%d),
value__associate_outs = %p (%d), " "keep_node_num = %s,
num_vertex_associates = %d, num_value_associates = %d\n", num_blocks,
AdvanceKernelPolicy::THREADS /2, stream, num_elements, num_gpus,
data_slice -> out_length.GetPointer(util::DEVICE), data_slice ->
out_length.GetSize(), frontier_queue -> keys[frontier_attribute ->
selector].GetPointer(util::DEVICE), frontier_queue ->
keys[frontier_attribute -> selector].GetSize(), graph_slice ->
partition_table .GetPointer(util::DEVICE), graph_slice ->
partition_table .GetSize(), graph_slice -> convertion_table
.GetPointer(util::DEVICE), graph_slice -> convertion_table .GetSize(),
data_slice -> vertex_associate_orgs[0],
data_slice -> vertex_associate_orgs.GetSize(),
data_slice -> value__associate_orgs[0],
data_slice -> value__associate_orgs.GetSize(),
data_slice -> keys_outs .GetPointer(util::DEVICE),
data_slice -> keys_outs .GetSize(),
data_slice -> vertex_associate_outs[1],
data_slice -> vertex_associate_outs.GetSize(),
data_slice -> value__associate_outs[1],
data_slice -> value__associate_outs.GetSize(),
enactor -> problem -> keep_node_num ? "true" : "false",
NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES);*/
if (FLAG & Push)
MakeOutput_Kernel<VertexT, SizeT, ValueT, NUM_VERTEX_ASSOCIATES,
NUM_VALUE__ASSOCIATES>
<<<grid_size, block_size, 0, stream>>>(
num_elements, num_gpus,
mgpu_slice.out_length.GetPointer(util::DEVICE),
frontier.V_Q()->GetPointer(util::DEVICE),
graph.partition_table.GetPointer(util::DEVICE),
graph.convertion_table.GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.value__associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.keys_outs.GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_outs.GetPointer(util::DEVICE),
mgpu_slice.value__associate_outs.GetPointer(util::DEVICE),
enactor->problem->flag & partitioner::Keep_Node_Num);
if (FLAG & Pull)
MakeOutput_Backward_Kernel<VertexT, SizeT, ValueT,
NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>
<<<grid_size, block_size, 0, stream>>>(
num_elements, num_gpus,
mgpu_slice.out_length.GetPointer(util::DEVICE),
frontier.V_Q()->GetPointer(util::DEVICE),
graph.backward_offset.GetPointer(util::DEVICE),
graph.backward_partition.GetPointer(util::DEVICE),
graph.backward_convertion.GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.value__associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.keys_outs.GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_outs.GetPointer(util::DEVICE),
mgpu_slice.value__associate_outs.GetPointer(util::DEVICE),
enactor->problem->flag & partitioner::Keep_Node_Num);
mgpu_slice.out_length.Move(util::DEVICE, util::HOST, num_gpus, 0, stream);
frontier.queue_index++;
} else {
MakeOutput_SkipSelection_Kernel<
VertexT, SizeT, ValueT, NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>
<<<grid_size, block_size, 0, stream>>>(
num_elements, frontier.V_Q()->GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.value__associate_orgs.GetPointer(util::DEVICE),
mgpu_slice.keys_out[1].GetPointer(util::DEVICE),
mgpu_slice.vertex_associate_out[1].GetPointer(util::DEVICE),
mgpu_slice.value__associate_out[1].GetPointer(util::DEVICE));
for (int peer_ = 0; peer_ < num_gpus; peer_++)
mgpu_slice.out_length[peer_] = num_elements;
}
if (retval = util::GRError(cudaStreamSynchronize(stream),
"Make_Output failed", __FILE__, __LINE__))
return retval;
if ((FLAG & Skip_Makeout_Selection) == 0) {
for (int i = 0; i < num_gpus; i++) {
mgpu_slice.out_length[i]--;
// printf("out_length[%d] = %d\n", i, data_slice -> out_length[i]);
}
}
// for (int i=0; i<num_gpus; i++)
//{
// if (i == 0)
// printf("%d, selector = %d, keys = %p\n",
// data_slice -> gpu_idx, frontier_attribute -> selector^1,
// data_slice -> keys_outs[i]);
// util::cpu_mt::PrintGPUArray<SizeT, VertexId>("PostMakeOut",
// data_slice -> keys_outs[i], data_slice -> out_length[i],
// data_slice -> gpu_idx, enactor_stats -> iteration, i, stream);
//}
// printf("%d Make_Out 3\n", data_slice -> gpu_idx);
// fflush(stdout);
return retval;
}
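// Annotation (a reading of the code above, not authoritative): MakeOutput() stages
// the current frontier for multi-GPU exchange. When Skip_Makeout_Selection is not
// set, the Push and/or Pull kernels scatter keys plus the vertex/value associates
// into per-peer output buffers using the forward partition/convertion tables or
// the backward_* tables respectively; out_length is then copied back and reduced
// by one per peer, which suggests the kernels reserve the first slot as a counter.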
/*template <
int NUM_VERTEX_ASSOCIATES,
int NUM_VALUE__ASSOCIATES>
cudaError_t Expand_Incoming(int peer_)
{
return cudaSuccess;
}*/
template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES,
typename ExpandOpT>
cudaError_t ExpandIncomingBase(SizeT &received_length, int peer_,
ExpandOpT expand_op) {
bool over_sized = false;
auto &mgpu_slice = enactor->mgpu_slices[gpu_num];
auto &enactor_slice =
enactor->enactor_slices[gpu_num * enactor->num_gpus +
((FLAG & Unified_Receive) ? 0 : peer_)];
auto &iteration = enactor_slice.enactor_stats.iteration;
auto &out_length = mgpu_slice.in_length_out;
auto &num_elements = mgpu_slice.in_length[iteration % 2][peer_];
auto &keys_in = mgpu_slice.keys_in[iteration % 2][peer_];
auto &vertex_associate_in =
mgpu_slice.vertex_associate_in[iteration % 2][peer_];
auto &value__associate_in =
mgpu_slice.value__associate_in[iteration % 2][peer_];
auto &retval = enactor_slice.enactor_stats.retval;
auto &frontier = enactor_slice.frontier;
auto &stream = enactor_slice.stream;
if (FLAG & Unified_Receive) {
retval = CheckSize<SizeT, VertexT>(
enactor->flag & Size_Check, "incoming_queue",
num_elements + received_length, frontier.V_Q(), over_sized, gpu_num,
iteration, peer_, true);
if (retval) return retval;
received_length += num_elements;
} else {
retval = CheckSize<SizeT, VertexT>(
enactor->flag & Size_Check, "incomping_queue", num_elements,
frontier.V_Q(), over_sized, gpu_num, iteration, peer_, false);
if (retval) return retval;
out_length[peer_] = 0;
GUARD_CU(out_length.Move(util::HOST, util::DEVICE, 1, peer_, stream));
}
int block_size = 512;
int grid_size = num_elements / block_size + 1;
if (grid_size > 240) grid_size = 240;
ExpandIncoming_Kernel<VertexT, SizeT, ValueT, NUM_VERTEX_ASSOCIATES,
NUM_VALUE__ASSOCIATES>
<<<grid_size, block_size, 0, stream>>>(
gpu_num, num_elements, keys_in.GetPointer(util::DEVICE),
vertex_associate_in.GetPointer(util::DEVICE),
value__associate_in.GetPointer(util::DEVICE),
out_length.GetPointer(util::DEVICE) +
((FLAG & Unified_Receive) ? 0 : peer_),
frontier.V_Q()->GetPointer(util::DEVICE), expand_op);
GUARD_CU(out_length.Move(util::DEVICE, util::HOST, 1,
(FLAG & Unified_Receive) ? 0 : peer_, stream));
return retval;
}
cudaError_t Compute_OutputLength(int peer_) {
cudaError_t retval = cudaSuccess;
bool over_sized = false;
auto &enactor_slice =
enactor->enactor_slices[gpu_num * enactor->num_gpus + peer_];
auto &frontier = enactor_slice.frontier;
auto &stream = enactor_slice.stream;
auto &graph = enactor->problem->sub_graphs[gpu_num];
if ((enactor->flag & Size_Check) == 0 && (flag & Skip_PreScan)) {
frontier.output_length[0] = 0;
return retval;
}
retval = CheckSize<SizeT, SizeT>(
enactor->flag & Size_Check, "scanned_edges", frontier.queue_length + 2,
&frontier.output_offsets, over_sized, -1, -1, -1, false);
if (retval) return retval;
GUARD_CU(oprtr::ComputeOutputLength<oprtr::OprtrType_V2V>(
graph.csr(), frontier.V_Q(), enactor_slice.oprtr_parameters));
GUARD_CU(
frontier.output_length.Move(util::DEVICE, util::HOST, 1, 0, stream));
return retval;
}
};
} // namespace app
} // namespace gunrock
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
the_stack
|
/* \brief Command line options for performance test program */
#include <algorithm>
#include "cutlass/cutlass.h"
#include "cutlass/version.h"
#include "cutlass/library/util.h"
#include "options.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Newline and indent for help strings
static char const *end_of_line = "\n ";
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Device::Device(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("device", device, 0);
cudaError_t result;
result = cudaGetDeviceProperties(&properties, device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties() failed for given device");
}
result = cudaSetDevice(device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaSetDevice() failed for given device.");
}
// Permit overriding the compute capability
if (cmdline.check_cmd_line_flag("compute-capability")) {
int cc = compute_capability();
cmdline.get_cmd_line_argument("compute-capability", cc, cc);
properties.major = cc / 10;
properties.minor = cc % 10;
}
// Permit overriding the L2 cache capacity
if (cmdline.check_cmd_line_flag("llc-capacity")) {
int llc_capacity = 0;
cmdline.get_cmd_line_argument("llc-capacity", llc_capacity, 0);
if (llc_capacity >= 0) {
properties.l2CacheSize = (llc_capacity << 10);
}
}
}
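// Note (annotation): --compute-capability encodes major*10 + minor, so for example
// --compute-capability=80 makes the probed device report SM 8.0
// (properties.major = 80 / 10 = 8, properties.minor = 80 % 10 = 0). Likewise
// --llc-capacity is given in KiB and shifted left by 10 to obtain bytes.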
void Options::Device::print_usage(std::ostream &out) const {
out << "Device:\n"
<< " --device=<int> "
<< " CUDA Device ID\n\n";
int device_count = 0;
cudaError_t result = cudaGetDeviceCount(&device_count);
if (result != cudaSuccess) {
out << " <could not query for CUDA devices>\n";
}
else {
for (int idx = 0; idx < device_count; ++idx) {
cudaDeviceProp prop;
result = cudaGetDeviceProperties(&prop, idx);
if (result != cudaSuccess) {
out << " <could not obtain device properties for device " << idx << ">" << std::endl;
break;
}
else {
out << " [" << idx << "] - "
<< prop.name << " - SM " << prop.major << "." << prop.minor << ", "
<< prop.multiProcessorCount << " SMs @ " << (prop.clockRate / 1000.0) << " MHz, "
<< "L2 cache: " << (prop.l2CacheSize >> 20) << " MB, Global Memory: " << (prop.totalGlobalMem >> 30) << " GB"
<< std::endl;
}
}
out << "\n";
}
out
<< " --compute-capability=<int> "
<< " Override the compute capability.\n\n"
<< " --llc-capacity=<capacity in KiB> "
<< " Capacity of last-level cache in kilobytes. If this is non-zero," << end_of_line
<< " profiling phases cycle through different input tensors to induce" << end_of_line
<< " capacity misses in the L2.\n\n";
}
void Options::Device::print_device_info(std::ostream &out) const {
int num_devices;
cudaDeviceProp props;
cudaError_t result;
result = cudaGetDeviceCount(&num_devices);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetNumDevices() failed");
}
out << "Device Name,SM,CUDA Device ID,Phy Device ID" << std::endl;
for(int device = 0; device < num_devices; device++) {
result = cudaSetDevice(device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaSetDevice() failed for device");
}
result = cudaGetDeviceProperties(&props, device);
if (result != cudaSuccess) {
throw std::runtime_error("cudaGetDeviceProperties failed for device");
}
out << props.name << "," << props.major << props.minor << ","
<< device << "," << props.multiGpuBoardGroupID << std::endl;
}
}
void Options::Device::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "device: " << device << "\n"
<< indent_str(indent) << "clock: " << int(double(properties.clockRate) / 1000.0) << "\n"
<< indent_str(indent) << "compute-capability: " << compute_capability() << "\n";
}
/// Returns the compute capability of the listed device (e.g. 61, 60, 70, 75)
int Options::Device::compute_capability() const {
return properties.major * 10 + properties.minor;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Initialization::Initialization(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("initialization-enabled", enabled, true);
if (cmdline.check_cmd_line_flag("initialization-provider")) {
std::string str;
cmdline.get_cmd_line_argument("initialization-provider", str);
provider = library::from_string<library::Provider>(str);
if (provider == library::Provider::kInvalid) {
enabled = false;
}
else if (provider != library::Provider::kReferenceHost && provider != library::Provider::kReferenceDevice) {
throw std::runtime_error("Unsupported intialization provider specified.");
}
}
else {
provider = library::Provider::kReferenceDevice;
}
cmdline.get_cmd_line_argument("seed", seed, 2019);
if (cmdline.check_cmd_line_flag("dist")) {
// user has set the data distribution (fix data distribution once set)
fix_data_distribution = true;
// set user provided data distribution
get_distribution(cmdline, "dist", data_distribution);
}
else {
// profiler-chosen data distribution (allowed to change based on numeric types)
fix_data_distribution = false;
// set uniform data distribution with range [-4, 4]
data_distribution.set_uniform(-4, 4, 0);
}
}
/// Gets the initial distribution
void Options::Initialization::get_distribution(
cutlass::CommandLine const &args,
std::string const &arg,
cutlass::Distribution &dist) {
struct {
const char *label;
cutlass::Distribution::Kind kind;
} distribution_kinds[] = {
{"uniform", cutlass::Distribution::Uniform},
{"gaussian", cutlass::Distribution::Gaussian},
{"identity", cutlass::Distribution::Identity},
{"sequential", cutlass::Distribution::Sequential},
{0, cutlass::Distribution::Invalid}
};
struct {
char const *label;
double *member;
} members[] = {
{"min", &dist.uniform.min},
{"max", &dist.uniform.max},
{"mean", &dist.gaussian.mean},
{"stddev", &dist.gaussian.stddev},
{"start", &dist.sequential.start},
{"delta", &dist.sequential.delta},
{0, 0}
};
using KeyValueVector = std::vector<std::pair<std::string, std::string> >;
KeyValueVector values;
args.get_cmd_line_argument_pairs(arg.c_str(), values);
// The parser expects the first token to be a string identifying the distribution type.
auto it = values.begin();
if (it != values.end()) {
for (int i = 0; distribution_kinds[i].label; ++i) {
if (it->first == distribution_kinds[i].label) {
dist.kind = distribution_kinds[i].kind;
break;
}
}
++it;
}
// Subsequent key-value pairs update the named field of the distribution struct.
for (; it != values.end(); ++it) {
// Integer scaling factor - if < 0, no integer rounding is performed.
if ((it->first.compare("scale") == 0) && !it->second.empty()) {
std::stringstream ss;
ss << it->second;
ss >> dist.int_scale;
continue; // next token
}
// Casts as integer without scaling
if (it->first.compare("integer") == 0) {
dist.int_scale = 0;
continue; // next token
}
// initialize other members
for (int m = 0; members[m].label; ++m) {
if (it->first == members[m].label && !it->second.empty()) {
std::stringstream ss;
ss << it->second;
ss >> *(members[m].member);
}
}
}
}
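// Note (annotation): with the parsing above, a command line such as
//   --dist=gaussian,mean:0,stddev:3,scale:-1
// sets dist.kind from the first token, then fills dist.gaussian.mean,
// dist.gaussian.stddev and dist.int_scale from the remaining key:value pairs
// (a negative scale disables integer rounding).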
void Options::Initialization::print_usage(std::ostream &out) const {
out << "Initialization:\n"
<< " --initialization=<bool> "
<< " Enables initialization (default: true). If false, device memory is" << end_of_line
<< " not initialized after allocation.\n\n"
<< " --initialization-provider=<provider> "
<< " Selects initialization provider {host, device*}. (default: '*')\n\n"
<< " --dist=<distribution> "
<< " Data distribution of input tensors {uniform*, gaussian, identity, sequential}" << end_of_line
<< " --dist=uniform,min:<double>,max:<double>,scale:<integer>" << end_of_line
<< " --dist=gaussian,mean:<double>,stddev:<double>,scale:<integer>" << end_of_line
<< " --dist=sequential,start:<double>,delta:<double>,scale:<integer>" << end_of_line
<< " --dist=identity\n\n"
<< " --seed=<int> "
<< " Random number generator seed. Used to enforce deterministic" << end_of_line
<< " initialization.\n\n";
}
void Options::Initialization::print_options(std::ostream &out, int indent) const {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Library::Library(cutlass::CommandLine const &cmdline) {
algorithm_mode = AlgorithmMode::kDefault;
if (cmdline.check_cmd_line_flag("library-algo-mode")) {
std::string mode = "default";
cmdline.get_cmd_line_argument("library-algo-mode", mode);
algorithm_mode = from_string<AlgorithmMode>(mode);
}
if (cmdline.check_cmd_line_flag("library-algos")) {
// If algorithms are specified, override as kBest.
algorithm_mode = AlgorithmMode::kBest;
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("library-algos", tokens);
algorithms.reserve(tokens.size());
for (auto const & token : tokens) {
if (token.find(":")) {
// todo - tokenized range
}
else {
int algo;
std::stringstream ss;
ss << token;
ss >> algo;
algorithms.push_back(algo);
}
}
}
}
void Options::Library::print_usage(std::ostream &out) const {
out << "Library:\n"
<< " --library-algo-mode=<mode> "
<< " Indicates algorithm mode used to call libraries such as cuBLAS and cuDNN.\n"
<< " "
<< " mode={default*,matching,best}\n\n"
<< " --library-algos=<range-list> "
<< " If --algorithm-mode=best, permits specifying a selection of algorithms.\n\n";
}
void Options::Library::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "library-algo-mode: " << to_string(algorithm_mode) << "\n"
<< indent_str(indent) << "library-algos: ";
int j = 0;
for (int x : algorithms) {
out << (j++ ? "," : "") << x;
}
out << "\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Profiling::Profiling(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("workspace-count", workspace_count, 0);
cmdline.get_cmd_line_argument("warmup-iterations", warmup_iterations, 10);
cmdline.get_cmd_line_argument("profiling-iterations", iterations, 100);
cmdline.get_cmd_line_argument("sleep-duration", sleep_duration, 50);
cmdline.get_cmd_line_argument("profiling-enabled", enabled, true);
if (cmdline.check_cmd_line_flag("providers")) {
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("providers", tokens);
providers.clear();
for (auto const &token : tokens) {
providers.push_back(library::from_string<library::Provider>(token));
}
}
else {
providers.push_back(library::Provider::kCUTLASS);
providers.push_back(library::Provider::kCUBLAS);
providers.push_back(library::Provider::kCUDNN);
}
}
void Options::Profiling::print_usage(std::ostream &out) const {
out << "Profiling:\n"
<< " --workspace-count=<workspace count> "
<< " Number of discrete workspaces maintained to avoid cache-resident " << end_of_line
<< " If zero (default), the amount is chosen for each workload based on " << end_of_line
<< " capacity of the last-level cache.\n\n"
<< " --profiling-iterations=<iterations> "
<< " Number of iterations to profile each kernel. If zero, kernels" << end_of_line
<< " are launched up to the profiling duration.\n\n"
<< " --warmup-iterations=<iterations> "
<< " Number of iterations to execute each kernel prior to profiling.\n\n"
<< " --sleep-duration=<duration> "
<< " Number of ms to sleep between profiling periods (ms).\n\n"
<< " --profiling-enabled=<bool> "
<< " If true, profiling is actually conducted.\n\n"
;
}
void Options::Profiling::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "profiling_iterations: " << iterations << "\n"
<< indent_str(indent) << "sleep_duration: " << sleep_duration << "\n"
<< indent_str(indent) << "profiling_enabled: " << enabled << "\n"
<< indent_str(indent) << "providers: [";
int j = 0;
for (auto const & provider : providers) {
out << (j++ ? ", " : "") << library::to_string(provider);
}
out << "]\n";
}
/// Returns true if a provider is enabled
bool Options::Profiling::provider_enabled(library::Provider provider) const {
return std::find(providers.begin(), providers.end(), provider) != providers.end();
}
/// Returns the index of a provider if its enabled
size_t Options::Profiling::index(library::Provider provider) const {
size_t idx = 0;
for (auto const & x : providers) {
if (x == provider) {
return idx;
}
++idx;
}
return idx;
}
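// Note (annotation): index() returns providers.size() when the provider is not in
// the list, so callers are expected to check provider_enabled() first or compare
// the result against providers.size() before using it as an index.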
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Verification::Verification(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("verification-enabled", enabled, true);
cmdline.get_cmd_line_argument("epsilon", epsilon, 0.05);
cmdline.get_cmd_line_argument("nonzero-floor", nonzero_floor, 1.0 / 256.0);
if (cmdline.check_cmd_line_flag("save-workspace")) {
std::string value;
cmdline.get_cmd_line_argument("save-workspace", value);
save_workspace = from_string<SaveWorkspace>(value);
}
else {
save_workspace = SaveWorkspace::kNever;
}
if (cmdline.check_cmd_line_flag("verification-providers")) {
std::vector<std::string> tokens;
cmdline.get_cmd_line_arguments("verification-providers", tokens);
providers.clear();
for (auto const &token : tokens) {
library::Provider provider = library::from_string<library::Provider>(token);
if (provider != library::Provider::kInvalid) {
providers.push_back(provider);
}
}
}
else {
providers.push_back(library::Provider::kCUBLAS);
providers.push_back(library::Provider::kReferenceDevice);
providers.push_back(library::Provider::kCUDNN);
}
}
void Options::Verification::print_usage(std::ostream &out) const {
out << "Verification:\n"
<< " --verification-enabled=<bool> "
<< " Whether to perform verification checks.\n\n"
<< " --epsilon=<error> "
<< " Error threshold. Setting to zero (default) requires" << end_of_line
<< " bit-level equivalence.\n\n"
<< " --nonzero-floor=<floor> "
<< " Results whose absolute value is less than this quantity" << end_of_line
<< " are treated as zero for comparisons.\n\n"
<< " --save-workspace=<string> "
<< " Specifies when to save the GEMM inputs and results to the filesystem." << end_of_line
<< " --save-workspace=never never save workspace (default)" << end_of_line
<< " --save-workspace=incorrect save workspace for incorrect results" << end_of_line
<< " --save-workspace=always always save workspace\n\n"
<< " --verification-providers=<providers> "
<< " List of providers used to verify result. (default: '*')" << end_of_line
<< " Gemm verification-providers {cublas*}" << end_of_line
<< " Conv2d verification-providers {cudnn*, device*, host}"
<< "\n\n";
}
void Options::Verification::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "verification_enabled: " << enabled << "\n"
<< indent_str(indent) << "epsilon: " << epsilon << "\n"
<< indent_str(indent) << "save_workspace: " << to_string(save_workspace) << "\n"
<< indent_str(indent) << "verification_providers: [";
int j = 0;
for (auto const & provider : providers) {
out << (j++ ? ", " : "") << library::to_string(provider);
}
out << "]\n";
}
/// Returns true if a provider is enabled
bool Options::Verification::provider_enabled(library::Provider provider) const {
return std::find(providers.begin(), providers.end(), provider) != providers.end();
}
/// Returns the index of a provider if its enabled
size_t Options::Verification::index(library::Provider provider) const {
size_t idx = 0;
for (auto const & x : providers) {
if (x == provider) {
return idx;
}
++idx;
}
return idx;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Report::Report(cutlass::CommandLine const &cmdline) {
cmdline.get_cmd_line_argument("append", append, false);
cmdline.get_cmd_line_argument("output", output_path);
cmdline.get_cmd_line_argument("junit-output", junit_output_path);
if (cmdline.check_cmd_line_flag("tags")) {
cmdline.get_cmd_line_argument_pairs("tags", pivot_tags);
}
cmdline.get_cmd_line_argument("report-not-run", report_not_run, false);
cmdline.get_cmd_line_argument("verbose", verbose, true);
}
void Options::Report::print_usage(std::ostream &out) const {
out << "Report:\n"
<< " --append=<bool> "
<< " If true, result is appended to possibly existing file. Otherwise, " << end_of_line
<< " any existing file is overwritten.\n\n"
<< " --output=<path> "
<< " Path to output file for machine readable results. Operation kind and '.csv' is appended.\n\n"
<< " --junit-output=<path> "
<< " Path to junit output file for result reporting. Operation kind and '.junit.xml' is appended.\n\n"
<< " --report-not-run=<bool> "
<< " If true, reports the status of all kernels including those that" << end_of_line
<< " do not satisfy the given arguments.\n\n"
<< " --tags=<column:tag,...> "
<< " Inserts leading columns in output table and uniform values for each" << end_of_line
<< " column. Useful for generating pivot tables.\n\n"
<< " --verbose=<bool> "
<< " Prints human-readable text to stdout. If false, nothing is written to stdout.\n\n";
}
void Options::Report::print_options(std::ostream &out, int indent) const {
out
<< indent_str(indent) << "append: " << append << "\n"
<< indent_str(indent) << "output: " << output_path << "\n"
<< indent_str(indent) << "junit-output: " << junit_output_path << "\n"
<< indent_str(indent) << "report_not_run: " << report_not_run << "\n"
<< indent_str(indent) << "tags:\n";
for (auto const & tag : pivot_tags) {
out << indent_str(indent + 1) << tag.first << ": " << tag.second << "\n";
}
out
<< indent_str(indent) << "verbose: " << verbose << "\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::About::About(cutlass::CommandLine const &cmdline) {
help = cmdline.check_cmd_line_flag("help");
version = cmdline.check_cmd_line_flag("version");
device_info = cmdline.check_cmd_line_flag("device-info");
}
void Options::About::print_usage(std::ostream &out) const {
out << "About:\n"
<< " --version ";
print_version(out);
out << "\n";
}
void Options::About::print_version(std::ostream &out) {
out << "CUTLASS " << cutlass::getVersionString()
<< " built on " << __DATE__ << " at " << __TIME__;
if (!cutlass::getGitRevision().empty()) out << " with commit " << cutlass::getGitRevision() << "";
}
void Options::About::print_options(std::ostream &out, int indent) const {
}
/////////////////////////////////////////////////////////////////////////////////////////////////
Options::Options(cutlass::CommandLine const &cmdline):
cmdline(cmdline),
device(cmdline),
initialization(cmdline),
library(cmdline),
profiling(cmdline),
verification(cmdline),
report(cmdline),
about(cmdline) {
if (cmdline.check_cmd_line_flag("mode")) {
std::string token;
cmdline.get_cmd_line_argument("mode", token);
execution_mode = from_string<ExecutionMode>(token);
}
else {
execution_mode = ExecutionMode::kProfile;
}
// Enumerating kernels is equivalent to a dry run.
if (execution_mode == ExecutionMode::kEnumerate) {
execution_mode = ExecutionMode::kDryRun;
}
if (cmdline.check_cmd_line_flag("operation")) {
std::string str;
cmdline.get_cmd_line_argument("operation", str);
operation_kind = library::from_string<library::OperationKind>(str);
}
else if (cmdline.check_cmd_line_flag("function")) {
std::string str;
cmdline.get_cmd_line_argument("function", str);
operation_kind = library::from_string<library::OperationKind>(str);
}
else {
operation_kind = library::OperationKind::kInvalid;
}
if (cmdline.check_cmd_line_flag("operation_names")) {
cmdline.get_cmd_line_arguments("operation_names", operation_names);
}
else if (cmdline.check_cmd_line_flag("kernels")) {
cmdline.get_cmd_line_arguments("kernels", operation_names);
}
if (cmdline.check_cmd_line_flag("ignore-kernels")) {
cmdline.get_cmd_line_arguments("ignore-kernels", excluded_operation_names);
}
// Prevent launches on the device for anything other than CUTLASS operation
if (execution_mode == ExecutionMode::kTrace) {
initialization.provider = library::Provider::kReferenceHost;
verification.enabled = false;
profiling.enabled = false;
}
}
void Options::print_usage(std::ostream &out) const {
out
<< "CUTLASS Profiler\n"
<< "usage:\n\n"
<< " cutlass_profiler [options]\n\n"
<< " --help\n\n"
<< " --mode=<string> "
<< " Cutlass profiler execution mode." << end_of_line
<< " --mode=profile regular verification and profiling (default)" << end_of_line
<< " --mode=dry_run no kernels are launched or workspaces allocated" << end_of_line
<< " --mode=enumerate lists all operation kind and operations" << end_of_line
<< " --mode=trace executes a single device-side computation with" << end_of_line
<< " no other kernel launches\n\n"
<< " --device-info "
<< " Prints information on all GPUs present in the system\n\n"
<< " --operation=<operation_kind> "
<< " CUTLASS operation to profile.\n\n"
<< " --kernels=<string_list> "
<< " Filter operations by kernel names. For example, call all kernels with" << end_of_line
<< " (\"s1688\" and \"nt\") or (\"s844\" and \"tn\" and \"align8\") in their" << end_of_line
<< " operation name using --kernels=\"s1688*nt, s884*tn*align8\"\n\n"
<< " --ignore-kernels=<string_list> "
<< " Excludes kernels whose names match anything in this list.\n\n"
;
//
// Detailed options
//
device.print_usage(out);
out << "\n";
initialization.print_usage(out);
out << "\n";
library.print_usage(out);
out << "\n";
profiling.print_usage(out);
out << "\n";
verification.print_usage(out);
out << "\n";
report.print_usage(out);
out << "\n";
about.print_usage(out);
out << "\n";
}
void Options::print_options(std::ostream &out) const {
out
<< "options:\n"
<< " help: " << about.help << "\n"
<< " mode: " << to_string(execution_mode) << "\n";
out
<< " device:\n";
device.print_options(out, 2);
out
<< " initialization:\n";
initialization.print_options(out, 2);
out
<< " profiling:\n";
profiling.print_options(out, 2);
out
<< " verification:\n";
verification.print_options(out, 2);
out
<< " report:\n";
report.print_options(out, 2);
}
std::string Options::indent_str(int indent) {
return std::string(indent * 2, ' ');
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
|
the_stack
|
__device__ float clamp(float a, float minv, float maxv)
{
return fminf(fmaxf(minv, a),maxv);
}
__device__ float3 clampv3(float3 in, float3 minv, float3 maxv)
{
float xout = clamp(in.x,minv.x,maxv.x);
float yout = clamp(in.y,minv.y,maxv.y);
float zout = clamp(in.z,minv.z,maxv.z);
return make_float3(xout, yout, zout);
}
__device__ float lerp(float a, float b, float c)
{
return (1.0-c)*a + c*b;
}
__device__ float triLerp(float v000, float v001, float v010, float v011, float v100, float v101,
float v110, float v111, float a, float b, float c)
{
return lerp(
lerp(
lerp(v000, v001, a),
lerp(v010, v011, a),
b),
lerp(
lerp(v100, v101, a),
lerp(v110, v111, a),
b),
c);
}
// Trilinearly interpolates buffer b of size nx*ny*nz with grid spacing h.
// off_set is the world-space position of sample (0,0,0), so staggered (face-centred)
// quantities are handled by passing the appropriate half-cell offset.
__device__ float sample_buffer(float * b, int nx, int ny, int nz, float h, float3 off_set, float3 pos)
{
float3 samplepos = make_float3(pos.x-off_set.x, pos.y-off_set.y, pos.z-off_set.z);
int i = int(floorf(samplepos.x/h));
int j = int(floorf(samplepos.y/h));
int k = int(floorf(samplepos.z/h));
float fx = samplepos.x/h - float(i);
float fy = samplepos.y/h - float(j);
float fz = samplepos.z/h - float(k);
int idx000 = i + nx*j + nx*ny*k;
int idx001 = i + nx*j + nx*ny*k + 1;
int idx010 = i + nx*j + nx*ny*k + nx;
int idx011 = i + nx*j + nx*ny*k + nx + 1;
int idx100 = i + nx*j + nx*ny*k + nx*ny;
int idx101 = i + nx*j + nx*ny*k + nx*ny + 1;
int idx110 = i + nx*j + nx*ny*k + nx*ny + nx;
int idx111 = i + nx*j + nx*ny*k + nx*ny + nx + 1;
return triLerp(b[idx000], b[idx001],b[idx010],b[idx011],b[idx100],b[idx101],b[idx110],b[idx111], fx, fy, fz);
}
// Samples the staggered (MAC) velocity field at a world-space position.
__device__ float3 getVelocity(float *u, float *v, float *w, float h, int nx, int ny, int nz, float3 pos)
{
float _u = sample_buffer(u, nx+1, ny, nz, h, make_float3(-0.5*h,0,0), pos);
float _v = sample_buffer(v, nx, ny+1, nz, h, make_float3(0,-0.5*h,0), pos);
float _w = sample_buffer(w, nx, ny, nz+1, h, make_float3(0,0,-0.5*h), pos);
return make_float3(_u,_v,_w);
}
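A note on the sampling convention above (an interpretation of the code, not a comment from the original source): the velocity components live on the faces of a staggered/MAC grid, which is why each component buffer carries one extra sample along its own axis and is read with a half-cell offset. sample_buffer then blends the eight surrounding samples trilinearly,
\[
f(\mathbf{p}) \;\approx\; \sum_{a,b,c \in \{0,1\}} w_x^{a}\, w_y^{b}\, w_z^{c}\; f_{\,i+a,\; j+b,\; k+c},
\qquad w^{1}_{x}=f_x,\; w^{0}_{x}=1-f_x \text{ (likewise for } y, z\text{)},
\]
where $(i,j,k) = \lfloor (\mathbf{p}-\text{off\_set})/h \rfloor$ and $(f_x, f_y, f_z)$ are the fractional parts, exactly as computed in sample_buffer and triLerp.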
__device__ float3 traceRK3(float *u, float *v, float *w, float h, int ni, int nj, int nk, float dt, float3 pos)
{
float c1 = 2.0/9.0*dt, c2 = 3.0/9.0 * dt, c3 = 4.0/9.0 * dt;
float3 input = pos;
float3 v1 = getVelocity(u,v,w,h,ni,nj,nk, input);
float3 midp1 = make_float3(input.x + 0.5*dt*v1.x, input.y + 0.5*dt*v1.y, input.z + 0.5*dt*v1.z);
float3 v2 = getVelocity(u,v,w,h,ni,nj,nk, midp1);
float3 midp2 = make_float3(input.x + 0.75*dt*v2.x, input.y + 0.75*dt*v2.y, input.z + 0.75*dt*v2.z);
float3 v3 = getVelocity(u,v,w,h,ni,nj,nk, midp2);
float3 output = make_float3(input.x + c1*v1.x + c2*v2.x + c3*v3.x,
input.y + c1*v1.y + c2*v2.y + c3*v3.y,
input.z + c1*v1.z + c2*v2.z + c3*v3.z);
output = clampv3(output, make_float3(h,h,h),
make_float3(float(ni) * h - h, float(nj) * h - h, float(nk) * h - h ));
return output;
}
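traceRK3 is a third-order Runge-Kutta step with Ralston's coefficients, which can be read directly off the code. With $\mathbf{v}$ the interpolated velocity:
\[
\mathbf{k}_1 = \mathbf{v}(\mathbf{x}), \qquad
\mathbf{k}_2 = \mathbf{v}\!\left(\mathbf{x} + \tfrac{1}{2}\,\Delta t\,\mathbf{k}_1\right), \qquad
\mathbf{k}_3 = \mathbf{v}\!\left(\mathbf{x} + \tfrac{3}{4}\,\Delta t\,\mathbf{k}_2\right),
\]
\[
\mathbf{x}_{\text{new}} = \mathbf{x} + \Delta t\left(\tfrac{2}{9}\,\mathbf{k}_1 + \tfrac{3}{9}\,\mathbf{k}_2 + \tfrac{4}{9}\,\mathbf{k}_3\right),
\]
followed by a clamp that keeps the result at least one cell away from the domain boundary.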
// Integrates a particle position over dt, splitting the step into CFL-limited substeps;
// a negative dt traces backwards through the velocity field.
__device__ float3 trace(float *u, float *v, float *w, float h, int ni, int nj, int nk, float cfldt, float dt, float3 pos)
{
if(dt>0)
{
float T = dt;
float3 opos = pos;
float t = 0;
float substep = cfldt;
while(t<T)
{
if(t+substep > T)
substep = T - t;
opos = traceRK3(u,v,w,h,ni,nj,nk,substep,opos);
t+=substep;
}
return opos;
}
else
{
float T = -dt;
float3 opos = pos;
float t = 0;
float substep = cfldt;
while(t<T)
{
if(t+substep > T)
substep = T - t;
opos = traceRK3(u,v,w,h,ni,nj,nk,-substep,opos);
t+=substep;
}
return opos;
}
}
__global__ void forward_kernel(float *u, float *v, float *w,
float *x_fwd, float *y_fwd, float *z_fwd,
float h, int ni, int nj, int nk, float cfldt, float dt)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int i = index%ni;
int j = (index%(ni*nj))/ni;
int k = index/(ni*nj);
if (i > 1 && i<ni-2 && j > 1 && j<nj-2 && k > 1 && k<nk-2)
{
float3 point = make_float3(x_fwd[index], y_fwd[index], z_fwd[index]);
float3 pointout = trace(u,v,w,h,ni,nj,nk,cfldt,dt,point);
x_fwd[index] = pointout.x;
y_fwd[index] = pointout.y;
z_fwd[index] = pointout.z;
}
__syncthreads();
}
__global__ void clampExtrema_kernel(float *before, float *after, int ni, int nj, int nk)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int i = index%ni;
int j = (index%(ni*nj))/ni;
int k = index/(ni*nj);
if (i>0 && i<ni-1 && j>0 && j<nj-1 && k>0 && k<nk-1)
{
// only in-range threads may touch memory: the launch rounds the grid up to a multiple of the block size
float max_value = before[index];
float min_value = before[index];
for(int kk=k-1;kk<=k+1;kk++)for(int jj=j-1;jj<=j+1;jj++)for(int ii=i-1;ii<=i+1;ii++)
{
int idx = ii + jj*ni + kk*ni*nj;
if(before[idx]>max_value)
max_value = before[idx];
if(before[idx]<min_value)
min_value = before[idx];
}
after[index] = min(max(min_value, after[index]), max_value);
}
__syncthreads();
}
__global__ void DMC_backward_kernel(float *u, float *v, float *w,
float *x_in, float *y_in, float *z_in,
float *x_out, float *y_out, float *z_out,
float h, int ni, int nj, int nk, float substep)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int i = index%ni;
int j = (index%(ni*nj))/ni;
int k = index/(ni*nj);
if (i > 1 && i<ni-2 && j > 1 && j<nj-2 && k > 1 && k<nk-2)
{
float3 point = make_float3(h*float(i),h*float(j),h*float(k));
float3 vel = getVelocity(u, v, w, h, ni, nj, nk, point);
float temp_x = (vel.x > 0)? point.x - h: point.x + h;
float temp_y = (vel.y > 0)? point.y - h: point.y + h;
float temp_z = (vel.z > 0)? point.z - h: point.z + h;
float3 temp_point = make_float3(temp_x, temp_y, temp_z);
float3 temp_vel = getVelocity(u, v, w, h, ni, nj, nk, temp_point);
float a_x = (vel.x - temp_vel.x) / (point.x - temp_point.x);
float a_y = (vel.y - temp_vel.y) / (point.y - temp_point.y);
float a_z = (vel.z - temp_vel.z) / (point.z - temp_point.z);
float new_x = (fabs(a_x) > 1e-4)? point.x - (1 - exp(-a_x*substep))*vel.x/a_x : point.x - vel.x*substep;
float new_y = (fabs(a_y) > 1e-4)? point.y - (1 - exp(-a_y*substep))*vel.y/a_y : point.y - vel.y*substep;
float new_z = (fabs(a_z) > 1e-4)? point.z - (1 - exp(-a_z*substep))*vel.z/a_z : point.z - vel.z*substep;
float3 pointnew = make_float3(new_x, new_y, new_z);
x_out[index] = sample_buffer(x_in,ni,nj,nk,h,make_float3(0.0,0.0,0.0),pointnew);
y_out[index] = sample_buffer(y_in,ni,nj,nk,h,make_float3(0.0,0.0,0.0),pointnew);
z_out[index] = sample_buffer(z_in,ni,nj,nk,h,make_float3(0.0,0.0,0.0),pointnew);
}
__syncthreads();
}
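The exponential update in DMC_backward_kernel can be read as solving the linearised backtrace ODE exactly (an interpretation of the code, not an original comment). Estimating the velocity gradient along each axis with a one-sided difference, $a \approx \partial u/\partial x$, the backward characteristic $\dot{x} = -\bigl(u_0 + a\,(x - x_0)\bigr)$ with $x(0)=x_0$ has the closed-form solution
\[
x(\Delta t) \;=\; x_0 \;-\; \frac{u_0}{a}\left(1 - e^{-a\,\Delta t}\right),
\]
which tends to the plain semi-Lagrangian step $x_0 - u_0\,\Delta t$ as $a \to 0$; the kernel switches to that limit when $|a| \le 10^{-4}$ and then samples the previous backward map at the resulting position.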
__global__ void semilag_kernel(float *field, float *field_src,
float *u, float *v, float *w,
int dim_x, int dim_y, int dim_z,
float h, int ni, int nj, int nk, float cfldt, float dt)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
float3 buffer_origin = make_float3(-float(dim_x)*0.5f*h, -float(dim_y)*0.5f*h, -float(dim_z)*0.5f*h);
int field_buffer_i = ni + dim_x;
int field_buffer_j = nj + dim_y;
int field_buffer_k = nk + dim_z;
int i = index % field_buffer_i;
int j = (index % (field_buffer_i * field_buffer_j)) / field_buffer_i;
int k = index/(field_buffer_i*field_buffer_j);
if (i > 1 && i < field_buffer_i-2-dim_x && j > 1 && j < field_buffer_j-2-dim_y && k > 1 && k < field_buffer_k-2-dim_z)
{
float3 point = make_float3(h*float(i) + buffer_origin.x,
h*float(j) + buffer_origin.y,
h*float(k) + buffer_origin.z);
float3 pointnew = trace(u, v, w, h, ni, nj, nk, cfldt, dt, point);
field[index] = sample_buffer(field_src, field_buffer_i, field_buffer_j, field_buffer_k, h, buffer_origin, pointnew);
}
__syncthreads();
}
__global__ void doubleAdvect_kernel(float *field, float *temp_field,
float *backward_x, float *backward_y, float * backward_z,
float *backward_xprev, float *backward_yprev, float *backward_zprev,
float h, int ni, int nj, int nk,
int dimx, int dimy, int dimz, bool is_point, float blend_coeff)
{
float3 volume[8];
int evaluations = 8;
volume[0] = make_float3(0.25f*h, 0.25f*h, 0.25f*h); volume[1] = make_float3(0.25f*h, 0.25f*h, -0.25f*h);
volume[2] = make_float3(0.25f*h, -0.25f*h, 0.25f*h); volume[3] = make_float3(0.25f*h, -0.25f*h, -0.25f*h);
volume[4] = make_float3(-0.25f*h, 0.25f*h, 0.25f*h); volume[5] = make_float3(-0.25f*h, 0.25f*h, -0.25f*h);
volume[6] = make_float3(-0.25f*h, -0.25f*h, 0.25f*h);volume[7] = make_float3(-0.25f*h, -0.25f*h, -0.25f*h);
if(is_point) {
volume[0] = make_float3(0, 0, 0);
evaluations = 1;
}
int index = blockDim.x*blockIdx.x + threadIdx.x;
float weight = 1.0/float(evaluations);
float3 buffer_origin = make_float3(-float(dimx)*0.5f*h, -float(dimy)*0.5f*h, -float(dimz)*0.5f*h);
int vel_buffer_i = ni + dimx;
int vel_buffer_j = nj + dimy;
int vel_buffer_k = nk + dimz;
int i = index%vel_buffer_i;
int j = (index%(vel_buffer_i*vel_buffer_j))/vel_buffer_i;
int k = index/(vel_buffer_i*vel_buffer_j);
if (2+dimx<i && i<vel_buffer_i-3 && 2+dimy< j && j<vel_buffer_j-3 && 2+dimz<k && k<vel_buffer_k-3)
{
float sum = 0.0;
for (int ii = 0; ii<evaluations; ii++)
{
float3 pos = make_float3(float(i)*h + buffer_origin.x + volume[ii].x,
float(j)*h + buffer_origin.y + volume[ii].y,
float(k)*h + buffer_origin.z + volume[ii].z);
float x_init = sample_buffer(backward_x, ni, nj, nk, h, make_float3(0,0,0), pos);
float y_init = sample_buffer(backward_y, ni, nj, nk, h, make_float3(0,0,0), pos);
float z_init = sample_buffer(backward_z, ni, nj, nk, h, make_float3(0,0,0), pos);
float3 midpos = make_float3(x_init, y_init, z_init);
midpos = clampv3(midpos,make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) -h ));
float x_orig = sample_buffer(backward_xprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float y_orig = sample_buffer(backward_yprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float z_orig = sample_buffer(backward_zprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float3 finalpos = make_float3(x_orig, y_orig, z_orig);
finalpos = clampv3(finalpos,make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) - h));
sum += weight*sample_buffer(temp_field, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, finalpos);
}
float3 pos = make_float3(float(i)*h + buffer_origin.x,
float(j)*h + buffer_origin.y,
float(k)*h + buffer_origin.z);
float x_init = sample_buffer(backward_x, ni, nj, nk, h, make_float3(0,0,0), pos);
float y_init = sample_buffer(backward_y, ni, nj, nk, h, make_float3(0,0,0), pos);
float z_init = sample_buffer(backward_z, ni, nj, nk, h, make_float3(0,0,0), pos);
float3 midpos = make_float3(x_init, y_init, z_init);
midpos = clampv3(midpos,make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) -h ));
float x_orig = sample_buffer(backward_xprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float y_orig = sample_buffer(backward_yprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float z_orig = sample_buffer(backward_zprev, ni, nj, nk, h, make_float3(0,0,0), midpos);
float3 finalpos = make_float3(x_orig, y_orig, z_orig);
finalpos = clampv3(finalpos,make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) - h));
float value = sample_buffer(temp_field, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, finalpos);
float prev_value = 0.5f*(sum + value);
field[index] = field[index]*blend_coeff + (1-blend_coeff)*prev_value;
}
__syncthreads();
}
__global__ void advect_kernel(float *field, float *field_init,
float *backward_x, float *backward_y, float *backward_z,
float h, int ni, int nj, int nk,
int dimx, int dimy, int dimz, bool is_point)
{
float3 volume[8];
int evaluations = 8;
volume[0] = make_float3(0.25f*h, 0.25f*h, 0.25f*h); volume[1] = make_float3(0.25f*h, 0.25f*h, -0.25f*h);
volume[2] = make_float3(0.25f*h, -0.25f*h, 0.25f*h); volume[3] = make_float3(0.25f*h, -0.25f*h, -0.25f*h);
volume[4] = make_float3(-0.25f*h, 0.25f*h, 0.25f*h); volume[5] = make_float3(-0.25f*h, 0.25f*h, -0.25f*h);
volume[6] = make_float3(-0.25f*h, -0.25f*h, 0.25f*h);volume[7] = make_float3(-0.25f*h, -0.25f*h, -0.25f*h);
if(is_point) {
volume[0] = make_float3(0, 0, 0);
evaluations = 1;
}
int index = blockDim.x*blockIdx.x + threadIdx.x;
float weight = 1.0/float(evaluations);
float3 buffer_origin = make_float3(-float(dimx)*0.5f*h, -float(dimy)*0.5f*h, -float(dimz)*0.5f*h);
int vel_buffer_i = ni + dimx;
int vel_buffer_j = nj + dimy;
int vel_buffer_k = nk + dimz;
int i = index%vel_buffer_i;
int j = (index%(vel_buffer_i*vel_buffer_j))/vel_buffer_i;
int k = index/(vel_buffer_i*vel_buffer_j);
if (2+dimx<i && i<vel_buffer_i-3 && 2+dimy< j && j<vel_buffer_j-3 && 2+dimz<k && k<vel_buffer_k-3)
{
float sum = 0.0;
for (int ii = 0; ii<evaluations; ii++)
{
float3 pos = make_float3(float(i)*h + buffer_origin.x + volume[ii].x,
float(j)*h + buffer_origin.y + volume[ii].y,
float(k)*h + buffer_origin.z + volume[ii].z);
float x_init = sample_buffer(backward_x, ni, nj, nk, h, make_float3(0,0,0), pos);
float y_init = sample_buffer(backward_y, ni, nj, nk, h, make_float3(0,0,0), pos);
float z_init = sample_buffer(backward_z, ni, nj, nk, h, make_float3(0,0,0), pos);
float3 pos_init = make_float3(x_init, y_init, z_init);
pos_init = clampv3(pos_init, make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) - h));
sum += weight*sample_buffer(field_init, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, pos_init);
}
float3 pos = make_float3(float(i)*h + buffer_origin.x,
float(j)*h + buffer_origin.y,
float(k)*h + buffer_origin.z);
float x_init = sample_buffer(backward_x, ni, nj, nk, h, make_float3(0,0,0), pos);
float y_init = sample_buffer(backward_y, ni, nj, nk, h, make_float3(0,0,0), pos);
float z_init = sample_buffer(backward_z, ni, nj, nk, h, make_float3(0,0,0), pos);
float3 pos_init = make_float3(x_init, y_init, z_init);
pos_init = clampv3(pos_init, make_float3(h,h,h), make_float3(h*float(ni) - h, h*float(nj) - h, h*float(nk) - h));
float value = sample_buffer(field_init, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, pos_init);
field[index] = 0.5f*sum + 0.5f*value;
}
__syncthreads();
}
__global__ void cumulate_kernel(float *dfield, float *dfield_init,
float *x_map, float *y_map, float *z_map,
float h, int ni, int nj, int nk,
int dimx, int dimy, int dimz, bool is_point, float coeff)
{
float3 volume[8];
int evaluations = 8;
volume[0] = make_float3(0.25f*h, 0.25f*h, 0.25f*h); volume[1] = make_float3(0.25f*h, 0.25f*h, -0.25f*h);
volume[2] = make_float3(0.25f*h, -0.25f*h, 0.25f*h); volume[3] = make_float3(0.25f*h, -0.25f*h, -0.25f*h);
volume[4] = make_float3(-0.25f*h, 0.25f*h, 0.25f*h); volume[5] = make_float3(-0.25f*h, 0.25f*h, -0.25f*h);
volume[6] = make_float3(-0.25f*h, -0.25f*h, 0.25f*h);volume[7] = make_float3(-0.25f*h, -0.25f*h, -0.25f*h);
if(is_point) {
volume[0] = make_float3(0, 0, 0);
evaluations = 1;
}
int index = blockDim.x*blockIdx.x + threadIdx.x;
float weight = 1.0/float(evaluations);
float3 buffer_origin = make_float3(-float(dimx)*0.5f*h, -float(dimy)*0.5f*h, -float(dimz)*0.5f*h);
int vel_buffer_i = ni + dimx;
int vel_buffer_j = nj + dimy;
int vel_buffer_k = nk + dimz;
int i = index%vel_buffer_i;
int j = (index%(vel_buffer_i*vel_buffer_j))/vel_buffer_i;
int k = index/(vel_buffer_i*vel_buffer_j);
if (1+dimx<i && i<vel_buffer_i-2 && 1+dimy< j && j<vel_buffer_j-2 && 1+dimz<k && k<vel_buffer_k-2)
{
float sum = 0.0;
for (int ii = 0; ii<evaluations; ii++)
{
float3 point = make_float3(float(i)*h + buffer_origin.x + volume[ii].x,
float(j)*h + buffer_origin.y + volume[ii].y,
float(k)*h + buffer_origin.z + volume[ii].z);
// forward mapping position
// also used in compensation
float x_pos = sample_buffer(x_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float y_pos = sample_buffer(y_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float z_pos = sample_buffer(z_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 map_pos = make_float3(x_pos, y_pos, z_pos);
map_pos = clampv3(map_pos, make_float3(0,0,0), make_float3(h*float(ni), h*float(nj), h*float(nk)));
sum += weight * coeff * sample_buffer(dfield, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, map_pos);
}
float3 point = make_float3(float(i)*h + buffer_origin.x,
float(j)*h + buffer_origin.y,
float(k)*h + buffer_origin.z);
// forward mapping position
float x_pos = sample_buffer(x_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float y_pos = sample_buffer(y_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float z_pos = sample_buffer(z_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 map_pos = make_float3(x_pos, y_pos, z_pos);
map_pos = clampv3(map_pos, make_float3(0,0,0), make_float3(h*float(ni), h*float(nj), h*float(nk)));
float value = coeff * sample_buffer(dfield, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, map_pos);
sum = 0.5*sum + 0.5 * value;
dfield_init[index] += sum;
}
__syncthreads();
}
__global__ void compensate_kernel(float *src_buffer, float *temp_buffer, float *test_buffer,
float *x_map, float *y_map, float *z_map,
float h, int ni, int nj, int nk,
int dimx, int dimy, int dimz, bool is_point)
{
float3 volume[8];
int evaluations = 8;
volume[0] = make_float3(0.25f*h, 0.25f*h, 0.25f*h); volume[1] = make_float3(0.25f*h, 0.25f*h, -0.25f*h);
volume[2] = make_float3(0.25f*h, -0.25f*h, 0.25f*h); volume[3] = make_float3(0.25f*h, -0.25f*h, -0.25f*h);
volume[4] = make_float3(-0.25f*h, 0.25f*h, 0.25f*h); volume[5] = make_float3(-0.25f*h, 0.25f*h, -0.25f*h);
volume[6] = make_float3(-0.25f*h, -0.25f*h, 0.25f*h);volume[7] = make_float3(-0.25f*h, -0.25f*h, -0.25f*h);
if(is_point) {
volume[0] = make_float3(0, 0, 0);
evaluations = 1;
}
int index = blockDim.x*blockIdx.x + threadIdx.x;
float weight = 1.0/float(evaluations);
float3 buffer_origin = make_float3(-float(dimx)*0.5f*h, -float(dimy)*0.5f*h, -float(dimz)*0.5f*h);
int vel_buffer_i = ni + dimx;
int vel_buffer_j = nj + dimy;
int vel_buffer_k = nk + dimz;
int i = index%vel_buffer_i;
int j = (index%(vel_buffer_i*vel_buffer_j))/vel_buffer_i;
int k = index/(vel_buffer_i*vel_buffer_j);
if (1+dimx<i && i<vel_buffer_i-2 && 1+dimy< j && j<vel_buffer_j-2 && 1+dimz<k && k<vel_buffer_k-2)
{
float sum = 0.0;
for (int ii = 0; ii<evaluations; ii++)
{
float3 point = make_float3(float(i)*h + buffer_origin.x + volume[ii].x,
float(j)*h + buffer_origin.y + volume[ii].y,
float(k)*h + buffer_origin.z + volume[ii].z);
float x_pos = sample_buffer(x_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float y_pos = sample_buffer(y_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float z_pos = sample_buffer(z_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 map_pos = make_float3(x_pos, y_pos, z_pos);
map_pos = clampv3(map_pos, make_float3(0,0,0), make_float3(h*float(ni), h*float(nj), h*float(nk)));
sum += weight * sample_buffer(src_buffer, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, map_pos);
}
float3 point = make_float3(float(i)*h + buffer_origin.x,
float(j)*h + buffer_origin.y,
float(k)*h + buffer_origin.z);
// forward mapping position
float x_pos = sample_buffer(x_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float y_pos = sample_buffer(y_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float z_pos = sample_buffer(z_map,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 map_pos = make_float3(x_pos, y_pos, z_pos);
map_pos = clampv3(map_pos, make_float3(0,0,0), make_float3(h*float(ni), h*float(nj), h*float(nk)));
float value = sample_buffer(src_buffer, ni+dimx, nj+dimy, nk+dimz, h, buffer_origin, map_pos);
sum = 0.5*sum + 0.5*value;
test_buffer[index] = sum - temp_buffer[index];
// sum -= temp_buffer[index];
// sum *= 0.5f;
// temp_buffer[index] = sum;
}
__syncthreads();
}
__global__ void estimate_kernel(float *dist_buffer, float *x_first, float *y_first, float *z_first,
float *x_second, float *y_second, float *z_second,
float h, int ni, int nj, int nk)
{
int index = blockDim.x*blockIdx.x + threadIdx.x;
int i = index%ni;
int j = (index%(ni*nj))/ni;
int k = index/(ni*nj);
if (i>1 && i<ni-2 && j>1 && j<nj-2 && k>1 && k<nk-2)
{
float3 point = make_float3(h*float(i),h*float(j),h*float(k));
// backward then forward
float back_x = sample_buffer(x_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float back_y = sample_buffer(y_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float back_z = sample_buffer(z_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 back_pos = make_float3(back_x, back_y, back_z);
float fwd_x = sample_buffer(x_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),back_pos);
float fwd_y = sample_buffer(y_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),back_pos);
float fwd_z = sample_buffer(z_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),back_pos);
float dist_bf = (point.x-fwd_x)*(point.x-fwd_x) +
(point.y-fwd_y)*(point.y-fwd_y) +
(point.z-fwd_z)*(point.z-fwd_z);
// forward then backward
fwd_x = sample_buffer(x_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
fwd_y = sample_buffer(y_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
fwd_z = sample_buffer(z_second,ni,nj,nk,h,make_float3(0.0,0.0,0.0),point);
float3 fwd_pos = make_float3(fwd_x, fwd_y, fwd_z);
back_x = sample_buffer(x_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),fwd_pos);
back_y = sample_buffer(y_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),fwd_pos);
back_z = sample_buffer(z_first,ni,nj,nk,h,make_float3(0.0,0.0,0.0),fwd_pos);
float dist_fb = (point.x-back_x)*(point.x-back_x) +
(point.y-back_y)*(point.y-back_y) +
(point.z-back_z)*(point.z-back_z);
dist_buffer[index] = max(dist_bf, dist_fb);
}
__syncthreads();
}
__global__ void reduce0(float *g_idata, float *g_odata, int N) {
extern __shared__ float sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x *blockDim.x + threadIdx.x;
sdata[tid] = (i<N)?g_idata[i]:0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s > 0; s >>= 1)
{
if (tid < s && i < N)
{
sdata[tid] = max(sdata[tid], sdata[tid+s]);
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
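reduce0 uses dynamically sized shared memory (extern __shared__), so a launch must pass blockDim.x * sizeof(float) as the third launch parameter, and the per-block maxima have to be reduced again until a single value remains. A minimal host-side sketch, assuming two device buffers d_in and d_scratch (illustrative names, not from this file; d_scratch must hold at least one float per block):

// Hypothetical driver for reduce0: ping-pongs between the two buffers, so both may be overwritten.
float gpu_max(float *d_in, float *d_scratch, int n)
{
    int blocksize = 256;
    float *src = d_in;
    float *dst = d_scratch;
    while (n > 1)
    {
        int numBlocks = (n + blocksize - 1)/blocksize;
        // third launch argument: dynamic shared memory, one float per thread
        reduce0<<<numBlocks, blocksize, blocksize*sizeof(float)>>>(src, dst, n);
        n = numBlocks;
        float *tmp = src; src = dst; dst = tmp;
    }
    float result;
    cudaMemcpy(&result, src, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}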
__global__ void add_kernel(float *field1, float *field2, float coeff, int num)
{
unsigned int i = blockIdx.x *blockDim.x + threadIdx.x;
// guard against the rounded-up grid running past the end of the buffers
if (i < (unsigned int)num)
{
field1[i] += coeff*field2[i];
}
}
extern "C" void gpu_solve_forward(float *u, float *v, float *w,
float *x_fwd, float *y_fwd, float *z_fwd,
float h, int ni, int nj, int nk, float cfldt, float dt)
{
int blocksize = 256;
int numBlocks = ((ni*nj*nk) + 255)/256;
forward_kernel<<< numBlocks, blocksize >>> (u, v, w, x_fwd, y_fwd, z_fwd, h, ni, nj, nk, cfldt, dt);
}
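The extern "C" wrappers in this file all follow the same pattern: flat device buffers, 256-thread blocks, and a grid rounded up to cover the buffer. A minimal, hypothetical host-side set-up for gpu_solve_forward (buffer sizes follow the staggered layout implied by gpu_advect_velocity below; names and initial values are illustrative only):

// Hypothetical host-side call; assumes the forward map starts as the identity map.
void example_forward_step(int ni, int nj, int nk, float h, float cfldt, float dt)
{
    float *u, *v, *w, *x_fwd, *y_fwd, *z_fwd;
    cudaMalloc(&u, sizeof(float)*(ni+1)*nj*nk);   // x-face velocities
    cudaMalloc(&v, sizeof(float)*ni*(nj+1)*nk);   // y-face velocities
    cudaMalloc(&w, sizeof(float)*ni*nj*(nk+1));   // z-face velocities
    cudaMalloc(&x_fwd, sizeof(float)*ni*nj*nk);   // cell-centred forward map
    cudaMalloc(&y_fwd, sizeof(float)*ni*nj*nk);
    cudaMalloc(&z_fwd, sizeof(float)*ni*nj*nk);
    // ... upload velocities and initialise (x_fwd, y_fwd, z_fwd) to (i*h, j*h, k*h) ...
    gpu_solve_forward(u, v, w, x_fwd, y_fwd, z_fwd, h, ni, nj, nk, cfldt, dt);
    // ... use or download the updated map, then cudaFree the buffers ...
}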
extern "C" void gpu_solve_backwardDMC(float *u, float *v, float *w,
float *x_in, float *y_in, float *z_in,
float *x_out, float *y_out, float *z_out,
float h, int ni, int nj, int nk, float substep)
{
int blocksize = 256;
int numBlocks = ((ni*nj*nk) + 255)/256;
DMC_backward_kernel<<< numBlocks, blocksize >>> (u, v, w, x_in, y_in, z_in, x_out, y_out, z_out, h, ni, nj, nk, substep);
}
extern "C" void gpu_advect_velocity(float *u, float *v, float *w,
float *u_init, float *v_init, float *w_init,
float *backward_x, float *backward_y, float *backward_z,
float h, int ni, int nj, int nk, bool is_point)
{
int blocksize = 256;
int numBlocks_u = ((ni+1)*nj*nk + 255)/256;
int numBlocks_v = (ni*(nj+1)*nk + 255)/256;
int numBlocks_w = (ni*nj*(nk+1) + 255)/256;
advect_kernel<<< numBlocks_u, blocksize >>>(u, u_init, backward_x, backward_y, backward_z, h, ni, nj, nk, 1, 0, 0, is_point);
advect_kernel<<< numBlocks_v, blocksize >>>(v, v_init, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 1, 0, is_point);
advect_kernel<<< numBlocks_w, blocksize >>>(w, w_init, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 0, 1, is_point);
}
extern "C" void gpu_advect_vel_double(float *u, float *v, float *w,
float *utemp, float *vtemp, float *wtemp,
float *backward_x, float *backward_y, float *backward_z,
float *backward_xprev, float *backward_yprev, float *backward_zprev,
float h, int ni, int nj, int nk, bool is_point, float blend_coeff)
{
int blocksize = 256;
int numBlocks_u = ((ni+1)*nj*nk + 255)/256;
int numBlocks_v = (ni*(nj+1)*nk + 255)/256;
int numBlocks_w = (ni*nj*(nk+1) + 255)/256;
doubleAdvect_kernel<<< numBlocks_u, blocksize >>> (u, utemp, backward_x,backward_y,backward_z,
backward_xprev, backward_yprev, backward_zprev,h,ni,nj,nk, 1, 0, 0, is_point, blend_coeff);
doubleAdvect_kernel<<< numBlocks_v, blocksize >>> (v, vtemp, backward_x,backward_y,backward_z,
backward_xprev, backward_yprev, backward_zprev,h,ni,nj,nk, 0, 1, 0, is_point, blend_coeff);
doubleAdvect_kernel<<< numBlocks_w, blocksize >>> (w, wtemp, backward_x,backward_y,backward_z,
backward_xprev, backward_yprev, backward_zprev,h,ni,nj,nk, 0, 0, 1, is_point, blend_coeff);
}
extern "C" void gpu_advect_field(float *field, float *field_init,
float *backward_x, float *backward_y, float *backward_z,
float h, int ni, int nj, int nk, bool is_point)
{
int blocksize = 256;
int numBlocks = ((ni*nj*nk) + 255)/256;
advect_kernel<<< numBlocks, blocksize >>>(field, field_init, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 0, 0, is_point);
}
extern "C" void gpu_advect_field_double(float *field, float *field_prev,
float *backward_x, float *backward_y, float *backward_z,
float *backward_xprev, float *backward_yprev, float *backward_zprev,
float h, int ni, int nj, int nk, bool is_point, float blend_coeff)
{
int blocksize = 256;
int numBlocks = ((ni*nj*nk) + 255)/256;
doubleAdvect_kernel<<< numBlocks, blocksize >>> (field, field_prev, backward_x, backward_y, backward_z,
backward_xprev, backward_yprev, backward_zprev,h,ni,nj,nk, 0, 0, 0, is_point, blend_coeff);
}
extern "C" void gpu_compensate_velocity(float *u, float *v, float *w,
float *du, float *dv, float *dw,
float *u_src, float *v_src, float *w_src,
float *forward_x, float *forward_y, float *forward_z,
float *backward_x, float *backward_y, float *backward_z,
float h, int ni, int nj, int nk, bool is_point)
{
int blocksize = 256;
int numBlocks_u = ((ni+1)*nj*nk + 255)/256;
int numBlocks_v = (ni*(nj+1)*nk + 255)/256;
int numBlocks_w = (ni*nj*(nk+1) + 255)/256;
// error at time 0 (forward-mapped u, v, w minus du, dv, dw) is written into u_src, v_src, w_src
compensate_kernel<<< numBlocks_u, blocksize>>>(u, du, u_src, forward_x, forward_y, forward_z, h, ni, nj, nk, 1, 0, 0, is_point);
compensate_kernel<<< numBlocks_v, blocksize>>>(v, dv, v_src, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 1, 0, is_point);
compensate_kernel<<< numBlocks_w, blocksize>>>(w, dw, w_src, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 0, 1, is_point);
// now subtract half of that error, mapped to time t through the backward map; the compensated
// velocity will be stored in gpu.u, gpu.v, gpu.w, while du, dv, dw keep an uncompensated copy
// for the extrema clamping below
cudaMemcpy(du, u, sizeof(float)*(ni+1)*nj*nk, cudaMemcpyDeviceToDevice);
cudaMemcpy(dv, v, sizeof(float)*ni*(nj+1)*nk, cudaMemcpyDeviceToDevice);
cudaMemcpy(dw, w, sizeof(float)*ni*nj*(nk+1), cudaMemcpyDeviceToDevice);
cumulate_kernel<<< numBlocks_u, blocksize >>>(u_src, u, backward_x, backward_y, backward_z, h, ni, nj, nk, 1, 0, 0, is_point, -0.5f);
cumulate_kernel<<< numBlocks_v, blocksize >>>(v_src, v, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 1, 0, is_point, -0.5f);
cumulate_kernel<<< numBlocks_w, blocksize >>>(w_src, w, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 0, 1, is_point, -0.5f);
// clamp extrema, clamped result will be in gpu.u, gpu.v, gpu.w
clampExtrema_kernel<<< numBlocks_u, blocksize >>>(du, u, ni+1, nj, nk);
clampExtrema_kernel<<< numBlocks_v, blocksize >>>(dv, v, ni, nj+1, nk);
clampExtrema_kernel<<< numBlocks_w, blocksize >>>(dw, w, ni, nj, nk+1);
}
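Taken together, the three stages of gpu_compensate_velocity amount to a MacCormack/BFECC-style error compensation (this is a reading of the calls above, not a comment from the original source). Writing $\tilde{u} = A(u_0)$ for the advected velocity, $F(\cdot)$ for sampling through the forward map and $B(\cdot)$ for sampling through the backward map, the sequence computes
\[
e \;=\; F(\tilde{u}) - u_0, \qquad u \;=\; \tilde{u} \;-\; \tfrac{1}{2}\, B(e),
\]
and finally clamps $u$ to the local extrema of the uncompensated $\tilde{u}$ (the copy kept in du, dv, dw) to suppress overshoot.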
extern "C" void gpu_compensate_field(float *u, float *du, float *u_src,
float *forward_x, float *forward_y, float *forward_z,
float *backward_x, float *backward_y, float *backward_z,
float h, int ni, int nj, int nk, bool is_point)
{
int blocksize = 256;
int numBlocks_u = ((ni+1)*nj*nk + 255)/256;
// error at time 0 (forward-mapped u minus du) is written into u_src
compensate_kernel<<< numBlocks_u, blocksize>>>(u, du, u_src, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 0, 0, is_point);
// now subtract error at time t, compensated velocity will be stored in gpu.u
cudaMemcpy(du, u, sizeof(float)*(ni+1)*nj*nk, cudaMemcpyDeviceToDevice);
cumulate_kernel<<< numBlocks_u, blocksize >>>(u_src, u, backward_x, backward_y, backward_z, h, ni, nj, nk, 0, 0, 0, is_point, -0.5f);
// clamp extrema, clamped result will be in gpu.u
clampExtrema_kernel<<< numBlocks_u, blocksize >>>(du, u, ni, nj, nk);
}
extern "C" void gpu_accumulate_velocity(float *u_change, float *v_change, float *w_change,
float *du_init, float *dv_init, float *dw_init,
float *forward_x, float *forward_y, float *forward_z,
float h, int ni, int nj, int nk, bool is_point, float coeff)
{
int blocksize = 256;
int numBlocks_u = ((ni+1)*nj*nk + 255)/256;
int numBlocks_v = (ni*(nj+1)*nk + 255)/256;
int numBlocks_w = (ni*nj*(nk+1) + 255)/256;
cumulate_kernel<<< numBlocks_u, blocksize >>> (u_change, du_init, forward_x, forward_y, forward_z, h, ni, nj, nk, 1, 0, 0, is_point, coeff);
cumulate_kernel<<< numBlocks_v, blocksize >>> (v_change, dv_init, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 1, 0, is_point, coeff);
cumulate_kernel<<< numBlocks_w, blocksize >>> (w_change, dw_init, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 0, 1, is_point, coeff);
}
extern "C" void gpu_accumulate_field(float *field_change, float *dfield_init,
float *forward_x, float *forward_y, float *forward_z,
float h, int ni, int nj, int nk, bool is_point, float coeff)
{
int blocksize = 256;
int numBlocks = ((ni*nj*nk) + 255)/256;
cumulate_kernel<<< numBlocks, blocksize >>> (field_change, dfield_init, forward_x, forward_y, forward_z, h, ni, nj, nk, 0, 0, 0, is_point, coeff);
}
extern "C" void gpu_estimate_distortion(float *du,
float *x_back, float *y_back, float *z_back,
float *x_fwd, float *y_fwd, float *z_fwd,
float h, int ni, int nj, int nk)
{
int blocksize = 256;
int est_numBlocks = ((ni*nj*nk) + 255)/256;
// distortion will be stored in gpu.du
estimate_kernel<<< est_numBlocks, blocksize>>> (du, x_back, y_back, z_back, x_fwd, y_fwd, z_fwd, h, ni, nj, nk);
}
extern "C" void gpu_semilag(float *field, float *field_src,
float *u, float *v, float *w,
int dim_x, int dim_y, int dim_z,
float h, int ni, int nj, int nk, float cfldt, float dt)
{
int blocksize = 256;
int total_num = (ni+dim_x)*(nj+dim_y)*(nk+dim_z);
int numBlocks = (total_num + 255)/256;
semilag_kernel<<<numBlocks, blocksize>>>(field, field_src, u, v, w, dim_x, dim_y, dim_z, h, ni, nj, nk, cfldt, dt);
}
extern "C" void gpu_add(float *field1, float *field2, float coeff, int number)
{
int blocksize = 256;
int numBlocks = (number + 255)/256;
add_kernel<<<numBlocks, blocksize>>>(field1, field2, coeff, number);
}
|
the_stack
|
#include <torch/extension.h>
// #include <cuda.h>
// #include <cuda_runtime.h>
// #include <iostream>
// Copied from fast-soft-sort (https://bit.ly/3r0gOav) with the following modifications:
// - replace numpy functions with torch equivalents
// - re-write in CUDA
// - return solution in place
// - added backward pass (vector jacobian product)
// Copied from scikit-learn with the following modifications:
// - use decreasing constraints by default,
// - do not return solution in place, rather save in array `sol`,
// - avoid some needless multiplications.
// namespace {
// Numerically stable log-add-exp
template <typename scalar_t>
__device__ __forceinline__ scalar_t log_add_exp(scalar_t x, scalar_t y) {
scalar_t larger = max(x, y);
scalar_t smaller = min(x, y);
return larger + log1p(exp(smaller - larger));
}
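The helper above is the usual log-sum-exp trick: with $m = \max(x, y)$,
\[
\log\!\bigl(e^{x} + e^{y}\bigr) \;=\; m + \log\!\bigl(1 + e^{\min(x,y)-m}\bigr) \;=\; m + \operatorname{log1p}\!\bigl(e^{\min(x,y)-m}\bigr),
\]
so the exponential is only ever taken of a non-positive argument and cannot overflow.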
// Computes the partition (block sizes) corresponding to the solution, writing into sizes. Expects sizes to be zero-initialized.
template <typename scalar_t>
__device__ void partition(
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> solution,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes,
int n,
int b) {
const scalar_t eps = 1.0e-9;
int tail = 1;
if (n > 0) {
sizes[b][0] = 1;
}
for (int i = 1; i < n; i++) {
if (std::abs(solution[b][i] - solution[b][i - 1]) > eps) {
tail += 1;
}
sizes[b][tail - 1] += 1;
}
}
template <typename scalar_t>
__global__ void isotonic_l2_kernel(
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sums,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> c,
int n,
int batch) {
const int b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= batch) {
// outside the batch
return;
}
// target describes a list of blocks. at any time, if [i..j] (inclusive) is
// an active block, then target[i] := j and target[j] := i.
for (int i = 0; i < n; i++) {
c[b][i] = 1.0;
sol[b][i] = s[b][i];
sums[b][i] = s[b][i];
target[b][i] = i;
}
int i = 0;
while (i < n) {
auto k = target[b][i] + 1;
if (k == n) {
break;
}
if (sol[b][i] > sol[b][k]) {
i = k;
continue;
}
auto sum_y = sums[b][i];
auto sum_c = c[b][i];
while (true) {
// We are within an increasing subsequence
auto prev_y = sol[b][k];
sum_y += sums[b][k];
sum_c += c[b][k];
k = target[b][k] + 1;
if ((k == n) || (prev_y > sol[b][k])) {
// Non-singleton increasing subsequence is finished,
// update first entry.
sol[b][i] = sum_y / sum_c;
sums[b][i] = sum_y;
c[b][i] = sum_c;
target[b][i] = k - 1;
target[b][k - 1] = i;
if (i > 0) {
// Backtrack if we can. This makes the algorithm
// single-pass and ensures O(n) complexity.
i = target[b][i - 1];
}
// Otherwise, restart from the same point
break;
}
}
}
// Reconstruct the solution
i = 0;
while (i < n) {
auto k = target[b][i] + 1;
for (int j = i + 1; j < k; j++) {
sol[b][j] = sol[b][i];
}
i = k;
}
}
template <typename scalar_t>
__global__ void isotonic_kl_kernel(
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> y,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_y_,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_w_,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target,
int n,
int batch) {
const int b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= batch) {
// outside the batch
return;
}
// target describes a list of blocks. At any time, if [i..j] (inclusive) is
// an active block, then target[i] := j and target[j] := i.
for (int i = 0; i < n; i++) {
sol[b][i] = y[b][i] - w[b][i];
lse_y_[b][i] = y[b][i];
lse_w_[b][i] = w[b][i];
target[b][i] = i;
}
int i = 0;
while (i < n) {
auto k = target[b][i] + 1;
if (k == n) {
break;
}
if (sol[b][i] > sol[b][k]) {
i = k;
continue;
}
auto lse_y = lse_y_[b][i];
auto lse_w = lse_w_[b][i];
while (true) {
// We are within an increasing subsequence
auto prev_y = sol[b][k];
lse_y = log_add_exp(lse_y, lse_y_[b][k]);
lse_w = log_add_exp(lse_w, lse_w_[b][k]);
k = target[b][k] + 1;
if ((k == n) || (prev_y > sol[b][k])) {
// Non-singleton increasing subsequence is finished,
// update first entry.
sol[b][i] = lse_y - lse_w;
lse_y_[b][i] = lse_y;
lse_w_[b][i] = lse_w;
target[b][i] = k - 1;
target[b][k - 1] = i;
if (i > 0) {
// Backtrack if we can. This makes the algorithm
// single-pass and ensures O(n) complexity.
i = target[b][i - 1];
}
// Otherwise, restart from the same point
break;
}
}
}
// Reconstruct the solution
i = 0;
while (i < n) {
auto k = target[b][i] + 1;
for (int j = i + 1; j < k; j++) {
sol[b][j] = sol[b][i];
}
i = k;
}
}
template <typename scalar_t>
__global__ void isotonic_l2_backward_kernel(
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, // not used
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes,
int n,
int batch) {
int end;
scalar_t sum;
scalar_t val;
const int b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= batch) {
// outside the batch
return;
}
int start = 0;
partition(sol, sizes, n, b);
for (int size = 0; (size < n && sizes[b][size] > 0); size++) {  // check the bound before indexing sizes
end = start + sizes[b][size];
sum = 0;
val = 1.0 / (scalar_t) sizes[b][size];
for (int i = start; i < end; i++) {
sum += grad_input[b][i];
}
for (int i = start; i < end; i++) {
ret[b][i] = val * sum;
}
start = end;
}
}
template <typename scalar_t>
__global__ void isotonic_kl_backward_kernel(
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes,
int n,
int batch) {
int end;
scalar_t sum;
scalar_t softmax;
const int b = blockIdx.x * blockDim.x + threadIdx.x;
if (b >= batch) {
// outside the batch
return;
}
int start = 0;
partition(sol, sizes, n, b);
for (int size = 0; (size < n && sizes[b][size] > 0); size++) {  // check the bound before indexing sizes
end = start + sizes[b][size];
sum = 0;
softmax = 0;
for (int i = start; i < end; i++) {
softmax += std::exp(s[b][i]);
sum += grad_input[b][i];
}
for (int i = start; i < end; i++) {
ret[b][i] = std::exp(s[b][i]) / softmax * sum;
}
start = end;
}
}
// Solves an isotonic regression problem using PAV.
// Formally, it solves argmin_{v_1 >= ... >= v_n} 0.5 ||v - y||^2.
torch::Tensor isotonic_l2(torch::Tensor y) {
auto batch = y.size(0);
auto n = y.size(1);
auto sol = torch::zeros_like(y);
auto sums = torch::zeros_like(y);
auto target = torch::zeros_like(y);
auto c = torch::zeros_like(y);
const int threads = 1024;
const int blocks = (batch + threads - 1) / threads;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_l2", ([&] {
isotonic_l2_kernel<scalar_t><<<blocks, threads>>>(
y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sums.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
c.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
n,
batch);
}));
return sol;
}
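A minimal sketch of calling the bindings directly from C++ (illustrative usage only, not part of the extension; it relies on the torch/extension.h include at the top of this file and assumes a CUDA-capable build):

// Hypothetical usage of isotonic_l2 / isotonic_l2_backward defined above.
void example_isotonic() {
    // batch of 4 problems with 10 values each, on the GPU
    auto y = torch::randn({4, 10}, torch::device(torch::kCUDA).dtype(torch::kFloat32));
    auto sol = isotonic_l2(y);                              // projection onto v_1 >= ... >= v_n
    auto grad_out = torch::ones_like(sol);
    auto grad_in = isotonic_l2_backward(y, sol, grad_out);  // vector-Jacobian product
}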
// Solves isotonic optimization with KL divergence using PAV.
// Formally, it solves argmin_{v_1 >= ... >= v_n} <e^{y-v}, 1> + <e^w, v>.
torch::Tensor isotonic_kl(torch::Tensor y, torch::Tensor w) {
auto batch = y.size(0);
auto n = y.size(1);
auto sol = torch::zeros_like(y);
auto lse_y_ = torch::zeros_like(y);
auto lse_w_ = torch::zeros_like(y);
auto target = torch::zeros_like(y);
const int threads = 1024;
const int blocks = (batch + threads - 1) / threads;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_kl", ([&] {
isotonic_kl_kernel<scalar_t><<<blocks, threads>>>(
y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
w.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
lse_y_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
lse_w_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
n,
batch);
}));
return sol;
}
torch::Tensor isotonic_l2_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) {
auto batch = sol.size(0);
auto n = sol.size(1);
auto ret = torch::zeros_like(sol);
auto sizes = torch::zeros_like(sol);
const int threads = 1024;
const int blocks = (batch + threads - 1) / threads;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_l2_backward", ([&] {
isotonic_l2_backward_kernel<scalar_t><<<blocks, threads>>>(
s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
n,
batch);
}));
return ret;
}
torch::Tensor isotonic_kl_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) {
auto batch = sol.size(0);
auto n = sol.size(1);
auto ret = torch::zeros_like(sol);
auto sizes = torch::zeros_like(sol);
const int threads = 1024;
const int blocks = (batch + threads - 1) / threads;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_kl_backward", ([&] {
isotonic_kl_backward_kernel<scalar_t><<<blocks, threads>>>(
s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
n,
batch);
}));
return ret;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("isotonic_l2", &isotonic_l2, "Isotonic L2");
m.def("isotonic_l2_backward", &isotonic_l2_backward, "Isotonic L2 Backward");
m.def("isotonic_kl", &isotonic_kl, "Isotonic KL");
m.def("isotonic_kl_backward", &isotonic_kl_backward, "Isotonic KL Backward");
}
|
the_stack
|
#include <cuda.h>
#include "Utility.h"
#include "math.h" // CUDA math library
#include "qr.cuh"
#define gone 1065353216
#define gsine_pi_over_eight 1053028117
#define gcosine_pi_over_eight 1064076127
#define gone_half 0.5f
#define gsmall_number 1.e-12f
#define gtiny_number 1.e-20f
#define gfour_gamma_squared 5.8284273147583007813f
namespace mn {
namespace math {
union un {
float f;
unsigned int ui;
};
template<typename T>
__forceinline__ __device__ void svd(
T a11, T a12, T a13, T a21, T a22, T a23, T a31, T a32, T a33, // input A
T& u11, T& u12, T& u13, T& u21, T& u22, T& u23, T& u31, T& u32, T& u33, // output U
T& s11,
//float &s12, float &s13, float &s21,
T& s22,
//float &s23, float &s31, float &s32,
T& s33, // output S
T& v11, T& v12, T& v13, T& v21, T& v22, T& v23, T& v31, T& v32, T& v33 // output V
)
{
un Sa11, Sa21, Sa31, Sa12, Sa22, Sa32, Sa13, Sa23, Sa33;
un Su11, Su21, Su31, Su12, Su22, Su32, Su13, Su23, Su33;
un Sv11, Sv21, Sv31, Sv12, Sv22, Sv32, Sv13, Sv23, Sv33;
un Sc, Ss, Sch, Ssh;
un Stmp1, Stmp2, Stmp3, Stmp4, Stmp5;
un Ss11, Ss21, Ss31, Ss22, Ss32, Ss33;
un Sqvs, Sqvvx, Sqvvy, Sqvvz;
Sa11.f = a11;
Sa12.f = a12;
Sa13.f = a13;
Sa21.f = a21;
Sa22.f = a22;
Sa23.f = a23;
Sa31.f = a31;
Sa32.f = a32;
Sa33.f = a33;
//###########################################################
// Compute normal equations matrix
//###########################################################
Ss11.f = Sa11.f * Sa11.f;
Stmp1.f = Sa21.f * Sa21.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Stmp1.f = Sa31.f * Sa31.f;
Ss11.f = __fadd_rn(Stmp1.f, Ss11.f);
Ss21.f = Sa12.f * Sa11.f;
Stmp1.f = Sa22.f * Sa21.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Stmp1.f = Sa32.f * Sa31.f;
Ss21.f = __fadd_rn(Stmp1.f, Ss21.f);
Ss31.f = Sa13.f * Sa11.f;
Stmp1.f = Sa23.f * Sa21.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Stmp1.f = Sa33.f * Sa31.f;
Ss31.f = __fadd_rn(Stmp1.f, Ss31.f);
Ss22.f = Sa12.f * Sa12.f;
Stmp1.f = Sa22.f * Sa22.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Stmp1.f = Sa32.f * Sa32.f;
Ss22.f = __fadd_rn(Stmp1.f, Ss22.f);
Ss32.f = Sa13.f * Sa12.f;
Stmp1.f = Sa23.f * Sa22.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Stmp1.f = Sa33.f * Sa32.f;
Ss32.f = __fadd_rn(Stmp1.f, Ss32.f);
Ss33.f = Sa13.f * Sa13.f;
Stmp1.f = Sa23.f * Sa23.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
Stmp1.f = Sa33.f * Sa33.f;
Ss33.f = __fadd_rn(Stmp1.f, Ss33.f);
Sqvs.f = 1.f;
Sqvvx.f = 0.f;
Sqvvy.f = 0.f;
Sqvvz.f = 0.f;
//###########################################################
// Solve symmetric eigenproblem using Jacobi iteration
//###########################################################
for (int i = 0; i < 4; i++) {
Ssh.f = Ss21.f * 0.5f;
Stmp5.f = __fsub_rn(Ss11.f, Ss22.f);
Stmp2.f = Ssh.f * Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui & Ssh.ui;
Sch.ui = Stmp1.ui & Stmp5.ui;
Stmp2.ui = ~Stmp1.ui & gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f * Ssh.f;
Sch.f = Stmp4.f * Sch.f;
Stmp1.f = gfour_gamma_squared * Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight & Stmp1.ui;
Ssh.ui = ~Stmp1.ui & Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight & Stmp1.ui;
Sch.ui = ~Stmp1.ui & Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f * Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss33.f = Ss33.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss33.f = Ss33.f * Stmp3.f;
Stmp1.f = Ss.f * Ss31.f;
Stmp2.f = Ss.f * Ss32.f;
Ss31.f = Sc.f * Ss31.f;
Ss32.f = Sc.f * Ss32.f;
Ss31.f = __fadd_rn(Stmp2.f, Ss31.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp1.f);
Stmp2.f = Ss.f * Ss.f;
Stmp1.f = Ss22.f * Stmp2.f;
Stmp3.f = Ss11.f * Stmp2.f;
Stmp4.f = Sc.f * Sc.f;
Ss11.f = Ss11.f * Stmp4.f;
Ss22.f = Ss22.f * Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp1.f);
Ss22.f = __fadd_rn(Ss22.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss21.f, Ss21.f);
Ss21.f = Ss21.f * Stmp4.f;
Stmp4.f = Sc.f * Ss.f;
Stmp2.f = Stmp2.f * Stmp4.f;
Stmp5.f = Stmp5.f * Stmp4.f;
Ss11.f = __fadd_rn(Ss11.f, Stmp2.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp5.f);
Ss22.f = __fsub_rn(Ss22.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f * Sqvvx.f;
Stmp2.f = Ssh.f * Sqvvy.f;
Stmp3.f = Ssh.f * Sqvvz.f;
Ssh.f = Ssh.f * Sqvs.f;
Sqvs.f = Sch.f * Sqvs.f;
Sqvvx.f = Sch.f * Sqvvx.f;
Sqvvy.f = Sch.f * Sqvvy.f;
Sqvvz.f = Sch.f * Sqvvz.f;
Sqvvz.f = __fadd_rn(Sqvvz.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp3.f);
Sqvvx.f = __fadd_rn(Sqvvx.f, Stmp2.f);
Sqvvy.f = __fsub_rn(Sqvvy.f, Stmp1.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
//////////////////////////////////////////////////////////////////////////
// (1->3)
//////////////////////////////////////////////////////////////////////////
Ssh.f = Ss32.f * 0.5f;
Stmp5.f = __fsub_rn(Ss22.f, Ss33.f);
Stmp2.f = Ssh.f * Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui & Ssh.ui;
Sch.ui = Stmp1.ui & Stmp5.ui;
Stmp2.ui = ~Stmp1.ui & gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f * Ssh.f;
Sch.f = Stmp4.f * Sch.f;
Stmp1.f = gfour_gamma_squared * Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight & Stmp1.ui;
Ssh.ui = ~Stmp1.ui & Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight & Stmp1.ui;
Sch.ui = ~Stmp1.ui & Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f * Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss11.f = Ss11.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss31.f = Ss31.f * Stmp3.f;
Ss11.f = Ss11.f * Stmp3.f;
Stmp1.f = Ss.f * Ss21.f;
Stmp2.f = Ss.f * Ss31.f;
Ss21.f = Sc.f * Ss21.f;
Ss31.f = Sc.f * Ss31.f;
Ss21.f = __fadd_rn(Stmp2.f, Ss21.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp1.f);
Stmp2.f = Ss.f * Ss.f;
Stmp1.f = Ss33.f * Stmp2.f;
Stmp3.f = Ss22.f * Stmp2.f;
Stmp4.f = Sc.f * Sc.f;
Ss22.f = Ss22.f * Stmp4.f;
Ss33.f = Ss33.f * Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp1.f);
Ss33.f = __fadd_rn(Ss33.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss32.f, Ss32.f);
Ss32.f = Ss32.f * Stmp4.f;
Stmp4.f = Sc.f * Ss.f;
Stmp2.f = Stmp2.f * Stmp4.f;
Stmp5.f = Stmp5.f * Stmp4.f;
Ss22.f = __fadd_rn(Ss22.f, Stmp2.f);
Ss32.f = __fsub_rn(Ss32.f, Stmp5.f);
Ss33.f = __fsub_rn(Ss33.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f * Sqvvx.f;
Stmp2.f = Ssh.f * Sqvvy.f;
Stmp3.f = Ssh.f * Sqvvz.f;
Ssh.f = Ssh.f * Sqvs.f;
Sqvs.f = Sch.f * Sqvs.f;
Sqvvx.f = Sch.f * Sqvvx.f;
Sqvvy.f = Sch.f * Sqvvy.f;
Sqvvz.f = Sch.f * Sqvvz.f;
Sqvvx.f = __fadd_rn(Sqvvx.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp1.f);
Sqvvy.f = __fadd_rn(Sqvvy.f, Stmp3.f);
Sqvvz.f = __fsub_rn(Sqvvz.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU q %.20g %.20g %.20g %.20g\n", Sqvvx.f, Sqvvy.f, Sqvvz.f, Sqvs.f);
#endif
#if 1
//////////////////////////////////////////////////////////////////////////
// 1 -> 2
//////////////////////////////////////////////////////////////////////////
Ssh.f = Ss31.f * 0.5f;
Stmp5.f = __fsub_rn(Ss33.f, Ss11.f);
Stmp2.f = Ssh.f * Ssh.f;
Stmp1.ui = (Stmp2.f >= gtiny_number) ? 0xffffffff : 0;
Ssh.ui = Stmp1.ui & Ssh.ui;
Sch.ui = Stmp1.ui & Stmp5.ui;
Stmp2.ui = ~Stmp1.ui & gone;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp4.f = __frsqrt_rn(Stmp3.f);
Ssh.f = Stmp4.f * Ssh.f;
Sch.f = Stmp4.f * Sch.f;
Stmp1.f = gfour_gamma_squared * Stmp1.f;
Stmp1.ui = (Stmp2.f <= Stmp1.f) ? 0xffffffff : 0;
Stmp2.ui = gsine_pi_over_eight & Stmp1.ui;
Ssh.ui = ~Stmp1.ui & Ssh.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp2.ui = gcosine_pi_over_eight & Stmp1.ui;
Sch.ui = ~Stmp1.ui & Sch.ui;
Sch.ui = Sch.ui | Stmp2.ui;
Stmp1.f = Ssh.f * Ssh.f;
Stmp2.f = Sch.f * Sch.f;
Sc.f = __fsub_rn(Stmp2.f, Stmp1.f);
Ss.f = Sch.f * Ssh.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("GPU s %.20g, c %.20g, sh %.20g, ch %.20g\n", Ss.f, Sc.f, Ssh.f, Sch.f);
#endif
//###########################################################
// Perform the actual Givens conjugation
//###########################################################
Stmp3.f = __fadd_rn(Stmp1.f, Stmp2.f);
Ss22.f = Ss22.f * Stmp3.f;
Ss32.f = Ss32.f * Stmp3.f;
Ss21.f = Ss21.f * Stmp3.f;
Ss22.f = Ss22.f * Stmp3.f;
Stmp1.f = Ss.f * Ss32.f;
Stmp2.f = Ss.f * Ss21.f;
Ss32.f = Sc.f * Ss32.f;
Ss21.f = Sc.f * Ss21.f;
Ss32.f = __fadd_rn(Stmp2.f, Ss32.f);
Ss21.f = __fsub_rn(Ss21.f, Stmp1.f);
Stmp2.f = Ss.f * Ss.f;
Stmp1.f = Ss11.f * Stmp2.f;
Stmp3.f = Ss33.f * Stmp2.f;
Stmp4.f = Sc.f * Sc.f;
Ss33.f = Ss33.f * Stmp4.f;
Ss11.f = Ss11.f * Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp1.f);
Ss11.f = __fadd_rn(Ss11.f, Stmp3.f);
Stmp4.f = __fsub_rn(Stmp4.f, Stmp2.f);
Stmp2.f = __fadd_rn(Ss31.f, Ss31.f);
Ss31.f = Ss31.f * Stmp4.f;
Stmp4.f = Sc.f * Ss.f;
Stmp2.f = Stmp2.f * Stmp4.f;
Stmp5.f = Stmp5.f * Stmp4.f;
Ss33.f = __fadd_rn(Ss33.f, Stmp2.f);
Ss31.f = __fsub_rn(Ss31.f, Stmp5.f);
Ss11.f = __fsub_rn(Ss11.f, Stmp2.f);
#ifdef DEBUG_JACOBI_CONJUGATE
printf("%.20g\n", Ss11.f);
printf("%.20g %.20g\n", Ss21.f, Ss22.f);
printf("%.20g %.20g %.20g\n", Ss31.f, Ss32.f, Ss33.f);
#endif
//###########################################################
// Compute the cumulative rotation, in quaternion form
//###########################################################
Stmp1.f = Ssh.f * Sqvvx.f;
Stmp2.f = Ssh.f * Sqvvy.f;
Stmp3.f = Ssh.f * Sqvvz.f;
Ssh.f = Ssh.f * Sqvs.f;
Sqvs.f = Sch.f * Sqvs.f;
Sqvvx.f = Sch.f * Sqvvx.f;
Sqvvy.f = Sch.f * Sqvvy.f;
Sqvvz.f = Sch.f * Sqvvz.f;
Sqvvy.f = __fadd_rn(Sqvvy.f, Ssh.f);
Sqvs.f = __fsub_rn(Sqvs.f, Stmp2.f);
Sqvvz.f = __fadd_rn(Sqvvz.f, Stmp1.f);
Sqvvx.f = __fsub_rn(Sqvvx.f, Stmp3.f);
#endif
}
//###########################################################
// Normalize quaternion for matrix V
//###########################################################
Stmp2.f = Sqvs.f * Sqvs.f;
Stmp1.f = Sqvvx.f * Sqvvx.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvy.f * Sqvvy.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = Sqvvz.f * Sqvvz.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sqvs.f = Sqvs.f * Stmp1.f;
Sqvvx.f = Sqvvx.f * Stmp1.f;
Sqvvy.f = Sqvvy.f * Stmp1.f;
Sqvvz.f = Sqvvz.f * Stmp1.f;
//###########################################################
// Transform quaternion to matrix V
//###########################################################
Stmp1.f = Sqvvx.f * Sqvvx.f;
Stmp2.f = Sqvvy.f * Sqvvy.f;
Stmp3.f = Sqvvz.f * Sqvvz.f;
Sv11.f = Sqvs.f * Sqvs.f;
Sv22.f = __fsub_rn(Sv11.f, Stmp1.f);
Sv33.f = __fsub_rn(Sv22.f, Stmp2.f);
Sv33.f = __fadd_rn(Sv33.f, Stmp3.f);
Sv22.f = __fadd_rn(Sv22.f, Stmp2.f);
Sv22.f = __fsub_rn(Sv22.f, Stmp3.f);
Sv11.f = __fadd_rn(Sv11.f, Stmp1.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp2.f);
Sv11.f = __fsub_rn(Sv11.f, Stmp3.f);
Stmp1.f = __fadd_rn(Sqvvx.f, Sqvvx.f);
Stmp2.f = __fadd_rn(Sqvvy.f, Sqvvy.f);
Stmp3.f = __fadd_rn(Sqvvz.f, Sqvvz.f);
Sv32.f = Sqvs.f * Stmp1.f;
Sv13.f = Sqvs.f * Stmp2.f;
Sv21.f = Sqvs.f * Stmp3.f;
Stmp1.f = Sqvvy.f * Stmp1.f;
Stmp2.f = Sqvvz.f * Stmp2.f;
Stmp3.f = Sqvvx.f * Stmp3.f;
Sv12.f = __fsub_rn(Stmp1.f, Sv21.f);
Sv23.f = __fsub_rn(Stmp2.f, Sv32.f);
Sv31.f = __fsub_rn(Stmp3.f, Sv13.f);
Sv21.f = __fadd_rn(Stmp1.f, Sv21.f);
Sv32.f = __fadd_rn(Stmp2.f, Sv32.f);
Sv13.f = __fadd_rn(Stmp3.f, Sv13.f);
///###########################################################
// Multiply (from the right) with V
//###########################################################
Stmp2.f = Sa12.f;
Stmp3.f = Sa13.f;
Sa12.f = Sv12.f * Sa11.f;
Sa13.f = Sv13.f * Sa11.f;
Sa11.f = Sv11.f * Sa11.f;
Stmp1.f = Sv21.f * Stmp2.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv31.f * Stmp3.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp1.f);
Stmp1.f = Sv22.f * Stmp2.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv32.f * Stmp3.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp1.f);
Stmp1.f = Sv23.f * Stmp2.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp1.f = Sv33.f * Stmp3.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp1.f);
Stmp2.f = Sa22.f;
Stmp3.f = Sa23.f;
Sa22.f = Sv12.f * Sa21.f;
Sa23.f = Sv13.f * Sa21.f;
Sa21.f = Sv11.f * Sa21.f;
Stmp1.f = Sv21.f * Stmp2.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv31.f * Stmp3.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp1.f);
Stmp1.f = Sv22.f * Stmp2.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv32.f * Stmp3.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp1.f);
Stmp1.f = Sv23.f * Stmp2.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp1.f = Sv33.f * Stmp3.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp1.f);
Stmp2.f = Sa32.f;
Stmp3.f = Sa33.f;
Sa32.f = Sv12.f * Sa31.f;
Sa33.f = Sv13.f * Sa31.f;
Sa31.f = Sv11.f * Sa31.f;
Stmp1.f = Sv21.f * Stmp2.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv31.f * Stmp3.f;
Sa31.f = __fadd_rn(Sa31.f, Stmp1.f);
Stmp1.f = Sv22.f * Stmp2.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv32.f * Stmp3.f;
Sa32.f = __fadd_rn(Sa32.f, Stmp1.f);
Stmp1.f = Sv23.f * Stmp2.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
Stmp1.f = Sv33.f * Stmp3.f;
Sa33.f = __fadd_rn(Sa33.f, Stmp1.f);
//###########################################################
// Permute columns such that the singular values are sorted
//###########################################################
Stmp1.f = Sa11.f * Sa11.f;
Stmp4.f = Sa21.f * Sa21.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp4.f = Sa31.f * Sa31.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp2.f = Sa12.f * Sa12.f;
Stmp4.f = Sa22.f * Sa22.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp4.f = Sa32.f * Sa32.f;
Stmp2.f = __fadd_rn(Stmp2.f, Stmp4.f);
Stmp3.f = Sa13.f * Sa13.f;
Stmp4.f = Sa23.f * Sa23.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
Stmp4.f = Sa33.f * Sa33.f;
Stmp3.f = __fadd_rn(Stmp3.f, Stmp4.f);
// Swap columns 1-2 if necessary
Stmp4.ui = (Stmp1.f < Stmp2.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui ^ Sa12.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa11.ui = Sa11.ui ^ Stmp5.ui;
Sa12.ui = Sa12.ui ^ Stmp5.ui;
Stmp5.ui = Sa21.ui ^ Sa22.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa21.ui = Sa21.ui ^ Stmp5.ui;
Sa22.ui = Sa22.ui ^ Stmp5.ui;
Stmp5.ui = Sa31.ui ^ Sa32.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa31.ui = Sa31.ui ^ Stmp5.ui;
Sa32.ui = Sa32.ui ^ Stmp5.ui;
Stmp5.ui = Sv11.ui ^ Sv12.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv11.ui = Sv11.ui ^ Stmp5.ui;
Sv12.ui = Sv12.ui ^ Stmp5.ui;
Stmp5.ui = Sv21.ui ^ Sv22.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv21.ui = Sv21.ui ^ Stmp5.ui;
Sv22.ui = Sv22.ui ^ Stmp5.ui;
Stmp5.ui = Sv31.ui ^ Sv32.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv31.ui = Sv31.ui ^ Stmp5.ui;
Sv32.ui = Sv32.ui ^ Stmp5.ui;
Stmp5.ui = Stmp1.ui ^ Stmp2.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp1.ui = Stmp1.ui ^ Stmp5.ui;
Stmp2.ui = Stmp2.ui ^ Stmp5.ui;
// If columns 1-2 have been swapped, negate 2nd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa12.f = Sa12.f * Stmp4.f;
Sa22.f = Sa22.f * Stmp4.f;
Sa32.f = Sa32.f * Stmp4.f;
Sv12.f = Sv12.f * Stmp4.f;
Sv22.f = Sv22.f * Stmp4.f;
Sv32.f = Sv32.f * Stmp4.f;
// Swap columns 1-3 if necessary
Stmp4.ui = (Stmp1.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa11.ui ^ Sa13.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa11.ui = Sa11.ui ^ Stmp5.ui;
Sa13.ui = Sa13.ui ^ Stmp5.ui;
Stmp5.ui = Sa21.ui ^ Sa23.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa21.ui = Sa21.ui ^ Stmp5.ui;
Sa23.ui = Sa23.ui ^ Stmp5.ui;
Stmp5.ui = Sa31.ui ^ Sa33.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa31.ui = Sa31.ui ^ Stmp5.ui;
Sa33.ui = Sa33.ui ^ Stmp5.ui;
Stmp5.ui = Sv11.ui ^ Sv13.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv11.ui = Sv11.ui ^ Stmp5.ui;
Sv13.ui = Sv13.ui ^ Stmp5.ui;
Stmp5.ui = Sv21.ui ^ Sv23.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv21.ui = Sv21.ui ^ Stmp5.ui;
Sv23.ui = Sv23.ui ^ Stmp5.ui;
Stmp5.ui = Sv31.ui ^ Sv33.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv31.ui = Sv31.ui ^ Stmp5.ui;
Sv33.ui = Sv33.ui ^ Stmp5.ui;
Stmp5.ui = Stmp1.ui ^ Stmp3.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp1.ui = Stmp1.ui ^ Stmp5.ui;
Stmp3.ui = Stmp3.ui ^ Stmp5.ui;
// If columns 1-3 have been swapped, negate 1st column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa11.f = Sa11.f * Stmp4.f;
Sa21.f = Sa21.f * Stmp4.f;
Sa31.f = Sa31.f * Stmp4.f;
Sv11.f = Sv11.f * Stmp4.f;
Sv21.f = Sv21.f * Stmp4.f;
Sv31.f = Sv31.f * Stmp4.f;
// Swap columns 2-3 if necessary
Stmp4.ui = (Stmp2.f < Stmp3.f) ? 0xffffffff : 0;
Stmp5.ui = Sa12.ui ^ Sa13.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa12.ui = Sa12.ui ^ Stmp5.ui;
Sa13.ui = Sa13.ui ^ Stmp5.ui;
Stmp5.ui = Sa22.ui ^ Sa23.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa22.ui = Sa22.ui ^ Stmp5.ui;
Sa23.ui = Sa23.ui ^ Stmp5.ui;
Stmp5.ui = Sa32.ui ^ Sa33.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sa32.ui = Sa32.ui ^ Stmp5.ui;
Sa33.ui = Sa33.ui ^ Stmp5.ui;
Stmp5.ui = Sv12.ui ^ Sv13.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv12.ui = Sv12.ui ^ Stmp5.ui;
Sv13.ui = Sv13.ui ^ Stmp5.ui;
Stmp5.ui = Sv22.ui ^ Sv23.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv22.ui = Sv22.ui ^ Stmp5.ui;
Sv23.ui = Sv23.ui ^ Stmp5.ui;
Stmp5.ui = Sv32.ui ^ Sv33.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Sv32.ui = Sv32.ui ^ Stmp5.ui;
Sv33.ui = Sv33.ui ^ Stmp5.ui;
Stmp5.ui = Stmp2.ui ^ Stmp3.ui;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp2.ui = Stmp2.ui ^ Stmp5.ui;
Stmp3.ui = Stmp3.ui ^ Stmp5.ui;
// If columns 2-3 have been swapped, negate 3rd column of A and V so that V is still a rotation
Stmp5.f = -2.f;
Stmp5.ui = Stmp5.ui & Stmp4.ui;
Stmp4.f = 1.f;
Stmp4.f = __fadd_rn(Stmp4.f, Stmp5.f);
Sa13.f = Sa13.f * Stmp4.f;
Sa23.f = Sa23.f * Stmp4.f;
Sa33.f = Sa33.f * Stmp4.f;
Sv13.f = Sv13.f * Stmp4.f;
Sv23.f = Sv23.f * Stmp4.f;
Sv33.f = Sv33.f * Stmp4.f;
//###########################################################
// Construct QR factorization of A*V (=U*D) using Givens rotations
//###########################################################
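  // U starts as the identity and is updated by three Givens rotations that
  // successively annihilate the subdiagonal entries a21, a31 and a32 of A*V;
  // the diagonal of the rotated A then carries the (signed) singular values
  // copied out as s11, s22, s33 below. Each rotation derives (c, s) from an
  // rsqrt estimate refined by one Newton-Raphson step, mirroring the
  // quaternion normalization above.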
Su11.f = 1.f;
Su12.f = 0.f;
Su13.f = 0.f;
Su21.f = 0.f;
Su22.f = 1.f;
Su23.f = 0.f;
Su31.f = 0.f;
Su32.f = 0.f;
Su33.f = 1.f;
Ssh.f = Sa21.f * Sa21.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui & Sa21.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = max(Sch.f, Sa11.f);
Sch.f = max(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f * Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui & Ssh.ui;
Stmp2.ui = ~Stmp5.ui & Sch.ui;
Sch.ui = Stmp5.ui & Sch.ui;
Ssh.ui = Stmp5.ui & Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f * Stmp1.f;
Ssh.f = Ssh.f * Stmp1.f;
Sc.f = Sch.f * Sch.f;
Ss.f = Ssh.f * Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f * Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f * Sa11.f;
Stmp2.f = Ss.f * Sa21.f;
Sa11.f = Sc.f * Sa11.f;
Sa21.f = Sc.f * Sa21.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa21.f = __fsub_rn(Sa21.f, Stmp1.f);
Stmp1.f = Ss.f * Sa12.f;
Stmp2.f = Ss.f * Sa22.f;
Sa12.f = Sc.f * Sa12.f;
Sa22.f = Sc.f * Sa22.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa22.f = __fsub_rn(Sa22.f, Stmp1.f);
Stmp1.f = Ss.f * Sa13.f;
Stmp2.f = Ss.f * Sa23.f;
Sa13.f = Sc.f * Sa13.f;
Sa23.f = Sc.f * Sa23.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa23.f = __fsub_rn(Sa23.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f * Su11.f;
Stmp2.f = Ss.f * Su12.f;
Su11.f = Sc.f * Su11.f;
Su12.f = Sc.f * Su12.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su12.f = __fsub_rn(Su12.f, Stmp1.f);
Stmp1.f = Ss.f * Su21.f;
Stmp2.f = Ss.f * Su22.f;
Su21.f = Sc.f * Su21.f;
Su22.f = Sc.f * Su22.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su22.f = __fsub_rn(Su22.f, Stmp1.f);
Stmp1.f = Ss.f * Su31.f;
Stmp2.f = Ss.f * Su32.f;
Su31.f = Sc.f * Su31.f;
Su32.f = Sc.f * Su32.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su32.f = __fsub_rn(Su32.f, Stmp1.f);
// Second Givens rotation
Ssh.f = Sa31.f * Sa31.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui & Sa31.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa11.f);
Sch.f = max(Sch.f, Sa11.f);
Sch.f = max(Sch.f, gsmall_number);
Stmp5.ui = (Sa11.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
  Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f * Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui & Ssh.ui;
Stmp2.ui = ~Stmp5.ui & Sch.ui;
Sch.ui = Stmp5.ui & Sch.ui;
Ssh.ui = Stmp5.ui & Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f * Stmp1.f;
Ssh.f = Ssh.f * Stmp1.f;
Sc.f = Sch.f * Sch.f;
Ss.f = Ssh.f * Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f * Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f * Sa11.f;
Stmp2.f = Ss.f * Sa31.f;
Sa11.f = Sc.f * Sa11.f;
Sa31.f = Sc.f * Sa31.f;
Sa11.f = __fadd_rn(Sa11.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f * Sa12.f;
Stmp2.f = Ss.f * Sa32.f;
Sa12.f = Sc.f * Sa12.f;
Sa32.f = Sc.f * Sa32.f;
Sa12.f = __fadd_rn(Sa12.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f * Sa13.f;
Stmp2.f = Ss.f * Sa33.f;
Sa13.f = Sc.f * Sa13.f;
Sa33.f = Sc.f * Sa33.f;
Sa13.f = __fadd_rn(Sa13.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f * Su11.f;
Stmp2.f = Ss.f * Su13.f;
Su11.f = Sc.f * Su11.f;
Su13.f = Sc.f * Su13.f;
Su11.f = __fadd_rn(Su11.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f * Su21.f;
Stmp2.f = Ss.f * Su23.f;
Su21.f = Sc.f * Su21.f;
Su23.f = Sc.f * Su23.f;
Su21.f = __fadd_rn(Su21.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f * Su31.f;
Stmp2.f = Ss.f * Su33.f;
Su31.f = Sc.f * Su31.f;
Su33.f = Sc.f * Su33.f;
Su31.f = __fadd_rn(Su31.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
  // Third Givens rotation
Ssh.f = Sa32.f * Sa32.f;
Ssh.ui = (Ssh.f >= gsmall_number) ? 0xffffffff : 0;
Ssh.ui = Ssh.ui & Sa32.ui;
Stmp5.f = 0.f;
Sch.f = __fsub_rn(Stmp5.f, Sa22.f);
Sch.f = max(Sch.f, Sa22.f);
Sch.f = max(Sch.f, gsmall_number);
Stmp5.ui = (Sa22.f >= Stmp5.f) ? 0xffffffff : 0;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Stmp1.f = Stmp1.f * Stmp2.f;
Sch.f = __fadd_rn(Sch.f, Stmp1.f);
Stmp1.ui = ~Stmp5.ui & Ssh.ui;
Stmp2.ui = ~Stmp5.ui & Sch.ui;
Sch.ui = Stmp5.ui & Sch.ui;
Ssh.ui = Stmp5.ui & Ssh.ui;
Sch.ui = Sch.ui | Stmp1.ui;
Ssh.ui = Ssh.ui | Stmp2.ui;
Stmp1.f = Sch.f * Sch.f;
Stmp2.f = Ssh.f * Ssh.f;
Stmp2.f = __fadd_rn(Stmp1.f, Stmp2.f);
Stmp1.f = __frsqrt_rn(Stmp2.f);
Stmp4.f = Stmp1.f * 0.5f;
Stmp3.f = Stmp1.f * Stmp4.f;
Stmp3.f = Stmp1.f * Stmp3.f;
Stmp3.f = Stmp2.f * Stmp3.f;
Stmp1.f = __fadd_rn(Stmp1.f, Stmp4.f);
Stmp1.f = __fsub_rn(Stmp1.f, Stmp3.f);
Sch.f = Sch.f * Stmp1.f;
Ssh.f = Ssh.f * Stmp1.f;
Sc.f = Sch.f * Sch.f;
Ss.f = Ssh.f * Ssh.f;
Sc.f = __fsub_rn(Sc.f, Ss.f);
Ss.f = Ssh.f * Sch.f;
Ss.f = __fadd_rn(Ss.f, Ss.f);
//###########################################################
// Rotate matrix A
//###########################################################
Stmp1.f = Ss.f * Sa21.f;
Stmp2.f = Ss.f * Sa31.f;
Sa21.f = Sc.f * Sa21.f;
Sa31.f = Sc.f * Sa31.f;
Sa21.f = __fadd_rn(Sa21.f, Stmp2.f);
Sa31.f = __fsub_rn(Sa31.f, Stmp1.f);
Stmp1.f = Ss.f * Sa22.f;
Stmp2.f = Ss.f * Sa32.f;
Sa22.f = Sc.f * Sa22.f;
Sa32.f = Sc.f * Sa32.f;
Sa22.f = __fadd_rn(Sa22.f, Stmp2.f);
Sa32.f = __fsub_rn(Sa32.f, Stmp1.f);
Stmp1.f = Ss.f * Sa23.f;
Stmp2.f = Ss.f * Sa33.f;
Sa23.f = Sc.f * Sa23.f;
Sa33.f = Sc.f * Sa33.f;
Sa23.f = __fadd_rn(Sa23.f, Stmp2.f);
Sa33.f = __fsub_rn(Sa33.f, Stmp1.f);
//###########################################################
// Update matrix U
//###########################################################
Stmp1.f = Ss.f * Su12.f;
Stmp2.f = Ss.f * Su13.f;
Su12.f = Sc.f * Su12.f;
Su13.f = Sc.f * Su13.f;
Su12.f = __fadd_rn(Su12.f, Stmp2.f);
Su13.f = __fsub_rn(Su13.f, Stmp1.f);
Stmp1.f = Ss.f * Su22.f;
Stmp2.f = Ss.f * Su23.f;
Su22.f = Sc.f * Su22.f;
Su23.f = Sc.f * Su23.f;
Su22.f = __fadd_rn(Su22.f, Stmp2.f);
Su23.f = __fsub_rn(Su23.f, Stmp1.f);
Stmp1.f = Ss.f * Su32.f;
Stmp2.f = Ss.f * Su33.f;
Su32.f = Sc.f * Su32.f;
Su33.f = Sc.f * Su33.f;
Su32.f = __fadd_rn(Su32.f, Stmp2.f);
Su33.f = __fsub_rn(Su33.f, Stmp1.f);
v11 = Sv11.f;
v12 = Sv12.f;
v13 = Sv13.f;
v21 = Sv21.f;
v22 = Sv22.f;
v23 = Sv23.f;
v31 = Sv31.f;
v32 = Sv32.f;
v33 = Sv33.f;
u11 = Su11.f;
u12 = Su12.f;
u13 = Su13.f;
u21 = Su21.f;
u22 = Su22.f;
u23 = Su23.f;
u31 = Su31.f;
u32 = Su32.f;
u33 = Su33.f;
s11 = Sa11.f;
//s12 = Sa12.f; s13 = Sa13.f; s21 = Sa21.f;
s22 = Sa22.f;
//s23 = Sa23.f; s31 = Sa31.f; s32 = Sa32.f;
s33 = Sa33.f;
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
   \param[in] AA Input matrix.
\param[out] U Robustly a rotation matrix in Givens form
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix in Givens form
*/
template <typename T>
__forceinline__ __host__ __device__
void singularValueDecomposition(const T AA[4], GivensRotation<double>& U, T Sigma[2], GivensRotation<double>& V) {
double S_Sym[4]; ///< column-major
double A[4]{AA[0], AA[1], AA[2], AA[3]};
polarDecomposition(A, U, S_Sym);
double cosine, sine;
double x = S_Sym[0];
double y = S_Sym[2];
double z = S_Sym[3];
double y2 = y * y;
if (y2 == 0) {
// S is already diagonal
cosine = 1;
sine = 0;
Sigma[0] = x;
Sigma[1] = z;
}
else {
double tau = T(0.5) * (x - z);
double w = sqrt(tau * tau + y2);
// w > y > 0
double t;
if (tau > 0) {
// tau + w > w > y > 0 ==> division is safe
t = y / (tau + w);
}
else {
// tau - w < -w < -y < 0 ==> division is safe
t = y / (tau - w);
}
cosine = T(1) / sqrt(t * t + T(1));
sine = -t * cosine;
/*
V = [cosine -sine; sine cosine]
Sigma = V'SV. Only compute the diagonals for efficiency.
Also utilize symmetry of S and don't form V yet.
*/
double c2 = cosine * cosine;
double csy = 2 * cosine * sine * y;
double s2 = sine * sine;
Sigma[0] = c2 * x - csy + s2 * z;
Sigma[1] = s2 * x + csy + c2 * z;
}
// Sorting
// Polar already guarantees negative sign is on the small magnitude singular value.
if (Sigma[0] < Sigma[1]) {
swap(Sigma, Sigma + 1);
V.c = -sine;
V.s = cosine;
}
else {
V.c = cosine;
V.s = sine;
}
U *= V;
}
template<typename T, unsigned int Dim>
__forceinline__ __device__ void svd(const T F[Dim*Dim], T U[Dim*Dim],
T S[Dim], T V[Dim*Dim]) {
printf("Not implemented yet!\n");
}
/**
\brief 2x2 SVD (singular value decomposition) A=USV'
\param[in] A Input matrix.
\param[out] U Robustly a rotation matrix.
\param[out] Sigma Vector of singular values sorted with decreasing magnitude. The second one can be negative.
\param[out] V Robustly a rotation matrix.
*/
template <>
__forceinline__ __device__
void svd<float, 2>(const float A[4], float U[4], float Sigma[2], float V[4])
{
GivensRotation<double> gv(0, 1);
GivensRotation<double> gu(0, 1);
singularValueDecomposition(A, gu, Sigma, gv);
gu.template fill<2, float>(U);
gv.template fill<2, float>(V);
}
template <>
__forceinline__ __device__
void svd<double, 2>(const double A[4], double U[4], double Sigma[2], double V[4])
{
GivensRotation<double> gv(0, 1);
GivensRotation<double> gu(0, 1);
singularValueDecomposition(A, gu, Sigma, gv);
gu.template fill<2, double>(U);
gv.template fill<2, double>(V);
}
template<>
__forceinline__ __device__ void svd<float, 3>(const float F[9], float U[9],
float S[3], float V[9]) {
  svd(F[0], F[3], F[6], F[1], F[4], F[7], F[2], F[5], F[8],
      U[0], U[3], U[6], U[1], U[4], U[7], U[2], U[5], U[8],
      S[0], S[1], S[2],
      V[0], V[3], V[6], V[1], V[4], V[7], V[2], V[5], V[8]);
}
template<>
__forceinline__ __device__ void svd<double, 3>(const double F[9], double U[9],
double S[3], double V[9]) {
  svd(F[0], F[3], F[6], F[1], F[4], F[7], F[2], F[5], F[8],
      U[0], U[3], U[6], U[1], U[4], U[7], U[2], U[5], U[8],
      S[0], S[1], S[2],
      V[0], V[3], V[6], V[1], V[4], V[7], V[2], V[5], V[8]);
}
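// Hedged usage sketch (not part of the original file; the helper name below is
// illustrative only). Assuming the scalar svd(...) overload used above takes
// its arguments row by row, the 2x2/3x3 wrappers read and write matrices in
// column-major order, M[c * Dim + r]. This helper reconstructs
// A = U * diag(S) * V^T in that layout so the convention can be sanity-checked
// against a known input.
template <typename T, unsigned int Dim>
__forceinline__ __host__ __device__ void svdReconstruct(const T U[Dim * Dim],
                                                        const T S[Dim],
                                                        const T V[Dim * Dim],
                                                        T A[Dim * Dim]) {
  for (unsigned int c = 0; c < Dim; ++c) {
    for (unsigned int r = 0; r < Dim; ++r) {
      // A_rc = sum_k U_rk * S_k * V_ck, with column-major storage M[c*Dim + r]
      T acc = T(0);
      for (unsigned int k = 0; k < Dim; ++k)
        acc += U[k * Dim + r] * S[k] * V[k * Dim + c];
      A[c * Dim + r] = acc;
    }
  }
}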
} ///< namespace math
} ///< namespace mn
#endif
#include <cfloat>
#include "utility.hpp"
using namespace ppl::common;
namespace ppl {
namespace cv {
namespace cuda {
/**************************** function declaration **************************/
RetCode convertTo(const uchar* src, int rows, int cols, int channels,
int src_stride, float* dst, int dst_stride, float alpha,
float beta, cudaStream_t stream);
RetCode convertTo(const float* src, int rows, int cols, int channels,
int src_stride, uchar* dst, int dst_stride, float alpha,
float beta, cudaStream_t stream);
/********************************* add() ***********************************/
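// Element-wise addition of two float planes. The launch in add() passes one
// row stride (dst_stride) for all three images; the *_offset arguments are
// byte offsets into a shared pitched buffer, which is why the pointers are
// rebased through uchar*. Each thread handles two consecutive columns
// (element_x is doubled), with a scalar tail for the last odd column.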
__global__
void addKernel(const float* src0, int rows, int cols, int src0_stride,
int src0_offset, const float* src1, int src1_stride,
int src1_offset, float* dst, int dst_stride, int dst_offset) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * src0_stride;
const float* input0 = (float*)((uchar*)src0 + src0_offset + offset);
const float* input1 = (float*)((uchar*)src1 + src1_offset + offset);
float* output = (float*)((uchar*)dst + dst_offset + element_y * dst_stride);
if (element_x < cols - 1) {
float input_value00, input_value01;
float input_value10, input_value11;
float output_value0, output_value1;
input_value00 = input0[element_x];
input_value01 = input0[element_x + 1];
input_value10 = input1[element_x];
input_value11 = input1[element_x + 1];
output_value0 = input_value00 + input_value10;
output_value1 = input_value01 + input_value11;
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
float input_value0, input_value1, output_value;
input_value0 = input0[element_x];
input_value1 = input1[element_x];
output_value = input_value0 + input_value1;
output[element_x] = output_value;
}
}
RetCode add(const float* src0, int rows, int cols, int channels,
int src0_offset, const float* src1, int src1_offset, float* dst,
int dst_stride, int dst_offset, cudaStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(dst_offset >= 0);
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
addKernel<<<grid, block, 0, stream>>>(src0, rows, columns, dst_stride,
src0_offset, src1, dst_stride, src1_offset, dst, dst_stride, dst_offset);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/******************************* addScalar() *******************************/
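// In-place variant: the kernel reads dst, adds the scalar and writes the
// result back, so a value such as eps can be added to a plane (e.g. var_I in
// the guided filter below) without an extra buffer.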
__global__
void addScalarKernel(float* dst, int rows, int columns, int stride,
int dst_offset, float value) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= columns) {
return;
}
int offset = element_y * stride;
float* output = (float*)((uchar*)dst + dst_offset + offset);
float result = output[element_x];
result += value;
output[element_x] = result;
}
RetCode addScalar(float* dst, int rows, int cols, int channels, int stride,
int dst_offset, float value, cudaStream_t stream) {
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_offset >= 0);
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(columns, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
addScalarKernel<<<grid, block, 0, stream>>>(dst, rows, columns, stride,
dst_offset, value);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/****************************** multiply() ********************************/
__global__
void multiplyKernel(const float* src0, int rows, int cols, int src0_stride,
int src0_offset, const float* src1, int src1_stride,
int src1_offset, float* dst, int dst_stride, int dst_offset,
float scale) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * src0_stride;
const float* input0 = (float*)((uchar*)src0 + src0_offset + offset);
const float* input1 = (float*)((uchar*)src1 + src1_offset + offset);
float* output = (float*)((uchar*)dst + dst_offset + element_y * dst_stride);
if (element_x < cols - 1) {
float input_value00, input_value01;
float input_value10, input_value11;
float output_value0, output_value1;
input_value00 = input0[element_x];
input_value01 = input0[element_x + 1];
input_value10 = input1[element_x];
input_value11 = input1[element_x + 1];
if (scale == 1.f) {
output_value0 = input_value00 * input_value10;
output_value1 = input_value01 * input_value11;
}
else {
output_value0 = input_value00 * input_value10 * scale;
output_value1 = input_value01 * input_value11 * scale;
}
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
float input_value0, input_value1, output_value;
input_value0 = input0[element_x];
input_value1 = input1[element_x];
if (scale == 1.f) {
output_value = input_value0 * input_value1;
}
else {
output_value = input_value0 * input_value1 * scale;
}
output[element_x] = output_value;
}
}
RetCode multiply(const float* src0, int rows, int cols, int channels,
int src0_stride, int src0_offset, const float* src1,
int src1_stride, int src1_offset, float* dst, int dst_stride,
int dst_offset, float scale, cudaStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(dst_offset >= 0);
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
multiplyKernel<<<grid, block, 0, stream>>>(src0, rows, columns, src0_stride,
src0_offset, src1, src1_stride, src1_offset, dst, dst_stride, dst_offset,
scale);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/******************************* divide() ********************************/
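// Element-wise division with an optional scale factor. A zero divisor
// produces 0 in the output rather than inf/NaN.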
__global__
void divideKernel(const float* src0, int rows, int cols, int src0_stride,
int src0_offset, const float* src1, int src1_stride,
int src1_offset, float* dst, int dst_stride, int dst_offset,
float scale) {
int element_x = ((blockIdx.x << kBlockShiftX0) + threadIdx.x) << 1;
int element_y = (blockIdx.y << kBlockShiftY0) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * src0_stride;
const float* input0 = (float*)((uchar*)src0 + src0_offset + offset);
const float* input1 = (float*)((uchar*)src1 + src1_offset + offset);
float* output = (float*)((uchar*)dst + dst_offset + element_y * dst_stride);
if (element_x < cols - 1) {
float input_value00, input_value01;
float input_value10, input_value11;
float output_value0, output_value1;
input_value00 = input0[element_x];
input_value01 = input0[element_x + 1];
input_value10 = input1[element_x];
input_value11 = input1[element_x + 1];
if (scale == 1.f) {
output_value0 = input_value10 == 0 ? 0 : input_value00 / input_value10;
output_value1 = input_value11 == 0 ? 0 : input_value01 / input_value11;
}
else {
output_value0 = input_value10 == 0 ? 0 :
scale * input_value00 / input_value10;
output_value1 = input_value11 == 0 ? 0 :
scale * input_value01 / input_value11;
}
output[element_x] = output_value0;
output[element_x + 1] = output_value1;
}
else {
float input_value0, input_value1, output_value;
input_value0 = input0[element_x];
input_value1 = input1[element_x];
if (scale == 1.f) {
output_value = input_value1 == 0 ? 0 : input_value0 / input_value1;
}
else {
output_value = input_value1 == 0 ? 0 :
scale * input_value0 / input_value1;
}
output[element_x] = output_value;
}
}
RetCode divide(const float* src0, int rows, int cols, int channels,
int src0_stride, int src0_offset, const float* src1,
int src1_stride, int src1_offset, float* dst, int dst_stride,
int dst_offset, float scale, cudaStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(src0_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src1_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(dst_offset >= 0);
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX0;
block.y = kBlockDimY0;
grid.x = divideUp(divideUp(columns, 2, 1), kBlockDimX0, kBlockShiftX0);
grid.y = divideUp(rows, kBlockDimY0, kBlockShiftY0);
divideKernel<<<grid, block, 0, stream>>>(src0, rows, columns, src0_stride,
src0_offset, src1, src1_stride, src1_offset, dst, dst_stride, dst_offset,
scale);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/******************************* subtract() ********************************/
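// Single-stride element-wise difference: src0, src1 and dst are all addressed
// with the same row stride plus per-plane byte offsets, one element per
// thread.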
__global__
void subtractKernel(const float* src0, int rows, int cols, int stride,
int src0_offset, const float* src1, int src1_offset,
float* dst, int dst_offset) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * stride;
float* input0 = (float*)((uchar*)src0 + src0_offset + offset);
float* input1 = (float*)((uchar*)src1 + src1_offset + offset);
float* output = (float*)((uchar*)dst + dst_offset + offset);
float value0 = input0[element_x];
float value1 = input1[element_x];
float result = value0 - value1;
output[element_x] = result;
}
RetCode subtract(const float* src0, int rows, int cols, int channels,
int stride, int src0_offset, const float* src1,
int src1_offset, float* dst, int dst_offset,
cudaStream_t stream) {
PPL_ASSERT(src0 != nullptr);
PPL_ASSERT(src1 != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1 || channels == 3 || channels == 4);
PPL_ASSERT(stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(dst_offset >= 0);
int columns = cols * channels;
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(columns, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
subtractKernel<<<grid, block, 0, stream>>>(src0, rows, columns, stride,
src0_offset, src1, src1_offset, dst, dst_offset);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/******************************* boxFilter() ********************************/
#define RADIUS 8
#define SMALL_KSIZE (RADIUS * 2 + 1)
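// Small-kernel path: for box kernels with both sides at most SMALL_KSIZE (17)
// on a single channel, rowColC1Kernel fuses the horizontal and vertical
// passes. Each thread accumulates a 4-wide horizontal sum into shared memory
// (its own row plus the radius_y rows above and below the tile), then sums
// the cached rows vertically and, if requested, normalizes by
// 1/(ksize_x * ksize_y).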
template <typename Tsrc, typename Tdst, typename BorderInterpolation>
__global__
void rowColC1Kernel(const Tsrc* src, int rows, int cols, int src_stride,
int src_offset, int radius_x, int radius_y,
bool is_x_symmetric, bool is_y_symmetric, bool normalize,
float weight, Tdst* dst, int dst_stride, int dst_offset,
BorderInterpolation interpolation) {
__shared__ float data[kDimY0 * 3][(kDimX0 << 2)];
int element_x = ((blockIdx.x << kShiftX0) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
int bottom = element_x - radius_x;
int top = element_x + radius_x;
if (!is_x_symmetric) {
top -= 1;
}
int data_index, row_index;
Tsrc* input;
float4 value;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
bool isnt_border_block = true;
data_index = radius_x >> (kShiftX0 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius_x) >> (kShiftX0 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
if (threadIdx.y < radius_y && element_x < cols) {
row_index = interpolation(rows, radius_y, element_y - radius_y);
input = (Tsrc*)((uchar*)src + src_offset + row_index * src_stride);
if (isnt_border_block) {
for (int i = bottom; i <= top; i++) {
value.x = input[i];
value.y = input[i + 1];
value.z = input[i + 2];
value.w = input[i + 3];
sum += value;
}
}
else {
for (int i = bottom; i <= top; i++) {
data_index = interpolation(cols, radius_x, i);
value.x = input[data_index];
data_index = interpolation(cols, radius_x, i + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius_x, i + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius_x, i + 3);
value.w = input[data_index];
sum += value;
}
}
data_index = threadIdx.x << 2;
data[threadIdx.y][data_index] = sum.x;
data[threadIdx.y][data_index + 1] = sum.y;
data[threadIdx.y][data_index + 2] = sum.z;
data[threadIdx.y][data_index + 3] = sum.w;
}
if (element_y < rows && element_x < cols) {
sum = make_float4(0.f, 0.f, 0.f, 0.f);
input = (Tsrc*)((uchar*)src + src_offset + element_y * src_stride);
if (isnt_border_block) {
for (int i = bottom; i <= top; i++) {
value.x = input[i];
value.y = input[i + 1];
value.z = input[i + 2];
value.w = input[i + 3];
sum += value;
}
}
else {
for (int i = bottom; i <= top; i++) {
data_index = interpolation(cols, radius_x, i);
value.x = input[data_index];
data_index = interpolation(cols, radius_x, i + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius_x, i + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius_x, i + 3);
value.w = input[data_index];
sum += value;
}
}
data_index = threadIdx.x << 2;
data[radius_y + threadIdx.y][data_index] = sum.x;
data[radius_y + threadIdx.y][data_index + 1] = sum.y;
data[radius_y + threadIdx.y][data_index + 2] = sum.z;
data[radius_y + threadIdx.y][data_index + 3] = sum.w;
}
if (threadIdx.y < radius_y && element_x < cols) {
sum = make_float4(0.f, 0.f, 0.f, 0.f);
if (blockIdx.y != gridDim.y - 1) {
row_index = interpolation(rows, radius_y,
((blockIdx.y + 1) << kShiftY0) + threadIdx.y);
}
else {
row_index = interpolation(rows, radius_y, rows + threadIdx.y);
}
input = (Tsrc*)((uchar*)src + src_offset + row_index * src_stride);
if (isnt_border_block) {
for (int i = bottom; i <= top; i++) {
value.x = input[i];
value.y = input[i + 1];
value.z = input[i + 2];
value.w = input[i + 3];
sum += value;
}
}
else {
for (int i = bottom; i <= top; i++) {
data_index = interpolation(cols, radius_x, i);
value.x = input[data_index];
data_index = interpolation(cols, radius_x, i + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius_x, i + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius_x, i + 3);
value.w = input[data_index];
sum += value;
}
}
data_index = threadIdx.x << 2;
if (blockIdx.y != gridDim.y - 1) {
row_index = radius_y + kDimY0 + threadIdx.y;
}
else {
row_index = radius_y + (rows - (blockIdx.y << kShiftY0)) + threadIdx.y;
}
data[row_index][data_index] = sum.x;
data[row_index][data_index + 1] = sum.y;
data[row_index][data_index + 2] = sum.z;
data[row_index][data_index + 3] = sum.w;
}
__syncthreads();
if (element_y < rows && element_x < cols) {
top = (radius_y << 1) + 1;
if (!is_y_symmetric) {
top -= 1;
}
sum = make_float4(0.f, 0.f, 0.f, 0.f);
for (int i = 0; i < top; i++) {
data_index = threadIdx.x << 2;
value.x = data[i + threadIdx.y][data_index];
value.y = data[i + threadIdx.y][data_index + 1];
value.z = data[i + threadIdx.y][data_index + 2];
value.w = data[i + threadIdx.y][data_index + 3];
sum += value;
}
if (normalize) {
sum.x *= weight;
sum.y *= weight;
sum.z *= weight;
sum.w *= weight;
}
Tdst* output = (Tdst*)((uchar*)dst + dst_offset + element_y * dst_stride);
if (sizeof(Tdst) == 1) {
if (element_x < cols - 3) {
output[element_x] = saturateCast(sum.x);
output[element_x + 1] = saturateCast(sum.y);
output[element_x + 2] = saturateCast(sum.z);
output[element_x + 3] = saturateCast(sum.w);
}
else {
output[element_x] = saturateCast(sum.x);
if (element_x < cols - 1) {
output[element_x + 1] = saturateCast(sum.y);
}
if (element_x < cols - 2) {
output[element_x + 2] = saturateCast(sum.z);
}
}
}
else {
if (element_x < cols - 3) {
output[element_x] = sum.x;
output[element_x + 1] = sum.y;
output[element_x + 2] = sum.z;
output[element_x + 3] = sum.w;
}
else {
output[element_x] = sum.x;
if (element_x < cols - 1) {
output[element_x + 1] = sum.y;
}
if (element_x < cols - 2) {
output[element_x + 2] = sum.z;
}
}
}
}
}
template <typename Tsrc, typename Tsrc4, typename BorderInterpolation>
__global__
void rowBatch4Kernel(const Tsrc* src, int rows, int cols, int src_stride,
int src_offset, int radius_x, bool is_x_symmetric,
float* dst, int dst_stride,
BorderInterpolation interpolation) {
int element_x = ((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_x = element_x - radius_x;
int top_x = element_x + radius_x;
if (!is_x_symmetric) {
top_x -= 1;
}
int data_index;
Tsrc* input;
Tsrc4 value;
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
bool isnt_border_block = true;
data_index = radius_x >> (kBlockShiftX1 + 2);
if (blockIdx.x <= data_index) isnt_border_block = false;
data_index = (cols - radius_x) >> (kBlockShiftX1 + 2);
if (blockIdx.x >= data_index) isnt_border_block = false;
input = (Tsrc*)((uchar*)src + src_offset + element_y * src_stride);
if (isnt_border_block) {
for (int i = origin_x; i <= top_x; i++) {
value.x = input[i];
value.y = input[i + 1];
value.z = input[i + 2];
value.w = input[i + 3];
sum += value;
}
}
else {
for (int i = origin_x; i <= top_x; i++) {
data_index = interpolation(cols, radius_x, i);
value.x = input[data_index];
data_index = interpolation(cols, radius_x, i + 1);
value.y = input[data_index];
data_index = interpolation(cols, radius_x, i + 2);
value.z = input[data_index];
data_index = interpolation(cols, radius_x, i + 3);
value.w = input[data_index];
sum += value;
}
}
float* output = (float*)((uchar*)dst + element_y * dst_stride);
if (element_x < cols - 3) {
output[element_x] = sum.x;
output[element_x + 1] = sum.y;
output[element_x + 2] = sum.z;
output[element_x + 3] = sum.w;
}
else {
output[element_x] = sum.x;
if (element_x < cols - 1) {
output[element_x + 1] = sum.y;
}
if (element_x < cols - 2) {
output[element_x + 2] = sum.z;
}
}
}
template <typename Tdst, typename BorderInterpolation>
__global__
void colSharedKernel(const float* src, int rows, int cols4, int cols,
int src_stride, int radius_y, bool is_y_symmetric,
bool normalize, float weight, Tdst* dst, int dst_stride,
int dst_offset, BorderInterpolation interpolation) {
__shared__ float4 data[kDimY0 * 3][kDimX0];
int element_x = (blockIdx.x << kShiftX0) + threadIdx.x;
int element_y = (blockIdx.y << kShiftY0) + threadIdx.y;
if (element_x >= cols4) {
return;
}
float4* input;
float4 value;
int index;
int ksize_y = (radius_y << 1) + 1;
if (!is_y_symmetric) {
ksize_y -= 1;
}
if (threadIdx.y < radius_y) {
if (blockIdx.y == 0) {
index = interpolation(rows, radius_y, element_y - radius_y);
}
else {
index = element_y - radius_y;
}
input = (float4*)((uchar*)src + index * src_stride);
value = input[element_x];
data[threadIdx.y][threadIdx.x] = value;
}
if (element_y < rows) {
input = (float4*)((uchar*)src + element_y * src_stride);
value = input[element_x];
data[radius_y + threadIdx.y][threadIdx.x] = value;
}
if (threadIdx.y < radius_y) {
index = (rows - radius_y) >> kShiftY0;
if (blockIdx.y >= index) {
if (blockIdx.y != gridDim.y - 1) {
index = interpolation(rows, radius_y, element_y + kDimY0);
input = (float4*)((uchar*)src + index * src_stride);
value = input[element_x];
data[radius_y + kDimY0 + threadIdx.y][threadIdx.x] = value;
}
else {
index = interpolation(rows, radius_y, rows + threadIdx.y);
input = (float4*)((uchar*)src + index * src_stride);
value = input[element_x];
index = rows - (blockIdx.y << kShiftY0);
data[radius_y + index + threadIdx.y][threadIdx.x] = value;
}
}
else {
index = element_y + kDimY0;
input = (float4*)((uchar*)src + index * src_stride);
value = input[element_x];
data[radius_y + kDimY0 + threadIdx.y][threadIdx.x] = value;
}
}
__syncthreads();
if (element_y >= rows) {
return;
}
float4 sum = make_float4(0.f, 0.f, 0.f, 0.f);
for (index = 0; index < ksize_y; index++) {
sum += data[threadIdx.y + index][threadIdx.x];
}
if (normalize) {
sum.x *= weight;
sum.y *= weight;
sum.z *= weight;
sum.w *= weight;
}
Tdst* output = (Tdst*)((uchar*)dst + dst_offset + element_y * dst_stride);
index = element_x << 2;
if (element_x < cols4 - 1) {
if (sizeof(Tdst) == 1) {
output[index] = saturateCast(sum.x);
output[index + 1] = saturateCast(sum.y);
output[index + 2] = saturateCast(sum.z);
output[index + 3] = saturateCast(sum.w);
}
else {
output[index] = sum.x;
output[index + 1] = sum.y;
output[index + 2] = sum.z;
output[index + 3] = sum.w;
}
}
else {
if (sizeof(Tdst) == 1) {
output[index] = saturateCast(sum.x);
if (index < cols - 1) {
output[index + 1] = saturateCast(sum.y);
}
if (index < cols - 2) {
output[index + 2] = saturateCast(sum.z);
}
if (index < cols - 3) {
output[index + 3] = saturateCast(sum.w);
}
}
else {
output[index] = sum.x;
if (index < cols - 1) {
output[index + 1] = sum.y;
}
if (index < cols - 2) {
output[index + 2] = sum.z;
}
if (index < cols - 3) {
output[index + 3] = sum.w;
}
}
}
}
template <typename Tdst, typename BorderInterpolation>
__global__
void colBatch4Kernel(const float* src, int rows, int cols, int src_stride,
int radius_y, bool is_y_symmetric, bool normalize,
float weight, Tdst* dst, int dst_stride, int dst_offset,
BorderInterpolation interpolation) {
__shared__ Tdst data[kBlockDimY1][kBlockDimX1 << 2];
int element_x = (blockIdx.x << (kBlockShiftX1 + 2)) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_x >= cols || element_y >= rows) {
return;
}
int origin_y = element_y - radius_y;
int top_y = element_y + radius_y;
if (!is_y_symmetric) {
top_y -= 1;
}
int data_index;
float* input;
float value;
float sum = 0.f;
bool isnt_border_block = true;
data_index = radius_y >> kBlockShiftY1;
if (blockIdx.y <= data_index) isnt_border_block = false;
data_index = (rows - radius_y) >> kBlockShiftY1;
if (blockIdx.y >= data_index) isnt_border_block = false;
if (isnt_border_block) {
for (int i = origin_y; i <= top_y; i++) {
input = (float*)((uchar*)src + i * src_stride);
value = input[element_x];
sum += value;
}
}
else {
for (int i = origin_y; i <= top_y; i++) {
data_index = interpolation(rows, radius_y, i);
input = (float*)((uchar*)src + data_index * src_stride);
value = input[element_x];
sum += value;
}
}
if (normalize) {
sum *= weight;
}
if (sizeof(Tdst) == 1) {
data[threadIdx.y][threadIdx.x] = saturateCast(sum);
}
__syncthreads();
Tdst* output = (Tdst*)((uchar*)dst + dst_offset + element_y * dst_stride);
if (sizeof(Tdst) == 1) {
if (threadIdx.x < kBlockDimX1) {
element_x = (((blockIdx.x << kBlockShiftX1) + threadIdx.x) << 2);
data_index = threadIdx.x << 2;
if (element_x < cols - 3) {
output[element_x] = data[threadIdx.y][data_index];
output[element_x + 1] = data[threadIdx.y][data_index + 1];
output[element_x + 2] = data[threadIdx.y][data_index + 2];
output[element_x + 3] = data[threadIdx.y][data_index + 3];
}
else if (element_x < cols) {
output[element_x] = data[threadIdx.y][data_index];
if (element_x < cols - 1) {
output[element_x + 1] = data[threadIdx.y][data_index + 1];
}
if (element_x < cols - 2) {
output[element_x + 2] = data[threadIdx.y][data_index + 2];
}
}
else {
}
}
}
else {
output[element_x] = sum;
}
}
#define RUN_CHANNEL1_SMALL_KERNELS(Tsrc, Tdst, Interpolation) \
Interpolation interpolation; \
rowColC1Kernel<Tsrc, Tdst, Interpolation><<<grid, block, 0, stream>>>(src, \
rows, cols, src_stride, src_offset, radius_x, radius_y, is_x_symmetric, \
is_y_symmetric, normalize, weight, dst, dst_stride, dst_offset, \
interpolation);
#define RUN_KERNELS(Tsrc, Tdst, Interpolation) \
Interpolation interpolation; \
rowBatch4Kernel<Tsrc, Tsrc ## 4, Interpolation><<<grid, block, 0, stream>>>( \
src, rows, cols, src_stride, src_offset, radius_x, is_x_symmetric, buffer, \
pitch, interpolation); \
if (ksize_x <= 33 && ksize_y <= 33) { \
colSharedKernel<Tdst, Interpolation><<<grid1, block1, 0, stream>>>(buffer, \
rows, columns4, columns, pitch, radius_y, is_y_symmetric, normalize, \
weight, dst, dst_stride, dst_offset, interpolation); \
} \
else { \
colBatch4Kernel<Tdst, Interpolation><<<grid2, block2, 0, stream>>>(buffer, \
rows, columns, pitch, radius_y, is_y_symmetric, normalize, weight, dst, \
dst_stride, dst_offset, interpolation); \
}
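// Large-kernel path: the box filter runs as two separable passes through an
// intermediate pitched float buffer. rowBatch4Kernel produces the horizontal
// sums; the vertical pass uses colSharedKernel (shared-memory tiles, float4
// loads) when both kernel sides are at most 33, and the plainer
// colBatch4Kernel otherwise.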
RetCode boxFilter(const float* src, int rows, int cols, int channels,
int src_stride, int src_offset, int ksize_x, int ksize_y,
bool normalize, float* dst, int dst_stride, int dst_offset,
BorderType border_type, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(channels == 1);
PPL_ASSERT(src_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * channels * (int)sizeof(float));
PPL_ASSERT(src_offset >= 0);
PPL_ASSERT(dst_offset >= 0);
PPL_ASSERT(ksize_x > 0);
PPL_ASSERT(ksize_y > 0);
PPL_ASSERT(border_type == BORDER_TYPE_REPLICATE ||
border_type == BORDER_TYPE_REFLECT ||
border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_DEFAULT);
cudaError_t code;
if (ksize_x == 1 && ksize_y == 1 && src_stride == dst_stride) {
if (src != dst) {
      code = cudaMemcpyAsync((uchar*)dst + dst_offset,
                             (const uchar*)src + src_offset,
                             rows * src_stride, cudaMemcpyDeviceToDevice,
                             stream);
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
}
return RC_SUCCESS;
}
int radius_x = ksize_x >> 1;
int radius_y = ksize_y >> 1;
bool is_x_symmetric = ksize_x & 1;
bool is_y_symmetric = ksize_y & 1;
  float weight = 1.0f / (ksize_x * ksize_y);
if (ksize_x <= SMALL_KSIZE && ksize_y <= SMALL_KSIZE && channels == 1) {
dim3 block, grid;
block.x = kDimX0;
block.y = kDimY0;
grid.x = divideUp(divideUp(cols, 4, 2), kDimX0, kShiftX0);
grid.y = divideUp(rows, kDimY0, kShiftY0);
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_CHANNEL1_SMALL_KERNELS(float, float, ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_CHANNEL1_SMALL_KERNELS(float, float, ReflectBorder);
}
else {
RUN_CHANNEL1_SMALL_KERNELS(float, float, Reflect101Border);
}
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(divideUp(cols, 4, 2), kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
dim3 block1, grid1;
block1.x = kDimX0;
block1.y = kDimY0;
int columns = cols * channels;
int columns4 = divideUp(columns, 4, 2);
grid1.x = divideUp(columns4, kDimX0, kShiftX0);
grid1.y = divideUp(rows, kDimY0, kShiftY0);
dim3 block2, grid2;
block2.x = (kBlockDimX1 << 2);
block2.y = kBlockDimY1;
grid2.x = divideUp(columns, (kBlockDimX1 << 2), (kBlockShiftX1 + 2));
grid2.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
float* buffer;
size_t pitch;
code = cudaMallocPitch(&buffer, &pitch, cols * channels * sizeof(float),
rows);
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_MEMORY_ERROR;
}
if (border_type == BORDER_TYPE_REPLICATE) {
RUN_KERNELS(float, float, ReplicateBorder);
}
else if (border_type == BORDER_TYPE_REFLECT) {
RUN_KERNELS(float, float, ReflectBorder);
}
else {
RUN_KERNELS(float, float, Reflect101Border);
}
cudaFree(buffer);
code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/***************************** splitChannels() ******************************/
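// The split kernels de-interleave a packed 3- or 4-channel row into planar
// outputs that live in one allocation and are addressed through the per-plane
// byte offsets dst0_offset..dst3_offset.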
__global__
void split3ChannelsKernel(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride, int dst0_offset,
int dst1_offset, int dst2_offset) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int input_x = element_x * 3;
float* input = (float*)((uchar*)src + element_y * src_stride);
float value0 = input[input_x];
float value1 = input[input_x + 1];
float value2 = input[input_x + 2];
int offset = element_y * dst_stride;
float* output0 = (float*)((uchar*)dst + dst0_offset + offset);
float* output1 = (float*)((uchar*)dst + dst1_offset + offset);
float* output2 = (float*)((uchar*)dst + dst2_offset + offset);
output0[element_x] = value0;
output1[element_x] = value1;
output2[element_x] = value2;
}
RetCode split3Channels(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride, int dst0_offset,
int dst1_offset, int dst2_offset, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * 3 * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * (int)sizeof(float));
PPL_ASSERT(dst0_offset >= 0);
PPL_ASSERT(dst1_offset >= 0);
PPL_ASSERT(dst2_offset >= 0);
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
split3ChannelsKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
dst, dst_stride, dst0_offset, dst1_offset, dst2_offset);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
__global__
void split4ChannelsKernel(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride, int dst0_offset,
int dst1_offset, int dst2_offset, int dst3_offset) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int input_x = element_x << 2;
float* input = (float*)((uchar*)src + element_y * src_stride);
float value0 = input[input_x];
float value1 = input[input_x + 1];
float value2 = input[input_x + 2];
float value3 = input[input_x + 3];
int offset = element_y * dst_stride;
float* output0 = (float*)((uchar*)dst + dst0_offset + offset);
float* output1 = (float*)((uchar*)dst + dst1_offset + offset);
float* output2 = (float*)((uchar*)dst + dst2_offset + offset);
float* output3 = (float*)((uchar*)dst + dst3_offset + offset);
output0[element_x] = value0;
output1[element_x] = value1;
output2[element_x] = value2;
output3[element_x] = value3;
}
RetCode split4Channels(const float* src, int rows, int cols, int src_stride,
float* dst, int dst_stride, int dst0_offset,
int dst1_offset, int dst2_offset, int dst3_offset,
cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * 4 * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * (int)sizeof(float));
PPL_ASSERT(dst0_offset >= 0);
PPL_ASSERT(dst1_offset >= 0);
PPL_ASSERT(dst2_offset >= 0);
PPL_ASSERT(dst3_offset >= 0);
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
split4ChannelsKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
dst, dst_stride, dst0_offset, dst1_offset, dst2_offset, dst3_offset);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/***************************** mergeChannels() ******************************/
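// The merge kernels perform the inverse packing: planar inputs selected via
// per-plane byte offsets are interleaved back into packed 3- or 4-channel
// rows.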
__global__
void merge3ChannelsKernel(const float* src, int rows, int cols, int src_stride,
int src0_offset, int src1_offset, int src2_offset,
float* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * src_stride;
float* input0 = (float*)((uchar*)src + src0_offset + offset);
float* input1 = (float*)((uchar*)src + src1_offset + offset);
float* input2 = (float*)((uchar*)src + src2_offset + offset);
float value0 = input0[element_x];
float value1 = input1[element_x];
float value2 = input2[element_x];
element_x = element_x * 3;
float* output = (float*)((uchar*)dst + element_y * dst_stride);
output[element_x] = value0;
output[element_x + 1] = value1;
output[element_x + 2] = value2;
}
RetCode merge3Channels(const float* src, int rows, int cols, int src_stride,
int src0_offset, int src1_offset, int src2_offset,
float* dst, int dst_stride, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * 3 * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(src2_offset >= 0);
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
merge3ChannelsKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
src0_offset, src1_offset, src2_offset, dst, dst_stride);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
__global__
void merge4ChannelsKernel(const float* src, int rows, int cols, int src_stride,
int src0_offset, int src1_offset, int src2_offset,
int src3_offset, float* dst, int dst_stride) {
int element_x = (blockIdx.x << kBlockShiftX1) + threadIdx.x;
int element_y = (blockIdx.y << kBlockShiftY1) + threadIdx.y;
if (element_y >= rows || element_x >= cols) {
return;
}
int offset = element_y * src_stride;
float* input0 = (float*)((uchar*)src + src0_offset + offset);
float* input1 = (float*)((uchar*)src + src1_offset + offset);
float* input2 = (float*)((uchar*)src + src2_offset + offset);
float* input3 = (float*)((uchar*)src + src3_offset + offset);
float value0 = input0[element_x];
float value1 = input1[element_x];
float value2 = input2[element_x];
float value3 = input3[element_x];
element_x = element_x << 2;
dst_stride >>= 2;
float* output = dst + element_y * dst_stride;
output[element_x] = value0;
output[element_x + 1] = value1;
output[element_x + 2] = value2;
output[element_x + 3] = value3;
}
RetCode merge4Channels(const float* src, int rows, int cols, int src_stride,
int src0_offset, int src1_offset, int src2_offset,
int src3_offset, float* dst, int dst_stride,
cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * 4 * (int)sizeof(float));
PPL_ASSERT(src0_offset >= 0);
PPL_ASSERT(src1_offset >= 0);
PPL_ASSERT(src2_offset >= 0);
PPL_ASSERT(src3_offset >= 0);
dim3 block, grid;
block.x = kBlockDimX1;
block.y = kBlockDimY1;
grid.x = divideUp(cols, kBlockDimX1, kBlockShiftX1);
grid.y = divideUp(rows, kBlockDimY1, kBlockShiftY1);
merge4ChannelsKernel<<<grid, block, 0, stream>>>(src, rows, cols, src_stride,
src0_offset, src1_offset, src2_offset, src3_offset, dst, dst_stride);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
/***************************** guidedFilter() ******************************/
/*
* guide image: 1 channel.
* input image: 1 channel.
* output image: 1 channel.
*/
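/* The implementation below follows the standard guided-filter recipe
 * (He et al.):
 *   mean_I = box(I)                  mean_p = box(p)
 *   var_I  = box(I*I) - mean_I^2     cov_Ip = box(I*p) - mean_I*mean_p
 *   a = cov_Ip / (var_I + eps)       b = mean_p - a*mean_I
 *   q = box(a)*I + box(b)
 * All eight intermediate planes are carved out of a single pitched
 * allocation of src_rows * 8 rows and addressed via byte offsets that are
 * multiples of pitch * src_rows.
 */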
void guidedFilter_1to1(const float* src, int src_rows, int src_cols,
int src_stride, const float* guide, int guide_stride,
float* dst, int dst_stride, int radius, double eps,
BorderType border_type, cudaStream_t stream) {
float* buffer;
size_t pitch;
cudaMallocPitch(&buffer, &pitch, src_cols * sizeof(float), src_rows * 8);
int offset = pitch * src_rows;
float* II = buffer;
float* IP = buffer;
float* meanI = buffer;
float* meanP = buffer;
float* meanII = buffer;
float* meanIP = buffer;
float* varI = buffer;
float* covIP = buffer;
int IP_offset = offset;
int meanI_offset = offset * 2;
int meanP_offset = offset * 3;
int meanII_offset = offset * 4;
int meanIP_offset = offset * 5;
int varI_offset = offset * 6;
int covIP_offset = offset * 7;
multiply(guide, src_rows, src_cols, 1, guide_stride, 0, guide, guide_stride,
0, II, pitch, 0, 1.f, stream);
multiply(guide, src_rows, src_cols, 1, guide_stride, 0, src, src_stride, 0,
IP, pitch, IP_offset, 1.f, stream);
int side_length = (radius << 1) + 1;
  boxFilter(guide, src_rows, src_cols, 1, guide_stride, 0, side_length,
            side_length, true, meanI, pitch, meanI_offset, border_type, stream);
boxFilter(src, src_rows, src_cols, 1, src_stride, 0, side_length, side_length,
true, meanP, pitch, meanP_offset, border_type, stream);
  boxFilter(II, src_rows, src_cols, 1, pitch, 0, side_length, side_length,
            true, meanII, pitch, meanII_offset, border_type, stream);
  boxFilter(IP, src_rows, src_cols, 1, pitch, IP_offset, side_length,
            side_length, true, meanIP, pitch, meanIP_offset, border_type,
            stream);
float* meanII_mul = II;
float* meanIP_mul = IP;
multiply(meanI, src_rows, src_cols, 1, pitch, meanI_offset, meanI, pitch,
meanI_offset, meanII_mul, pitch, 0, 1.f, stream);
multiply(meanI, src_rows, src_cols, 1, pitch, meanI_offset, meanP, pitch,
meanP_offset, meanIP_mul, pitch, IP_offset, 1.f, stream);
subtract(meanII, src_rows, src_cols, 1, pitch, meanII_offset, meanII_mul, 0,
varI, varI_offset, stream);
subtract(meanIP, src_rows, src_cols, 1, pitch, meanIP_offset, meanIP_mul,
IP_offset, covIP, covIP_offset, stream);
float* a = meanII;
float* b = meanIP;
float* aMeanI = covIP;
addScalar(varI, src_rows, src_cols, 1, pitch, varI_offset, eps, stream);
divide(covIP, src_rows, src_cols, 1, pitch, covIP_offset, varI, pitch,
varI_offset, a, pitch, meanII_offset, 1.f, stream);
multiply(a, src_rows, src_cols, 1, pitch, meanII_offset, meanI, pitch,
meanI_offset, aMeanI, pitch, covIP_offset, 1.f, stream);
subtract(meanP, src_rows, src_cols, 1, pitch, meanP_offset, aMeanI,
covIP_offset, b, meanIP_offset, stream);
float* meanA = II;
float* meanB = IP;
boxFilter(a, src_rows, src_cols, 1, src_stride, meanII_offset, side_length,
side_length, true, meanA, pitch, 0, border_type, stream);
boxFilter(b, src_rows, src_cols, 1, src_stride, meanIP_offset, side_length,
side_length, true, meanB, pitch, IP_offset, border_type, stream);
float* meanAI = meanI;
multiply(meanA, src_rows, src_cols, 1, pitch, 0, guide, guide_stride, 0,
meanAI, pitch, meanI_offset, 1.f, stream);
add(meanAI, src_rows, src_cols, 1, meanI_offset, meanB, IP_offset, dst,
dst_stride, 0, stream);
cudaFree(buffer);
}
void guidedFilter_1to1(const float* src, int src_rows, int src_cols,
int src_stride, int src_offset, const float* guide,
int guide_stride, float* dst, int dst_stride,
int dst_offset, int radius, double eps,
BorderType border_type, cudaStream_t stream) {
float* buffer;
size_t pitch;
cudaMallocPitch(&buffer, &pitch, src_cols * sizeof(float), src_rows * 8);
int offset = pitch * src_rows;
float* II = buffer;
float* IP = buffer;
float* meanI = buffer;
float* meanP = buffer;
float* meanII = buffer;
float* meanIP = buffer;
float* varI = buffer;
float* covIP = buffer;
int IP_offset = offset;
int meanI_offset = offset * 2;
int meanP_offset = offset * 3;
int meanII_offset = offset * 4;
int meanIP_offset = offset * 5;
int varI_offset = offset * 6;
int covIP_offset = offset * 7;
multiply(guide, src_rows, src_cols, 1, guide_stride, 0, guide, guide_stride,
0, II, pitch, 0, 1.f, stream);
multiply(guide, src_rows, src_cols, 1, guide_stride, 0, src, src_stride,
src_offset, IP, pitch, IP_offset, 1.f, stream);
int side_length = (radius << 1) + 1;
boxFilter(guide, src_rows, src_cols, 1, src_stride, 0, side_length,
side_length, true, meanI, pitch, meanI_offset, border_type, stream);
boxFilter(src, src_rows, src_cols, 1, src_stride, src_offset, side_length,
side_length, true, meanP, pitch, meanP_offset, border_type, stream);
boxFilter(II, src_rows, src_cols, 1, src_stride, 0, side_length, side_length,
true, meanII, pitch, meanII_offset, border_type, stream);
boxFilter(IP, src_rows, src_cols, 1, src_stride, IP_offset, side_length,
side_length, true, meanIP, pitch, meanIP_offset, border_type,
stream);
float* meanII_mul = II;
float* meanIP_mul = IP;
multiply(meanI, src_rows, src_cols, 1, pitch, meanI_offset, meanI, pitch,
meanI_offset, meanII_mul, pitch, 0, 1.f, stream);
multiply(meanI, src_rows, src_cols, 1, pitch, meanI_offset, meanP, pitch,
meanP_offset, meanIP_mul, pitch, IP_offset, 1.f, stream);
subtract(meanII, src_rows, src_cols, 1, pitch, meanII_offset, meanII_mul, 0,
varI, varI_offset, stream);
subtract(meanIP, src_rows, src_cols, 1, pitch, meanIP_offset, meanIP_mul,
IP_offset, covIP, covIP_offset, stream);
float* a = meanII;
float* b = meanIP;
float* aMeanI = covIP;
addScalar(varI, src_rows, src_cols, 1, pitch, varI_offset, eps, stream);
divide(covIP, src_rows, src_cols, 1, pitch, covIP_offset, varI, pitch,
varI_offset, a, pitch, meanII_offset, 1.f, stream);
multiply(a, src_rows, src_cols, 1, pitch, meanII_offset, meanI, pitch,
meanI_offset, aMeanI, pitch, covIP_offset, 1.f, stream);
subtract(meanP, src_rows, src_cols, 1, pitch, meanP_offset, aMeanI,
covIP_offset, b, meanIP_offset, stream);
float* meanA = II;
float* meanB = IP;
boxFilter(a, src_rows, src_cols, 1, src_stride, meanII_offset, side_length,
side_length, true, meanA, pitch, 0, border_type, stream);
boxFilter(b, src_rows, src_cols, 1, src_stride, meanIP_offset, side_length,
side_length, true, meanB, pitch, IP_offset, border_type, stream);
float* meanAI = meanI;
multiply(meanA, src_rows, src_cols, 1, pitch, 0, guide, guide_stride, 0,
meanAI, pitch, meanI_offset, 1.f, stream);
add(meanAI, src_rows, src_cols, 1, meanI_offset, meanB, IP_offset, dst,
dst_stride, dst_offset, stream);
cudaFree(buffer);
}
void filtering(const float* src, int rows, int cols, int src_channels,
int src_stride, const float* guide, int guide_channels,
int guide_stride, float* dst, int dst_stride, int radius,
float eps, BorderType border_type, cudaStream_t stream) {
if (guide_channels == 1) {
if (src_channels == 1) {
guidedFilter_1to1(src, rows, cols, src_stride, guide, guide_stride,
dst, dst_stride, radius, eps, border_type, stream);
}
else if (src_channels == 3) { // src_channels == 3
float* buffer;
size_t pitch;
cudaMallocPitch(&buffer, &pitch, cols * sizeof(float), rows * 6);
int offset = pitch * rows;
float* src0 = buffer;
float* src1 = buffer;
float* src2 = buffer;
float* dst0 = buffer;
float* dst1 = buffer;
float* dst2 = buffer;
size_t src0_offset = 0;
size_t src1_offset = offset;
size_t src2_offset = offset * 2;
size_t dst0_offset = offset * 3;
size_t dst1_offset = offset * 4;
size_t dst2_offset = offset * 5;
split3Channels(src, rows, cols, src_stride, buffer, pitch, 0, src1_offset,
src2_offset, stream);
guidedFilter_1to1(src0, rows, cols, pitch, src0_offset, guide,
guide_stride, dst0, pitch, dst0_offset, radius, eps,
border_type, stream);
guidedFilter_1to1(src1, rows, cols, pitch, src1_offset, guide,
guide_stride, dst1, pitch, dst1_offset, radius, eps,
border_type, stream);
guidedFilter_1to1(src2, rows, cols, pitch, src2_offset, guide,
guide_stride, dst2, pitch, dst2_offset, radius, eps,
border_type, stream);
merge3Channels(buffer, rows, cols, pitch, dst0_offset, dst1_offset,
dst2_offset, dst, dst_stride, stream);
cudaFree(buffer);
}
else { // src_channels == 4
float* buffer;
size_t pitch;
cudaMallocPitch(&buffer, &pitch, cols * sizeof(float), rows * 8);
int offset = pitch * rows;
float* src0 = buffer;
float* src1 = buffer;
float* src2 = buffer;
float* src3 = buffer;
float* dst0 = buffer;
float* dst1 = buffer;
float* dst2 = buffer;
float* dst3 = buffer;
size_t src0_offset = 0;
size_t src1_offset = offset;
size_t src2_offset = offset * 2;
size_t src3_offset = offset * 3;
size_t dst0_offset = offset * 4;
size_t dst1_offset = offset * 5;
size_t dst2_offset = offset * 6;
size_t dst3_offset = offset * 7;
split4Channels(src, rows, cols, src_stride, buffer, pitch, 0, src1_offset,
src2_offset, src3_offset, stream);
guidedFilter_1to1(src0, rows, cols, pitch, src0_offset, guide,
guide_stride, dst0, pitch, dst0_offset, radius, eps,
border_type, stream);
guidedFilter_1to1(src1, rows, cols, pitch, src1_offset, guide,
guide_stride, dst1, pitch, dst1_offset, radius, eps,
border_type, stream);
guidedFilter_1to1(src2, rows, cols, pitch, src2_offset, guide,
guide_stride, dst2, pitch, dst2_offset, radius, eps,
border_type, stream);
guidedFilter_1to1(src3, rows, cols, pitch, src3_offset, guide,
guide_stride, dst3, pitch, dst3_offset, radius, eps,
border_type, stream);
merge4Channels(buffer, rows, cols, pitch, dst0_offset, dst1_offset,
dst2_offset, dst3_offset, dst, dst_stride, stream);
cudaFree(buffer);
}
}
else { // guide_channels == 3
// Filtering with a multi-channel guide image is not implemented yet: the
// public guidedFilter() entry points assert guide_channels == 1, so these
// branches are intentionally left empty.
if (src_channels == 1) {
}
else if (src_channels == 3) { // src_channels == 3
}
else { // src_channels == 4
}
}
}
RetCode guidedFilter(const uchar* src, int rows, int cols, int src_channels,
int src_stride, const uchar* guide, int guide_channels,
int guide_stride, uchar* dst, int dst_stride, int radius,
float eps, BorderType border_type, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(guide != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * src_channels * (int)sizeof(uchar));
PPL_ASSERT(guide_stride >= cols * guide_channels * (int)sizeof(uchar));
PPL_ASSERT(dst_stride >= cols * src_channels * (int)sizeof(uchar));
PPL_ASSERT(guide_channels == 1);
PPL_ASSERT(src_channels == 1 || src_channels == 3 || src_channels == 4);
PPL_ASSERT(radius > 0);
PPL_ASSERT(eps > 0.0);
PPL_ASSERT(border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_REFLECT);
float* fguide;
float* fsrc;
float* fdst;
size_t fguide_stride, fsrc_stride, fdst_stride;
cudaMallocPitch(&fguide, &fguide_stride,
cols * guide_channels * sizeof(float), rows);
cudaMallocPitch(&fsrc, &fsrc_stride, cols * src_channels * sizeof(float),
rows);
cudaMallocPitch(&fdst, &fdst_stride, cols * src_channels * sizeof(float),
rows);
convertTo(guide, rows, cols, guide_channels, guide_stride, fguide,
fguide_stride, 1, 0.0, stream);
convertTo(src, rows, cols, src_channels, src_stride, fsrc, fsrc_stride,
1, 0.0, stream);
filtering(fsrc, rows, cols, src_channels, fsrc_stride, fguide, guide_channels,
fguide_stride, fdst, fdst_stride, radius, eps, border_type, stream);
convertTo(fdst, rows, cols, src_channels, fdst_stride, dst, dst_stride,
1, 0.0, stream);
cudaFree(fguide);
cudaFree(fsrc);
cudaFree(fdst);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
RetCode guidedFilter(const float* src, int rows, int cols, int src_channels,
int src_stride, const float* guide, int guide_channels,
int guide_stride, float* dst, int dst_stride, int radius,
float eps, BorderType border_type, cudaStream_t stream) {
PPL_ASSERT(src != nullptr);
PPL_ASSERT(guide != nullptr);
PPL_ASSERT(dst != nullptr);
PPL_ASSERT(rows >= 1 && cols >= 1);
PPL_ASSERT(src_stride >= cols * src_channels * (int)sizeof(float));
PPL_ASSERT(guide_stride >= cols * guide_channels * (int)sizeof(float));
PPL_ASSERT(dst_stride >= cols * src_channels * (int)sizeof(float));
PPL_ASSERT(guide_channels == 1);
PPL_ASSERT(src_channels == 1 || src_channels == 3 || src_channels == 4);
PPL_ASSERT(radius > 0);
PPL_ASSERT(eps > 0.0);
PPL_ASSERT(border_type == BORDER_TYPE_REFLECT_101 ||
border_type == BORDER_TYPE_REFLECT);
filtering(src, rows, cols, src_channels, src_stride, guide, guide_channels,
guide_stride, dst, dst_stride, radius, eps, border_type, stream);
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess) {
LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code);
return RC_DEVICE_RUNTIME_ERROR;
}
return RC_SUCCESS;
}
template <>
RetCode GuidedFilter<uchar, 1, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int guideWidthStride,
const uchar* guideData,
int outWidthStride,
uchar* outData,
int radius,
float eps,
BorderType border_type) {
RetCode code = guidedFilter(inData, height, width, 1, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
template <>
RetCode GuidedFilter<uchar, 3, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int guideWidthStride,
const uchar* guideData,
int outWidthStride,
uchar* outData,
int radius,
float eps,
BorderType border_type) {
RetCode code = guidedFilter(inData, height, width, 3, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
template <>
RetCode GuidedFilter<uchar, 4, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const uchar* inData,
int guideWidthStride,
const uchar* guideData,
int outWidthStride,
uchar* outData,
int radius,
float eps,
BorderType border_type) {
RetCode code = guidedFilter(inData, height, width, 4, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
template <>
RetCode GuidedFilter<float, 1, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int guideWidthStride,
const float* guideData,
int outWidthStride,
float* outData,
int radius,
float eps,
BorderType border_type) {
inWidthStride *= sizeof(float);
guideWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = guidedFilter(inData, height, width, 1, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
template <>
RetCode GuidedFilter<float, 3, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int guideWidthStride,
const float* guideData,
int outWidthStride,
float* outData,
int radius,
float eps,
BorderType border_type) {
inWidthStride *= sizeof(float);
guideWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = guidedFilter(inData, height, width, 3, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
template <>
RetCode GuidedFilter<float, 4, 1>(cudaStream_t stream,
int height,
int width,
int inWidthStride,
const float* inData,
int guideWidthStride,
const float* guideData,
int outWidthStride,
float* outData,
int radius,
float eps,
BorderType border_type) {
inWidthStride *= sizeof(float);
guideWidthStride *= sizeof(float);
outWidthStride *= sizeof(float);
RetCode code = guidedFilter(inData, height, width, 4, inWidthStride,
guideData, 1, guideWidthStride, outData,
outWidthStride, radius, eps, border_type, stream);
return code;
}
} // cuda
} // cv
} // ppl
|
the_stack
|
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "common.cuh"
#include <kat/tuple.hpp>
#include <cuda/api_wrappers.hpp>
#include <type_traits>
#include <cstdint>
#include <vector>
#include <algorithm>
#include <string>
#include <utility>
//#include "EASTLTest.h"
//EA_DISABLE_VC_WARNING(4623 4625 4413 4510)
namespace test_structs {
struct default_constructible
{
enum : int { default_value = 0x1EE7C0DE };
KAT_HD default_constructible() : value(default_value) {}
int value;
};
namespace op_counts {
__device__ int default_constructions = 0;
__device__ int int_constructions = 0;
__device__ int copy_constructions = 0;
__device__ int move_constructions = 0;
__device__ int copy_assignments = 0;
__device__ int move_assignments = 0;
__device__ int destructions = 0;
}
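// op_counting counts invocations of its special member functions: when compiled
// for the host it bumps the static members defined below, and when compiled for
// the device (__CUDA_ARCH__ defined) it bumps the __device__ counters in
// op_counts, so the same KAT_HD members can be instrumented on either side.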
struct op_counting
{
KAT_HD op_counting() : value() {
#ifndef __CUDA_ARCH__
++default_constructions;
#else
++op_counts::default_constructions;
#endif
}
KAT_HD op_counting(int x) : value(x) {
#ifndef __CUDA_ARCH__
++int_constructions;
#else
++op_counts::int_constructions;
#endif
}
KAT_HD op_counting(const op_counting& x) : value(x.value) {
#ifndef __CUDA_ARCH__
++copy_constructions;
#else
++op_counts::copy_constructions;
#endif
}
KAT_HD op_counting(op_counting&& x) : value(x.value)
{
#ifndef __CUDA_ARCH__
++move_constructions;
#else
++op_counts::move_constructions;
#endif
x.value = 0;
}
KAT_HD op_counting& operator=(const op_counting& x)
{
value = x.value;
#ifndef __CUDA_ARCH__
++copy_assignments;
#else
++op_counts::copy_assignments;
#endif
return *this;
}
KAT_HD op_counting& operator=(op_counting&& x)
{
value = x.value;
x.value = 0;
#ifndef __CUDA_ARCH__
++move_assignments;
#else
++op_counts::move_assignments;
#endif
return *this;
}
KAT_HD ~op_counting() {
#ifndef __CUDA_ARCH__
++destructions;
#else
++op_counts::destructions;
#endif
}
int value;
KAT_HD static void reset_counters()
{
#ifndef __CUDA_ARCH__
default_constructions = 0;
int_constructions = 0;
copy_constructions = 0;
move_constructions = 0;
copy_assignments = 0;
move_assignments = 0;
destructions = 0;
#else
op_counts::default_constructions = 0;
op_counts::int_constructions = 0;
op_counts::copy_constructions = 0;
op_counts::move_constructions = 0;
op_counts::copy_assignments = 0;
op_counts::move_assignments = 0;
op_counts::destructions = 0;
#endif
}
static int default_constructions;
static int int_constructions;
static int copy_constructions;
static int move_constructions;
static int copy_assignments;
static int move_assignments;
static int destructions;
};
int op_counting::default_constructions = 0;
int op_counting::int_constructions = 0;
int op_counting::copy_constructions = 0;
int op_counting::move_constructions = 0;
int op_counting::copy_assignments = 0;
int op_counting::move_assignments = 0;
int op_counting::destructions = 0;
// move_only_type - useful for verifying containers that may hold, e.g., unique_ptrs to make sure move ops are implemented
struct move_only_type
{
move_only_type() = delete;
KAT_HD move_only_type(int val) : value(val) {}
move_only_type(const move_only_type&) = delete;
KAT_HD move_only_type(move_only_type&& x) : value(x.value) { x.value = 0; }
move_only_type& operator=(const move_only_type&) = delete;
KAT_HD move_only_type& operator=(move_only_type&& x)
{
value = x.value;
x.value = 0;
return *this;
}
KAT_HD bool operator==(const move_only_type& o) const { return value == o.value; }
int value;
};
} // namespace test_structs
using kat::tuple;
using kat::tuple_size;
using kat::tuple_element_t;
using kat::get;
using kat::make_tuple;
using std::is_same;
using namespace test_structs;
TEST_SUITE("tuple") {
TEST_CASE("static assertions")
{
using kat::tuple;
using kat::tuple_size;
using kat::tuple_element_t;
using std::is_same;
static_assert(tuple_size<tuple<int>>::value == 1, "tuple_size<tuple<T>> test failed.");
static_assert(tuple_size<const tuple<int>>::value == 1, "tuple_size<const tuple<T>> test failed.");
static_assert(tuple_size<const tuple<const int>>::value == 1, "tuple_size<const tuple<const T>> test failed.");
static_assert(tuple_size<volatile tuple<int>>::value == 1, "tuple_size<volatile tuple<T>> test failed.");
static_assert(tuple_size<const volatile tuple<int>>::value == 1, "tuple_size<const volatile tuple<T>> test failed.");
static_assert(tuple_size<tuple<int, float, bool>>::value == 3, "tuple_size<tuple<T, T, T>> test failed.");
static_assert(is_same<tuple_element_t<0, tuple<int>>, int>::value, "tuple_element<I, T> test failed.");
static_assert(is_same<tuple_element_t<1, tuple<float, int>>, int>::value, "tuple_element<I, T> test failed.");
static_assert(is_same<tuple_element_t<1, tuple<float, const int>>, const int>::value, "tuple_element<I, T> test failed.");
static_assert(is_same<tuple_element_t<1, tuple<float, volatile int>>, volatile int>::value, "tuple_element<I, T> test failed.");
static_assert(is_same<tuple_element_t<1, tuple<float, const volatile int>>, const volatile int>::value, "tuple_element<I, T> test failed.");
static_assert(is_same<tuple_element_t<1, tuple<float, int&>>, int&>::value, "tuple_element<I, T> test failed.");
}
TEST_CASE("get")
{
tuple<int> single_element(1);
CHECK( get<0>(single_element) == 1 );
get<0>(single_element) = 2;
CHECK( get<0>(single_element) == 2 );
get<int>(single_element) = 3;
CHECK( get<int>(single_element) == 3 );
const tuple<int> const_single_element(3);
CHECK( get<int>(const_single_element) == 3 );
tuple<default_constructible> default_constructed;
CHECK( get<0>(default_constructed).value == default_constructible::default_value );
}
TEST_CASE("method invocation counts")
{
op_counting::reset_counters();
{
tuple<op_counting> an_op_counter;
CHECK_UNARY(
(op_counting::default_constructions == 1 &&
get<0>(an_op_counter).value == 0) );
get<0>(an_op_counter).value = 1;
tuple<op_counting> another_op_counter(an_op_counter);
CHECK( true == (
op_counting::default_constructions == 1 &&
op_counting::copy_constructions == 1 &&
get<0>(another_op_counter).value == 1 ) );
get<0>(an_op_counter).value = 2;
another_op_counter = an_op_counter;
CHECK_UNARY(
op_counting::default_constructions == 1 &&
op_counting::copy_constructions == 1 &&
op_counting::copy_assignments == 1 &&
get<0>(another_op_counter).value == 2 );
op_counting::reset_counters();
tuple<op_counting> yet_another_op_counter(op_counting(5));
CHECK_UNARY( (
op_counting::move_constructions == 1 && op_counting::default_constructions == 0 &&
op_counting::copy_constructions == 0 && get<0>(yet_another_op_counter).value == 5 ) );
}
CHECK( op_counting::destructions == 4 );
}
TEST_CASE("construction, assignment and conversion")
{
// Test constructor
tuple<int, float, bool> a_tuple(1, 1.0f, true);
CHECK( get<0>(a_tuple) == 1 );
CHECK( get<1>(a_tuple) == 1.0f );
CHECK( get<2>(a_tuple) == true );
CHECK( get<int>(a_tuple) == 1 );
CHECK( get<float>(a_tuple) == 1.0f );
CHECK( get<bool>(a_tuple) == true );
get<1>(a_tuple) = 2.0f;
CHECK( get<1>(a_tuple) == 2.0f );
// Test copy constructor
tuple<int, float, bool> another_tuple(a_tuple);
CHECK_UNARY( get<0>(another_tuple) == 1 && get<1>(another_tuple) == 2.0f && get<2>(another_tuple) == true );
// Test copy assignment
tuple<int, float, bool> yet_another_tuple(2, 3.0f, true);
CHECK_UNARY( get<0>(yet_another_tuple) == 2 && get<1>(yet_another_tuple) == 3.0f &&
get<2>(yet_another_tuple) == true);
yet_another_tuple = another_tuple;
CHECK_UNARY( get<0>(yet_another_tuple) == 1 && get<1>(yet_another_tuple) == 2.0f &&
get<2>(yet_another_tuple) == true);
// Test converting 'copy' constructor (from a tuple of different type whose members are each convertible)
tuple<double, double, bool> a_different_tuple(a_tuple);
CHECK_UNARY( get<0>(a_different_tuple) == 1.0 && get<1>(a_different_tuple) == 2.0 &&
get<2>(a_different_tuple) == true);
// Test converting assignment operator (from a tuple of different type whose members are each convertible)
tuple<double, double, bool> another_different_tuple;
CHECK_UNARY( get<0>(another_different_tuple) == 0.0 && get<1>(another_different_tuple) == 0.0 &&
get<2>(another_different_tuple) == false);
another_different_tuple = another_tuple;
CHECK_UNARY( get<0>(another_different_tuple) == 1.0 && get<1>(another_different_tuple) == 2.0 &&
get<2>(another_different_tuple) == true);
// Test default initialization (built in types should be value initialized rather than default initialized)
tuple<int, float, bool> a_default_initialized_tuple;
CHECK_UNARY( get<0>(a_default_initialized_tuple) == 0 && get<1>(a_default_initialized_tuple) == 0.0f &&
get<2>(a_default_initialized_tuple) == false);
}
TEST_CASE("more typed get")
{
// Test some other cases with typed-getter
tuple<double, double, bool> a_tuple_with_repeated_type(1.0f, 2.0f, true);
CHECK( get<bool>(a_tuple_with_repeated_type) == true );
tuple<double, bool, double> another_tuple_with_repeated_type(1.0f, true, 2.0f);
CHECK( get<bool>(another_tuple_with_repeated_type) == true );
tuple<bool, double, double> yet_another_tuple_with_repeated_type(true, 1.0f, 2.0f);
CHECK( get<bool>(yet_another_tuple_with_repeated_type) == true );
struct one_float { float val; };
struct second_float { float val; };
tuple<one_float, second_float> a_tuple_of_structs({ 1.0f }, { 2.0f } );
CHECK( get<one_float>(a_tuple_of_structs).val == 1.0f );
CHECK( get<second_float>(a_tuple_of_structs).val == 2.0f );
const tuple<double, double, bool> a_const_tuple(a_tuple_with_repeated_type);
const bool& const_ref = get<bool>(a_const_tuple);
CHECK( const_ref == true );
const bool&& const_rvalue_ref = get<bool>(std::move(a_tuple_with_repeated_type));
CHECK( const_rvalue_ref == true );
}
TEST_CASE("more tuple methods")
{
tuple<int, float> a_tuple_with_default_init(1, {});
// tuple construction from pair
std::pair<int, float> a_pair(1, 2.0f);
tuple<int, float> a_tuple(a_pair);
CHECK_UNARY( get<0>(a_tuple) == 1 && get<1>(a_tuple) == 2.0f );
tuple<double, double> another_tuple(a_pair);
CHECK_UNARY( get<0>(another_tuple) == 1.0 && get<1>(another_tuple) == 2.0 );
another_tuple = std::make_pair(2, 3);
CHECK_UNARY( get<0>(another_tuple) == 2.0 && get<1>(another_tuple) == 3.0 );
// operators: ==, !=, <
another_tuple = a_tuple;
CHECK( a_tuple == another_tuple );
CHECK_UNARY( !(a_tuple < another_tuple) && !(another_tuple < a_tuple) );
tuple<double, double> a_default_init_tuple;
CHECK( a_tuple != a_default_init_tuple );
CHECK( a_default_init_tuple < a_tuple );
tuple<int, int, int> a_lesser_tuple(1, 2, 3);
tuple<int, int, int> a_greater_tuple(1, 2, 4);
CHECK_UNARY( a_lesser_tuple < a_greater_tuple && !(a_greater_tuple < a_lesser_tuple) && a_greater_tuple > a_lesser_tuple &&
!(a_lesser_tuple > a_greater_tuple));
// We don't have the library's TestObject here
// tuple<int, float, TestObject> value_tuple(2, 2.0f, TestObject(2));
// tuple<int&, float&, TestObject&> refTup(value_tuple);
// tuple<const int&, const float&, const TestObject&> const_ref_to_tuple(value_tuple);
//
// CHECK( get<0>(refTup) == get<0>(value_tuple) );
// CHECK( get<1>(refTup) == get<1>(value_tuple) );
// CHECK( refTup == value_tuple );
// CHECK( get<0>(refTup) == get<0>(const_ref_to_tuple) );
// CHECK( get<1>(refTup) == get<1>(const_ref_to_tuple) );
// CHECK( const_ref_to_tuple == value_tuple );
// CHECK( const_ref_to_tuple == refTup );
// swap
swap(a_lesser_tuple, a_greater_tuple);
CHECK_UNARY( get<2>(a_lesser_tuple) == 4 && get<2>(a_greater_tuple) == 3 );
swap(a_greater_tuple, a_lesser_tuple);
CHECK( a_lesser_tuple < a_greater_tuple );
}
TEST_CASE("move-only contained type")
{
static_assert(std::is_constructible<move_only_type, move_only_type>::value, "is_constructible type trait giving confusing answers.");
static_assert(std::is_constructible<move_only_type, move_only_type&&>::value, "is_constructible type trait giving wrong answers.");
static_assert(std::is_constructible<move_only_type&&, move_only_type&&>::value, "is_constructible type trait giving bizarre answers.");
tuple<move_only_type> a_tuple_with_move_only_member(1);
CHECK( get<0>(a_tuple_with_move_only_member).value == 1 );
get<0>(a_tuple_with_move_only_member) = move_only_type(2);
CHECK( get<0>(a_tuple_with_move_only_member).value == 2 );
tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member);
CHECK( get<0>(a_tuple_with_ref_to_move_only_member).value == 2 );
tuple<const move_only_type&> a_tuple_with_const_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member));
CHECK( get<0>(a_tuple_with_const_ref_to_get_move_only).value == 2 );
tuple<move_only_type&> a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member));
CHECK( get<0>(a_tuple_with_ref_to_get_move_only).value == 2 );
}
TEST_CASE("make_tuple")
{
auto a_made_tuple = make_tuple(1, 2.0, true);
CHECK_UNARY( get<0>(a_made_tuple) == 1 && get<1>(a_made_tuple) == 2.0 && get<2>(a_made_tuple) == true );
// TODO: reference_wrapper implementation needs to be finished to enable this code
{
int a = 2;
float b = 3.0f;
auto a_made_tuple_2 = make_tuple(kat::ref(a), b);
get<0>(a_made_tuple_2) = 3;
get<1>(a_made_tuple_2) = 4.0f;
CHECK_UNARY( get<0>(a_made_tuple_2) == 3 && get<1>(a_made_tuple_2) == 4.0f && a == 3 && b == 3.0f );
}
}
TEST_CASE("forward_as_tuple")
{
auto forward_test = [](tuple<move_only_type&&, move_only_type&&> x) -> tuple<move_only_type, move_only_type>
{
return tuple<move_only_type, move_only_type>(std::move(x));
};
tuple<move_only_type, move_only_type> a_movable_tuple(
forward_test(kat::forward_as_tuple(move_only_type(1), move_only_type(2))));
CHECK_UNARY( get<0>(a_movable_tuple).value == 1 && get<1>(a_movable_tuple).value == 2 );
}
TEST_CASE("tie")
{
int a = 0;
double b = 0.0f;
static_assert(std::is_assignable<const kat::detail::ignore_t<int>&, int>::value, "ignore_t not assignable");
static_assert(kat::detail::tuple_assignable<tuple<const kat::detail::ignore_t<int>&>, tuple<int>>::value, "Not assignable");
kat::tie(a, kat::ignore, b) = kat::make_tuple(1, 3, 5);
CHECK_UNARY( a == 1 && b == 5.0f );
}
TEST_CASE("tuple_cat")
{
int a = 0;
double b = 0.0f;
auto concatenated_tuple = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true));
CHECK_UNARY( get<0>(concatenated_tuple) == 1 && get<1>(concatenated_tuple) == 2.0f && get<2>(concatenated_tuple) == 3.0 &&
get<3>(concatenated_tuple) == true);
auto concatenated_tuple_2 = tuple_cat(make_tuple(1, 2.0f), make_tuple(3.0, true), make_tuple(5u, '6'));
CHECK_UNARY( get<0>(concatenated_tuple_2) == 1 && get<1>(concatenated_tuple_2) == 2.0f && get<2>(concatenated_tuple_2) == 3.0 &&
get<3>(concatenated_tuple_2) == true && get<4>(concatenated_tuple_2) == 5u && get<5>(concatenated_tuple_2) == '6');
auto a_catted_ref_tuple = tuple_cat(make_tuple(1), kat::tie(a, kat::ignore, b));
get<1>(a_catted_ref_tuple) = 2;
CHECK( a == 2 );
}
TEST_CASE("empty tuple")
{
tuple<> empty_tuple;
CHECK( tuple_size<decltype(empty_tuple)>::value == 0 );
empty_tuple = make_tuple();
auto another_empty_tuple = make_tuple();
swap(another_empty_tuple, empty_tuple);
}
TEST_CASE("std::tuple compatibility") {
{
tuple<> empty_tuple;
auto empty_std_tuple_1 { static_cast< std::tuple<> >(empty_tuple) };
auto empty_std_tuple_2 { static_cast< std::tuple<> >(kat::make_tuple()) };
std::tuple<> empty_std_tuple_3 = empty_tuple;
// empty_tuple = empty_std_tuple_1;
CHECK (std::is_same<std::tuple<>,decltype(empty_std_tuple_1)>::value);
CHECK (kat::detail::tuple_convertible< std::tuple<>, tuple<> >::value);
}
{
tuple<int, float, bool> a_tuple(1, 1.0f, true);
auto std_tuple_1 { static_cast< std::tuple<int, float, bool> >(a_tuple) };
auto std_tuple_2 { static_cast< std::tuple<int, float, bool> >(kat::make_tuple(1, 1.0f, true)) };
std::tuple<int, float, bool> std_tuple_3 = a_tuple;
// a_tuple = std_tuple_1;
CHECK (std::is_same<std::tuple<int, float, bool>,decltype(std_tuple_1)>::value);
// CHECK (kat::detail::tuple_convertible< std::tuple<int, float, bool>, tuple<int, float, bool> >::value);
// CHECK (kat::tuple_size<std::tuple<int, float, bool>>::value == 3);
// CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, std::tuple<int, float, bool> >::value);
// std::cout
// << "tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value = "
// << tuple_size<typename std::remove_reference<tuple<int, float, bool>>::type>::value << '\n'
// << "tuple_size<std::tuple<int, float, bool>>::value) = "
// << tuple_size<std::tuple<int, float, bool>>::value << '\n'
// << "kat::detail::make_tuple_types_t<tuple<int, float, bool>> = "
// << util::type_name<kat::detail::make_tuple_types_t<tuple<int, float, bool> > >() << '\n'
// << "make_tuple_types_t<std::tuple<int, float, bool> > = "
// << util::type_name< kat::detail::make_tuple_types_t<std::tuple<int, float, bool> > >() << '\n';
// CHECK (kat::detail::tuple_assignable< tuple<int, float, bool>, tuple<int, float, bool> >::value);
}
// std::tuple<> empty_std_tuple;
// tuple<> empty_tuple_1 { static_cast< kat::tuple<> >(empty_tuple_1) };
// tuple<> empty_tuple_2 { static_cast< kat::tuple<> >(std::make_tuple()) };
// tuple<> empty_tuple_3 = empty_std_tuple_1;
// swap(empty_tuple, empty_std_tuple);
// swap(empty_std_tuple, empty_tuple);
// {
// tuple<move_only_type> a_tuple_with_move_only_member(1);
// auto std_tuple_1 { static_cast< std::tuple<move_only_type> >(a_tuple_with_move_only_member) };
//
// tuple<const move_only_type&> a_tuple_with_ref_to_move_only_member(a_tuple_with_move_only_member);
// std::tuple<> std_tuple_2 { static_cast< std::tuple<const move_only_type> >(a_tuple_with_ref_to_move_only_member) };
//
// tuple<const move_only_type&> aTupleWithConstRefToGetMoveOnly(get<0>(a_tuple_with_move_only_member));
// std::tuple<const move_only_type&> std_tuple_3 { static_cast< std::tuple<const move_only_type&> >(a_tuple_with_ref_to_move_only_member) };
//
// tuple<move_only_type&> a_tuple_with_ref_to_get_move_only(get<0>(a_tuple_with_move_only_member));
// std::tuple<move_only_type&> std_tuple_4 { static_cast< std::tuple<move_only_type&> >(a_tuple_with_ref_to_move_only_member) };
// }
{
// operators: ==, !=, <
tuple<int, float, bool> a_tuple(1, 1.0f, true);
std::tuple<int, float, bool> an_std_tuple = a_tuple;
CHECK( a_tuple == an_std_tuple );
CHECK_UNARY( !(a_tuple < an_std_tuple) && !(an_std_tuple < a_tuple) );
}
{
tuple<int, int, int> a_lesser_tuple(1, 2, 3);
tuple<int, int, int> a_greater_tuple(1, 2, 4);
std::tuple<int, int, int> a_lesser_std_tuple(1, 2, 3);
std::tuple<int, int, int> a_greater_std_tuple(1, 2, 4);
CHECK_UNARY(
a_lesser_tuple < a_greater_std_tuple &&
!(a_greater_tuple < a_lesser_std_tuple) &&
a_greater_tuple > a_lesser_std_tuple &&
!(a_lesser_tuple > a_greater_std_tuple)
);
CHECK_UNARY(
a_lesser_std_tuple < a_greater_tuple &&
!(a_greater_std_tuple < a_lesser_tuple) &&
a_greater_std_tuple > a_lesser_tuple &&
!(a_lesser_std_tuple > a_greater_tuple)
);
}
}
// TODO: Enable this when we've introduced compatibility code of kat::tuple
// and std::tuple on the host side. Also, if we get kat::pair, replicate the
// following tests for that class as well.
/*
TEST_CASE("piecewise_construction")
{
{
struct local
{
local() = default;
local(int a, int b) : mA(a), mB(b) {}
int mA = 0;
int mB = 0;
};
auto t = kat::make_tuple(42, 43);
std::pair<local, local> p(std::piecewise_construct, t, t);
CHECK( p.first.mA == 42 );
CHECK( p.second.mA == 42 );
CHECK( p.first.mB == 43 );
CHECK( p.second.mB == 43 );
}
{
struct local
{
local() = default;
local(int a, int b, int c, int d) : mA(a), mB(b), mC(c), mD(d) {}
int mA = 0;
int mB = 0;
int mC = 0;
int mD = 0;
};
auto t = kat::make_tuple(42, 43, 44, 45);
std::pair<local, local> p(std::piecewise_construct, t, t);
CHECK( p.first.mA == 42 );
CHECK( p.second.mA == 42 );
CHECK( p.first.mB == 43 );
CHECK( p.second.mB == 43 );
CHECK( p.first.mC == 44 );
CHECK( p.second.mC == 44 );
CHECK( p.first.mD == 45 );
CHECK( p.second.mD == 45 );
}
{
struct local1
{
local1() = default;
local1(int a) : mA(a) {}
int mA = 0;
};
struct local2
{
local2() = default;
local2(char a) : mA(a) {}
char mA = 0;
};
auto t1 = kat::make_tuple(42);
auto t2 = kat::make_tuple('a');
std::pair<local1, local2> p(std::piecewise_construct, t1, t2);
CHECK( p.first.mA == 42 );
CHECK( p.second.mA == 'a' );
}
}
*/
#if __cplusplus >= 201703L
TEST_CASE("apply")
{
// test with tuples
{
{
auto result = kat::apply([](int i) { return i; }, make_tuple(1));
CHECK( result == 1 );
}
{
auto result = kat::apply([](int i, int j) { return i + j; }, make_tuple(1, 2));
CHECK( result == 3 );
}
{
auto result = kat::apply([](int i, int j, int k, int m) { return i + j + k + m; }, make_tuple(1, 2, 3, 4));
CHECK( result == 10 );
}
}
// // test with pair
// {
// auto result = kat::apply([](int i, int j) { return i + j; }, make_pair(1, 2));
// CHECK( result == 3 );
// }
// TODO: Test apply with arrays?
}
TEST_CASE("tuple structured bindings") {
kat::tuple<int, int, int> t = {1,2,3};
auto [x,y,z] = t;
CHECK( x == 1 );
CHECK( y == 2 );
CHECK( z == 3 );
}
#endif // __cplusplus >= 201703L
TEST_CASE("tuple_cat result types") {
void* empty = nullptr;
auto t = kat::make_tuple(empty, true);
auto tc = kat::tuple_cat(kat::make_tuple("asd", 1), t);
static_assert(std::is_same<decltype(tc), kat::tuple<const char*, int, void*, bool>>::value, "type mismatch");
CHECK( std::string("asd") == kat::get<0>(tc) );
CHECK( kat::get<1>(tc) == 1 );
CHECK( kat::get<2>(tc) == nullptr );
CHECK( kat::get<3>(tc) == true );
}
} // TEST_SUITE("tuple")
// EA_RESTORE_VC_WARNING()
|
the_stack
|
// This macro controls shared memory usage. If set to 1, the kernel loads the whole feature map
// into shared memory for reuse; if set to 0, the kernel reads data from global memory directly.
// ROI pooling performance is data dependent, so test which value works better for your data:
// if all bboxes are very small, 0 is recommended, since shared memory would stage a lot of
// unused data; if bboxes overlap heavily, 1 is recommended to avoid duplicate loads.
// Option 1 requires more shared memory, and the launch may fail if the requirement exceeds the
// per-block shared memory limit the device allows; in that case fall back to 0.
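// With option 1, the dynamic shared memory request per block is roughly
//   H * W * sizeof(DATA_T) + (ROIs per image) * 4 * sizeof(ROI_T)
// (see the launcher below). For example, a 64x64 float feature map plus 300 ROIs needs
// about 16 KB + 4.7 KB, which still fits the default 48 KB limit; larger maps make the
// launcher opt in via cudaFuncAttributeMaxDynamicSharedMemorySize.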
#define ROIPOOLING_FEATURE_MAP_USE_SHMEM 1
template <typename T>
__device__ T getMax();
template <>
__device__ __forceinline__ int8_t getMax<int8_t>()
{
return INT8_MAX;
}
template <>
__device__ __forceinline__ float getMax<float>()
{
return FLT_MAX;
}
// ROI POOLING FORWARD KERNEL
template <typename DATA_T, typename ROI_T, bool INFER_ONLY, bool FM_IN_SMEM>
__global__ void ROIPoolingForwardKernelAligned(int32_t ROICount, const ROI_T* rois,
int32_t N, // feature map size
int32_t C, // feature map size
int32_t H, // feature map size
int32_t W, // feature map size
const DATA_T* featureMap, const int32_t poolingH, const int32_t poolingW, const float spatialScale, DATA_T* top,
int32_t* maxIds, int32_t fmapStep)
{
extern __shared__ float smem[];
DATA_T* feature_shr = (DATA_T*) &smem[0];
int* rois_shr = nullptr;
if (FM_IN_SMEM)
{
rois_shr = (int*) &feature_shr[H * W];
}
else
{
rois_shr = (int*) &feature_shr[0];
feature_shr = nullptr;
}
const int batch = blockIdx.x / C;
const int channel = blockIdx.x % C;
// load ROIs to shared memory
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
int offset = j << 2;
float4 roi = reinterpret_cast<float4*>(const_cast<float*>(rois))[batch * ROICount + j];
// spatialScale = 1.0 / featureStride
// Convert the coordinates to feature map scale
rois_shr[offset] = round(roi.x * spatialScale); //roi_start_w
rois_shr[offset + 1] = round(roi.y * spatialScale); //roi_start_h
rois_shr[offset + 2] = round(roi.z * spatialScale) - round(roi.x * spatialScale); //roi_length_w
rois_shr[offset + 3] = round(roi.w * spatialScale) - round(roi.y * spatialScale); // roi_length_h
}
// NC/xHW
int fmapOffset = blockIdx.x / fmapStep * H * W * fmapStep + blockIdx.x % fmapStep;
// Assumes #CTAs is just enough to cover all channels of all blocks
const DATA_T* bottom_data_offset = featureMap + fmapOffset;
if (FM_IN_SMEM)
{
// load the current channel to the shared memory
for (int j = threadIdx.x; j < H * W; j += blockDim.x)
{
feature_shr[j] = bottom_data_offset[j * fmapStep];
}
}
__syncthreads();
for (int j = threadIdx.x; j < ROICount; j += blockDim.x)
{
const int offset = j << 2;
// Force malformed ROIs to be 1x1
int roi_start_w = rois_shr[offset];
int roi_start_h = rois_shr[offset + 1];
int roi_width = max(rois_shr[offset + 2] + 1, 1);
int roi_height = max(rois_shr[offset + 3] + 1, 1);
float bin_size_h = static_cast<float>(roi_height) / static_cast<float>(poolingH);
float bin_size_w = static_cast<float>(roi_width) / static_cast<float>(poolingW);
for (int ph = 0; ph < poolingH; ++ph)
{
for (int pw = 0; pw < poolingW; ++pw)
{
int hstart = static_cast<int>(floor(static_cast<float>(ph) * bin_size_h));
int wstart = static_cast<int>(floor(static_cast<float>(pw) * bin_size_w));
int hend = static_cast<int>(ceil(static_cast<float>(ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil(static_cast<float>(pw + 1) * bin_size_w));
// Add roi offsets and clip to input boundaries
// In fact, clipping should be done in the RPN, but just in case...
hstart = min(max(hstart + roi_start_h, 0), H);
hend = min(max(hend + roi_start_h, 0), H);
wstart = min(max(wstart + roi_start_w, 0), W);
wend = min(max(wend + roi_start_w, 0), W);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
DATA_T maxval = is_empty ? 0 : -getMax<DATA_T>();
int maxId = -1;
DATA_T data = 0;
for (int h = hstart; h < hend; ++h)
{
for (int w = wstart; w < wend; ++w)
{
int index = h * W + w;
if (FM_IN_SMEM)
{
data = feature_shr[index];
}
else
{
data = bottom_data_offset[index * fmapStep];
}
if (data > maxval)
{
maxval = data;
maxId = index;
}
}
}
top[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxval;
if (!INFER_ONLY)
{
maxIds[(((batch * ROICount + j) * C + channel) * poolingH + ph) * poolingW + pw] = maxId;
}
} //for:pw
} //for:ph
} // for:j
}
template <typename DATA_T, DLayout_t DATA_L, typename ROI_T, bool INFER_ONLY>
pluginStatus_t ROIPoolingForwardKernelAlignedLauncher(cudaStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale, const void* rois, const void* featureMap, void* top, int* maxIds, size_t deviceSmemSize)
{
size_t roiShmemSize = (R / N) * 4 * sizeof(ROI_T);
#if ROIPOOLING_FEATURE_MAP_USE_SHMEM
size_t shmemSize = H * W * sizeof(DATA_T) + roiShmemSize;
const bool fmap_in_shmem = true;
#else
size_t shmemSize = roiShmemSize;
const bool fmap_in_shmem = false;
#endif
if (shmemSize > deviceSmemSize)
{
return STATUS_BAD_PARAM;
}
// in the aligned version of ROI Pooling R should always be a multiple of N
ASSERT(R % N == 0);
// NC/xHW
int32_t fmapStep = 1;
switch(DATA_L)
{
case NCHW: fmapStep = 1;
break;
case NC4HW: fmapStep = 4;
ASSERT((N * C) % 4 == 0);
break;
case NC32HW: fmapStep = 32;
ASSERT((N * C) % 32 == 0);
break;
default: ASSERT(false);
}
if(shmemSize > 48 * 1024)
{
CHECK(cudaFuncSetAttribute(&ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, true>,
cudaFuncAttributeMaxDynamicSharedMemorySize, shmemSize));
}
ROIPoolingForwardKernelAligned<DATA_T, ROI_T, INFER_ONLY, fmap_in_shmem><<<N * C, 256, shmemSize, stream>>>(R / N,
(const ROI_T*) rois,
N, // feature map size
C, // feature map size
H, // feature map size
W, // feature map size
(const DATA_T*) featureMap, poolingH, poolingW, spatialScale, (DATA_T*) top, maxIds, fmapStep);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// ROI POOLING LAUNCH CONFIG
typedef pluginStatus_t (*roiFwd)(cudaStream_t,
const int, //R, // TOTAL number of rois -> ~nmsMaxOut * N
const int, //N, // Batch size
const int, //C, // Channels
const int, //H, // Input feature map H
const int, //W, // Input feature map W
const int, //poolingH, // Output feature map H
const int, //poolingW, // Output feature map W
const float, //spatialScale,
const void*, //rois,
const void*, //featureMap,
void*, //top
int*, //maxIds
size_t); //device shmem size
// struct
struct roiFwdLaunchConfig
{
DataType t_rois;
DataType t_featureMap;
DLayout_t l_featureMap;
DataType t_top;
DLayout_t l_top;
bool inferOnly;
roiFwd function;
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
{
}
roiFwdLaunchConfig(DataType t_rois,
DataType t_featureMap,
DLayout_t l_featureMap,
DataType t_top,
DLayout_t l_top,
bool inferOnly,
roiFwd function)
: t_rois(t_rois)
, t_featureMap(t_featureMap)
, l_featureMap(l_featureMap)
, t_top(t_top)
, l_top(l_top)
, inferOnly(inferOnly)
, function(function)
{
}
bool operator==(const roiFwdLaunchConfig& other)
{
return (t_rois == other.t_rois)
&& (t_featureMap == other.t_featureMap)
&& (l_featureMap == other.l_featureMap)
&& (t_top == other.t_top)
&& (l_top == other.l_top)
&& (inferOnly == other.inferOnly);
}
};
#define FLOAT32 nvinfer1::DataType::kFLOAT
#define INT8 nvinfer1::DataType::kINT8
static std::array<roiFwdLaunchConfig, 6> roiFwdLCOptions = {
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NCHW, FLOAT32, NCHW, false, ROIPoolingForwardKernelAlignedLauncher<float, NCHW, float, false>),
roiFwdLaunchConfig(FLOAT32, INT8, NCHW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NCHW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC4HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC4HW, float, true>),
roiFwdLaunchConfig(FLOAT32, INT8, NC32HW, INT8, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<int8_t, NC32HW, float, true>),
roiFwdLaunchConfig(FLOAT32, FLOAT32, NC4HW, FLOAT32, NCHW, true, ROIPoolingForwardKernelAlignedLauncher<float, NC4HW, float, true>)};
// ROI INFERENCE
pluginStatus_t roiInference(cudaStream_t stream,
const int R, // TOTAL number of rois -> ~nmsMaxOut * N
const int N, // Batch size
const int C, // Channels
const int H, // Input feature map H
const int W, // Input feature map W
const int poolingH, // Output feature map H
const int poolingW, // Output feature map W
const float spatialScale,
const nvinfer1::DataType t_rois,
const void* rois,
const nvinfer1::DataType t_featureMap,
const DLayout_t l_featureMap,
const void* featureMap,
const nvinfer1::DataType t_top,
const DLayout_t l_top,
void* top,
size_t deviceSmemSize)
{
if (featureMap == NULL || rois == NULL || top == NULL)
{
return STATUS_BAD_PARAM;
}
DEBUG_PRINTF("&&&& ROIS %u\n", hash(rois, R * 4 * sizeof(float)));
DEBUG_PRINTF("&&&& FMAP %u\n", hash(featureMap, N * C * H * W * sizeof(float)));
roiFwdLaunchConfig rflc = roiFwdLaunchConfig(t_rois, t_featureMap, l_featureMap, t_top, l_top, true);
ASSERT_PARAM(R > 0);
for (unsigned i = 0; i < roiFwdLCOptions.size(); i++)
{
if (rflc == roiFwdLCOptions[i])
{
DEBUG_PRINTF("$$$$ ROI KERNEL %d\n", i);
return roiFwdLCOptions[i].function(stream,
R, N, C, H, W, poolingH, poolingW,
spatialScale, rois, featureMap, top, NULL, deviceSmemSize);
}
}
return STATUS_BAD_PARAM;
}
|
the_stack
|
#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "k2/csrc/context.h"
#include "k2/csrc/fsa_utils.h"
namespace k2 {
// field separator within a line for a text form FSA
static constexpr const char *kDelim = " \t";
// Convert a string to an integer. Abort the program on failure.
static int32_t StringToInt(const std::string &s) {
K2_CHECK(!s.empty());
bool ok = false;
char *p = nullptr;
// std::strtol requires a `long` type
long n = std::strtol(s.c_str(), &p, 10); // NOLINT
if (*p == '\0') ok = true;
auto res = static_cast<int32_t>(n);
if (n != res) ok = false; // out of range
K2_CHECK(ok) << "Failed to convert " << s << " to an integer";
return res;
}
// Convert a string to a float. Abort the program on failure.
// TODO(guoguo): We may run into locale problems, with comma vs. period for
// decimals. We have to test if the C code will behave the same
// w.r.t. locale as Python does.
static float StringToFloat(const std::string &s) {
K2_CHECK(!s.empty());
char *p = nullptr;
float f = std::strtof(s.c_str(), &p);
if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float";
return f;
}
// Trim leading and trailing spaces of a string.
static void TrimString(std::string *s) {
K2_CHECK_NE(s, nullptr);
auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; };
s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space));
s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end());
}
/* Split a string to a vector of strings using a set of delimiters.
Example usage:
@code
std::string in = "1 2 3";
const char *delim = " \t";
std::vector<std::string> out;
SplitStringToVector(in, delim, &out);
@endcode
@param [in] in The input string to be split.
@param [in] delim A string of delimiters.
@param [out] out It saves the split result.
*/
static void SplitStringToVector(const std::string &in, const char *delim,
std::vector<std::string> *out) {
K2_CHECK_NE(delim, nullptr);
K2_CHECK_NE(out, nullptr);
out->clear();
std::size_t start = 0;
while (true) {
auto pos = in.find_first_of(delim, start);
if (pos == std::string::npos) break;
auto sub = in.substr(start, pos - start);
start = pos + 1;
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
if (start < in.size()) {
auto sub = in.substr(start);
TrimString(&sub);
if (!sub.empty()) out->emplace_back(std::move(sub));
}
}
/* Create an acceptor from a stream, assuming the acceptor is in the k2 format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
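// For example, the text
//   0 1 2 0.1
//   1 2 -1 0.2
//   2
// encodes two arcs (the arc entering state 2 carries label -1 and the final
// score) followed by the final state on the last line.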
static Fsa K2AcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 4u) {
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = StringToFloat(splits[3]);
arcs.emplace_back(src_state, dest_state, symbol, score);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]); // this is a final state
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 acceptor expects a line with 1 (final_state) or "
"4 (src_state dest_state label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the K2
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state
The source states will be in non-descending order, and the final state does
not bear a cost/score -- we put the cost/score on the arc that connects to
the final state and set its label to -1.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
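// For example, "0 1 2 5 0.1" is an arc from state 0 to state 1 with label 2,
// aux_label 5 and score 0.1; as with the acceptor, the last line holds only
// the final state.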
static Fsa K2TransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
bool finished = false; // when the final state is read, set it to true.
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
K2_CHECK_EQ(finished, false);
auto num_fields = splits.size();
if (num_fields == 5u) {
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = StringToFloat(splits[4]);
arcs.emplace_back(src_state, dest_state, symbol, score);
aux_labels_internal.push_back(aux_label);
} else if (num_fields == 1u) {
// 0
// final_state
(void)StringToInt(splits[0]);
finished = true; // set finish
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nk2 transducer expects a line with 1 (final_state) or "
"5 (src_state dest_state label aux_label score) fields";
}
}
K2_CHECK_EQ(finished, true) << "The last line should be the final state";
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create an acceptor from a stream, assuming the acceptor is in the OpenFST
format:
src_state1 dest_state1 label1 score1
src_state2 dest_state2 label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FSA, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, and -1 as its label.
@param [in] is The input stream that contains the acceptor.
@return It returns an Fsa on CPU.
*/
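// For example, the line "0 1 2 0.5" becomes the arc 0 -> 1 with label 2 and
// score -0.5, and a final line "1 0.25" becomes an arc from state 1 to the
// super final state with label -1 and score -0.25.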
static Fsa OpenFstAcceptorFromStream(std::istringstream &is) {
std::vector<Arc> arcs;
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 3u || num_fields == 4u) {
// 0 1 2
// src_state dest_state label
//
// or
//
// 0 1 2 3
// src_state dest_state label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
float score = 0.0f;
if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]);
// Add the arc to "state_to_arcs".
++num_arcs;
max_state = std::max(max_state, std::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state)
state_to_arcs.resize(src_state + 1);
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
} else if (num_fields == 1u || num_fields == 2u) {
// 0 1
// final_state score
float score = 0.0f;
if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = std::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST acceptor expects a line with 1 (final_state),"
" 2 (final_state score), 3 (src_state dest_state label) "
"or 4 (src_state dest_state label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FSA, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs".
int32_t arc_index = 0;
arcs.resize(num_arcs);
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
bool error = true;
Array1<Arc> array(GetCpuContext(), arcs);
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
/* Create a transducer from a stream, assuming the transducer is in the OpenFST
format:
src_state1 dest_state1 label1 aux_label1 score1
src_state2 dest_state2 label2 aux_label2 score2
... ...
final_state final_score
We will negate the cost/score when we read them in. Also note, OpenFST may
omit the cost/score if it is 0.0.
We always create the super final state. If there are final state(s) in the
original FST, then we add arc(s) from the original final state(s) to the
super final state, with the (negated) old final state cost/score as its
cost/score, -1 as its label and 0 as its aux_label.
@param [in] is The input stream that contains the transducer.
@return It returns an Fsa on CPU.
*/
static Fsa OpenFstTransducerFromStream(std::istringstream &is,
Array1<int32_t> *aux_labels) {
K2_CHECK(aux_labels != nullptr);
std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states
std::vector<std::vector<Arc>> state_to_arcs; // indexed by states
std::vector<int32_t> aux_labels_internal;
std::vector<Arc> arcs;
std::vector<std::string> splits;
std::string line;
int32_t max_state = -1;
int32_t num_arcs = 0;
std::vector<int32_t> original_final_states;
std::vector<float> original_final_weights;
while (std::getline(is, line)) {
SplitStringToVector(line, kDelim,
&splits); // splits is cleared in the function
if (splits.empty()) continue; // this is an empty line
auto num_fields = splits.size();
if (num_fields == 4u || num_fields == 5u) {
// 0 1 2 3
// src_state dest_state label aux_label
//
// or
//
// 0 1 2 3 4
// src_state dest_state label aux_label score
int32_t src_state = StringToInt(splits[0]);
int32_t dest_state = StringToInt(splits[1]);
int32_t symbol = StringToInt(splits[2]);
int32_t aux_label = StringToInt(splits[3]);
float score = 0.0f;
if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]);
// Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels"
++num_arcs;
max_state = std::max(max_state, std::max(src_state, dest_state));
if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) {
state_to_arcs.resize(src_state + 1);
state_to_aux_labels.resize(src_state + 1);
}
state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol,
score);
state_to_aux_labels[src_state].push_back(aux_label);
} else if (num_fields == 1u || num_fields == 2u) {
// 0
// final_state
//
// or
//
// 0 1
// final_state score
// There could be multiple final states, so we first have to collect all
// the final states, and then work out the super final state.
float score = 0.0f;
if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]);
original_final_states.push_back(StringToInt(splits[0]));
original_final_weights.push_back(score);
max_state = std::max(max_state, original_final_states.back());
} else {
K2_LOG(FATAL) << "Invalid line: " << line
<< "\nOpenFST transducer expects a line with "
"1 (final_state), 2 (final_state score), "
"4 (src_state dest_state label aux_label) or "
"5 (src_state dest_state label aux_label score) fields.";
}
}
K2_CHECK(is.eof());
// Post processing on final states. If there are final state(s) in the
// original FST, we add the super final state as well as arc(s) from original
// final state(s) to the super final state. Otherwise, the super final state
// will be added by FsaFromArray1 (since there's no arc with label
// kFinalSymbol).
if (original_final_states.size() > 0) {
K2_CHECK_EQ(original_final_states.size(), original_final_weights.size());
int32_t super_final_state = max_state + 1;
state_to_arcs.resize(super_final_state);
state_to_aux_labels.resize(super_final_state);
for (std::size_t i = 0; i != original_final_states.size(); ++i) {
state_to_arcs[original_final_states[i]].emplace_back(
original_final_states[i], super_final_state,
-1, // kFinalSymbol
original_final_weights[i]);
// TODO(guoguo) We are not sure yet what to put as the auxiliary label for
// arcs entering the super final state. The only real choices
// are kEpsilon or kFinalSymbol. We are using kEpsilon for
// now.
state_to_aux_labels[original_final_states[i]].push_back(0); // kEpsilon
++num_arcs;
}
}
// Move arcs from "state_to_arcs" to "arcs", and aux_labels from
// "state_to_aux_labels" to "aux_labels_internal"
int32_t arc_index = 0;
arcs.resize(num_arcs);
aux_labels_internal.resize(num_arcs);
K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size());
for (std::size_t s = 0; s < state_to_arcs.size(); ++s) {
K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size());
for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) {
K2_CHECK_GT(num_arcs, arc_index);
arcs[arc_index] = state_to_arcs[s][a];
aux_labels_internal[arc_index] = state_to_aux_labels[s][a];
++arc_index;
}
}
K2_CHECK_EQ(num_arcs, arc_index);
auto cpu_context = GetCpuContext();
*aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal);
Array1<Arc> array(cpu_context, arcs);
bool error = true;
// FsaFromArray1 will add a super final state if the original FSA doesn't have
// a final state.
auto fsa = FsaFromArray1(array, &error);
K2_CHECK_EQ(error, false);
return fsa;
}
Fsa FsaFromString(const std::string &s, bool openfst /*= false*/,
Array1<int32_t> *aux_labels /*= nullptr*/) {
std::istringstream is(s);
K2_CHECK(is);
if (openfst == false && aux_labels == nullptr)
return K2AcceptorFromStream(is);
else if (openfst == false && aux_labels != nullptr)
return K2TransducerFromStream(is, aux_labels);
else if (openfst == true && aux_labels == nullptr)
return OpenFstAcceptorFromStream(is);
else if (openfst == true && aux_labels != nullptr)
return OpenFstTransducerFromStream(is, aux_labels);
return Fsa(); // unreachable code
}
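#if 0
// Usage sketch (not compiled): how FsaFromString is expected to be called on an
// OpenFST-format transducer. The string contents and this wrapper function are
// illustrative assumptions, not part of this file; only FsaFromString() and
// FsaToString() above are real.
static void ExampleFsaFromOpenFstString() {
  // Three arcs (two out of state 0, one out of state 1) and one final state
  // (state 2) with cost 0.5. OpenFST scores are negated on reading, as
  // documented above, and an arc to the super final state is added.
  std::string s = R"(0 1 2 22 -1.2
0 2 10 100 0.5
1 2 3 33
2 0.5
)";
  Array1<int32_t> aux_labels;
  Fsa fsa = FsaFromString(s, /*openfst*/ true, &aux_labels);
  // `fsa` lives on the CPU; `aux_labels` has one entry per arc, including the
  // epsilon (0) aux_label attached to each arc entering the super final state.
  std::string text = FsaToString(fsa, /*openfst*/ true, &aux_labels);
}
#endif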
std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/,
const Array1<int32_t> *aux_labels /*= nullptr*/) {
K2_CHECK_EQ(fsa.NumAxes(), 2);
if (fsa.Context()->GetDeviceType() != kCpu) {
Fsa _fsa = fsa.To(GetCpuContext());
Array1<int32_t> _aux_labels;
if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context());
return FsaToString(_fsa, openfst, aux_labels ? &_aux_labels : nullptr);
}
K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu);
const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1);
const Array1<Arc> &arcs = fsa.values;
const int32_t *p = nullptr;
if (aux_labels != nullptr) {
K2_CHECK(IsCompatible(fsa, *aux_labels));
K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim());
p = aux_labels->Data();
}
float scale = 1;
if (openfst) scale = -1;
std::ostringstream os;
int32_t n = arcs.Dim();
char sep = ' ';
char line_sep = '\n';
for (int32_t i = 0; i != n; ++i) {
const auto &arc = arcs[i];
os << arc.src_state << sep << arc.dest_state << sep << arc.symbol << sep;
if (p != nullptr) os << p[i] << sep;
os << (scale * arc.score) << line_sep;
}
os << (fsa.shape.Dim0() - 1) << line_sep;
return os.str();
}
Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) {
K2_CHECK(fsas.NumAxes() == 3);
ContextPtr c = fsas.Context();
int32_t num_arcs = fsas.NumElements();
Array1<int32_t> ans(c, num_arcs);
Arc *arcs_data = fsas.values.Data();
int32_t *ans_data = ans.Data();
if (!as_idx01) {
const Arc *arcs = fsas.values.Data();
auto lambda_set_dest_states1 = [=] __host__ __device__(int32_t arc_idx012) {
ans_data[arc_idx012] = arcs[arc_idx012].dest_state;
};
Eval(c, num_arcs, lambda_set_dest_states1);
} else {
const int32_t *row_ids2 = fsas.RowIds(2).Data();
auto lambda_set_dest_states01 = [=] __host__ __device__(
int32_t arc_idx012) {
int32_t src_state = arcs_data[arc_idx012].src_state,
dest_state = arcs_data[arc_idx012].dest_state;
// (row_ids2[arc_idx012] - src_state) is the same as
// row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the 1st
// state in this FSA.
ans_data[arc_idx012] = dest_state + (row_ids2[arc_idx012] - src_state);
};
Eval(c, num_arcs, lambda_set_dest_states01);
}
return ans;
}
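// Worked example of the idx01 conversion above (illustrative numbers, not from
// any real test): suppose `fsas` holds two FSAs with 3 and 2 states, so
// row_splits1 = [0, 3, 5]. For an arc in the second FSA whose src_state (an
// idx1) is 1, row_ids2[arc_idx012] is the idx01 of that source state,
// i.e. 3 + 1 = 4. Then row_ids2[arc_idx012] - src_state = 4 - 1 = 3 is the
// idx01 of the first state of that FSA, and adding dest_state (also an idx1)
// yields the destination state's idx01.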
Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) {
K2_CHECK(fsas.NumAxes() == 3);
ContextPtr c = fsas.Context();
Array1<int32_t> arc_dest_states = GetDestStates(fsas, true);
MonotonicLowerBound(arc_dest_states, &arc_dest_states);
int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
num_arcs = fsas.TotSize(2);
// We can tune `log_power` as a tradeoff between work done and clock time on
// GPU.
int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4);
int32_t max_num_states = fsas.shape.MaxSize(1);
// the following avoids doing too much extra work accumulating powers
// of 'dest_states' for very small problem sizes.
while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--;
// Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by
// state_idx01, that gives us the dest_state_idx01 that would be the beginning
// of the next batch if state_idx01 were the beginning of the current batch.
// So if we follow this chain forward from the start of one of the FSAs until
// it passes the end of this FSA, we get the beginnings of the batches
// we want. The natural algorithm to find the beginnings of the batches
// is sequential.
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states);
const int32_t *arc_dest_states_data = arc_dest_states.Data(),
*fsas_row_splits2_data = fsas.RowSplits(2).Data();
int32_t *dest_states_power_data =
dest_states_powers.Data(); // only process Row[0] below
const int32_t int_max = std::numeric_limits<int32_t>::max();
auto lambda_set_dest_states =
[=] __host__ __device__(int32_t state_idx01) -> void {
int32_t arc_idx01x = fsas_row_splits2_data[state_idx01],
next_arc_idx01x = fsas_row_splits2_data[state_idx01 + 1];
// If this state has arcs, let its `dest_state` be the largest `dest_state`
// of any of its arcs (which is the last one); otherwise, take the
// `dest_state` from the 1st arc of the next state, which is the largest
// value we can take (if the definition is: the highest-numbered state s for
// which neither this state nor any later-numbered state has an arc to a
// state lower than s).
int32_t arc_idx012 =
max(arc_idx01x,
next_arc_idx01x - 1); // if this state has arcs, next_arc_idx01x -
// 1 would be the last arc of this state
int32_t dest_state =
(arc_idx012 < num_arcs ? arc_dest_states_data[arc_idx012] : int_max);
dest_states_power_data[state_idx01] = dest_state;
// if the following fails, it's either a code error or the input FSA had
// cycles.
K2_CHECK_GT(dest_state, state_idx01);
};
Eval(c, num_states, lambda_set_dest_states);
// `num_batches_per_fsa` will be set to the number of batches of states that
// we'll use for each FSA... it corresponds to the number of times we have
// to follow links forward in the dest_states array till we pass the
  // end of the array for this FSA.
Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1);
// `batch_starts` will contain the locations of the first state_idx01 for each
// batch, but in an 'un-consolidated' format. Specifically, for FSA with
// index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i]
// of `batch_starts`. This is just a convenient layout because we know there
// can't be more batches than there are states. We'll later consolidate the
// information into a single array.
Array1<int32_t> batch_starts(c, num_states + 1);
int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(),
*batch_starts_data = batch_starts.Data();
const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data();
#if 0
// This is a simple version of the kernel that demonstrates what we're trying
// to do with the more complex code.
auto lambda_set_batch_info_simple = [=] __host__ __device__(int32_t fsa_idx) {
int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx],
end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1];
int32_t i = 0, cur_state_idx01 = begin_state_idx01;
while (cur_state_idx01 < end_state_idx01) {
batch_starts_data[begin_state_idx01 + i] = cur_state_idx01;
cur_state_idx01 = dest_states_power_data[cur_state_idx01];
++i;
}
num_batches_per_fsa_data[fsa_idx] = i;
};
Eval(c, num_fsas, lambda_set_batch_info_simple);
#else
int32_t stride = dest_states_powers.ElemStride0();
for (int32_t power = 1; power <= log_power; power++) {
const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride;
int32_t *dest_data = dest_states_powers.Data() + power * stride;
auto lambda_square_array =
[=] __host__ __device__(int32_t state_idx01) -> void {
int32_t dest_state = src_data[state_idx01],
dest_state_sq =
(dest_state < num_states ? src_data[dest_state] : int_max);
dest_data[state_idx01] = dest_state_sq;
};
Eval(c, num_states, lambda_square_array);
}
// jobs_per_fsa tells us how many separate chains of states we'll follow for
// each FSA.
// jobs_multiple is a kind of trick to ensure any given warp doesn't
// issue more memory requests than it can handle at a time (we drop
// some threads).
int32_t jobs_per_fsa = (1 << log_power),
jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1);
while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000)
jobs_multiple /= 2; // Likely won't get here. Just reduce multiple if
// num-jobs is ridiculous.
auto dest_states_powers_acc = dest_states_powers.Accessor();
auto lambda_set_batch_info = [=] __host__ __device__(int32_t fsa_idx,
int32_t j) {
if (j % jobs_multiple != 0)
return; // a trick to avoid too much random
// memory access for any given warp
int32_t task_idx = j / jobs_multiple; // Now 0 <= task_idx < jobs_per_fsa.
// The task indexed `task_idx` is responsible for batches numbered
    // task_idx, task_idx + jobs_per_fsa, task_idx + 2 * jobs_per_fsa and so
    // on, for the FSA numbered `fsa_idx`.  Comparing this code to
    // `lambda_set_batch_info_simple`, this task is responsible for the
    // assignment to batch_starts_data for all i such that i % jobs_per_fsa ==
    // task_idx, together with the assignment to num_batches_per_fsa_data if
    // i % jobs_per_fsa == task_idx (here referring to the i value finally
// assigned to that location).
int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx],
end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1];
int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01;
int32_t i = 0, cur_state_idx01 = begin_state_idx01;
if (task_idx >= num_states_this_fsa) return;
// The next loop advances `cur_state_idx01` by
// a number of steps equal to `task_idx`.
for (int32_t m = 0; m < log_power; ++m) {
int32_t n = 1 << m;
if ((task_idx & n) != 0) {
i += n;
int32_t next = dest_states_powers_acc(m, cur_state_idx01);
if (next >= end_state_idx01) return;
cur_state_idx01 = next;
}
}
K2_CHECK_EQ(i, task_idx);
while (1) {
if (i >= num_states_this_fsa) return;
batch_starts_data[begin_state_idx01 + i] = cur_state_idx01;
int32_t next_state_idx01 = dest_states_powers_acc(
log_power,
cur_state_idx01); // advance jobs_per_fsa = (1 << log_power) steps
if (next_state_idx01 >= end_state_idx01) {
// if exactly one step would also be enough to take us past the
// boundary...
if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) {
num_batches_per_fsa_data[fsa_idx] = i + 1;
}
return;
} else {
i += jobs_per_fsa;
cur_state_idx01 = next_state_idx01;
}
}
};
Eval2(c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info);
#endif
ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa);
Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa;
int32_t num_batches = num_batches_per_fsa[num_fsas];
Array1<int32_t> ans_row_ids1(c, num_batches);
RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1);
Array1<int32_t> ans_row_splits2(c, num_batches + 1);
const int32_t *ans_row_splits1_data = ans_row_splits1.Data(),
*ans_row_ids1_data = ans_row_ids1.Data();
int32_t *ans_row_splits2_data = ans_row_splits2.Data();
ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't
// set this last element
auto lambda_set_ans_row_splits2 =
[=] __host__ __device__(int32_t idx01) -> void {
int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index
idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x,
fsas_idx0x = fsas_row_splits1_data[idx0], // 1st state-idx (idx01)
// in fsas_, for this FSA
fsas_idx01 = fsas_idx0x + idx1, // the idx1 is actually the
// batch-index, this statement reflects
// the 'un-consolidated' format of
// `batch_starts`.
this_batch_start = batch_starts_data[fsas_idx01];
ans_row_splits2_data[idx01] = this_batch_start;
};
Eval(c, num_batches, lambda_set_ans_row_splits2);
RaggedShape ans_shape =
RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches,
&ans_row_splits2, nullptr, num_states);
Array1<int32_t> ans_value = Range(c, num_states, 0);
if (transpose) {
ans_shape = MakeTransposable(ans_shape);
Ragged<int32_t> ans(ans_shape, ans_value);
return Transpose(ans);
} else {
return Ragged<int32_t>(ans_shape, ans_value);
}
}
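// Worked example of the binary-lifting step in `lambda_set_batch_info` above
// (illustrative, made-up numbers): with log_power = 2, row m of
// `dest_states_powers` maps a state to the state 2^m batches ahead. A task
// with task_idx = 3 (binary 11) first advances 1 step using row 0, then 2
// steps using row 1, ending up 3 batches past the start of its FSA; from
// there it writes every jobs_per_fsa = 4'th batch start, advancing each time
// with row log_power = 2.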
Ragged<int32_t> GetIncomingArcs(FsaVec &fsas,
const Array1<int32_t> &dest_states) {
K2_CHECK_EQ(fsas.NumAxes(), 3);
ContextPtr c = fsas.Context();
Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states);
int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1),
num_arcs = fsas.TotSize(2);
Array1<int32_t> incoming_arcs_order =
GetTransposeReordering(dest_states_tensor, num_states),
ans_row_ids2 = dest_states[incoming_arcs_order];
  // Note: ans_row_ids2 will be monotonically increasing
Array1<int32_t> ans_row_splits2(c, num_states + 1);
RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2);
// Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis
// 1 are the same as for `fsas`.
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1),
ans_row_splits1 = fsas.RowSplits(1);
return Ragged<int32_t>(
RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states,
&ans_row_splits2, &ans_row_ids2, num_arcs),
incoming_arcs_order);
}
} // namespace k2
|
the_stack
|
// Loop over the examples handled by this thread, up to N and valid_examples.
#define FOR_ALL_X(expr) \
    for (int x = 0; x < N && x < valid_examples; ++x) { expr; }
__device__ void
train_N_examples(const float * input,
const int *labels,
const float * example_weights,
int valid_examples,
int num_layers,
float * scratch, // shared memory scratch space
const WeightsAccess<weights_tex> & w,
const WeightsAccess<biases_tex> & biases,
const int * architecture,
const int * w_strides,
UpdateFloat * const * w_updates, // wt updates for each layer
UpdateFloat * const * b_updates, // bias upd for each layer
int activation, // activation function
                 float inhibit, // target value for inhibited neuron
float fire, // target value for firing neuron
float learning_rate,
int num_threads_in_block,
int num_threads_on_multiprocessor,
int total_neurons,
int max_width,
float * layer_outputs) // global scratch space[total neurons]
{
// access thread id
const unsigned tid = threadIdx.x;
const unsigned block_num = blockIdx.x;
/*************************************************************************/
/* FPROP */
/*************************************************************************/
/* First, copy the inputs into shared memory */
int ni = architecture[0], no, w_stride;
int input_stride = ni;
int scratch_stride = max_width;
for (int x = 0; x < N; ++x) {
scratch[x * scratch_stride + tid]
= (tid < ni ? input[x * input_stride + tid] : 0.0);
}
/* Let everything catch up */
__syncthreads();
float * this_layer_outputs = layer_outputs;
WeightsAccess<weights_tex> layer_weights = w;
WeightsAccess<biases_tex> layer_biases = biases;
for (unsigned l = 0;
l < num_layers;
++l,
__syncthreads(),
layer_weights += ni * w_stride,
layer_biases += no,
this_layer_outputs += no) {
// Get architecture information about the layer:
ni = architecture[l];
no = architecture[l + 1];
w_stride = w_strides[l];
#if 1
// Start off with the bias terms
double initial = (tid < no ? layer_biases[tid] : 0.0);
double accum[N];
FOR_ALL_X(accum[x] = initial);
if (__any(tid < no)) {
for (unsigned i = 0; i < ni; ++i) {
// Coalesced access; maybe texture would be better
float weight
= (tid < no ? layer_weights[i * w_stride + tid] : 0.0);
for (int x = 0; x < N && x < valid_examples; ++x) {
// No bank conflicts as all threads are accessing same value
float inval = scratch[x * scratch_stride + i];
accum[x] += weight * inval;
}
}
}
// Let everything catch up so that we can write to scratch
__syncthreads();
if (__any(tid < no)) {
if (tid < no) {
for (int x = 0; x < N && x < valid_examples; ++x) {
this_layer_outputs[x * total_neurons + tid]
= scratch[x * scratch_stride + tid]
= transform(accum[x], activation);
}
}
}
#else
// WARNING: causes variability
/* We want to have each thread working here, even if no is much less
than the number of threads. To do so, we assign each thread to
a certain o value and a certain subset of the i values, and then
accumulate the updates, broadcasting them at the end.
For example:
32 threads
2 outputs
So we have 16 threads working on each example
100 threads
16 outputs
So we have 7 threads on the first 4 examples, and 6 threads on
the rest.
*/
int nt = num_threads_on_multiprocessor;
int min_threads = nt / no;
int left_over = nt % no;
int max_threads = min_threads + (left_over > 0);
int o = tid % no; // which o value are we working on?
int idx = tid / no; // which thread in that block?
int o_threads = min_threads + (o < left_over);
//double * accum = (double *)(scratch + N * scratch_stride);
double accum[N];
FOR_ALL_X(accum[x] = 0.0);
for (unsigned i = idx; i < ni; i += o_threads) {
float weight = layer_weights[i * w_stride + o];
FOR_ALL_X(accum[x] += weight * scratch[x * scratch_stride + i]);
}
if (max_threads > 1) {
// Multiple threads working per entry; we need to synchronize
__syncthreads();
if (tid < no) {
float bias = layer_biases[tid];
FOR_ALL_X(scratch[x * scratch_stride + tid] = bias);
}
__syncthreads();
/* Now we accumulate them, allowing each thread to increment in its
turn. */
for (unsigned i = 0; i < max_threads; ++i, __syncthreads())
if (i == idx)
FOR_ALL_X(scratch[x * scratch_stride + o] += accum[x]);
if (__any(tid < no)) {
if (tid < no)
for (int x = 0; x < N && x < valid_examples; ++x)
this_layer_outputs[x * total_neurons + tid]
= scratch[x * scratch_stride + tid]
= transform(scratch[x * scratch_stride + tid],
activation);
}
}
else {
// A single thread per entry; no synchronization
float bias = layer_biases[o];
for (int x = 0; x < N && x < valid_examples; ++x) {
// XXX can get rid of this store for output layer if we don't
// want to keep the data
this_layer_outputs[x * total_neurons + o]
= scratch[x * scratch_stride + o]
= transform(accum[x] + bias, activation);
}
}
#endif
}
// layer_biases is no longer used
/*************************************************************************/
/* BPROP */
/*************************************************************************/
/* Make this_layer_outputs point to the outputs of the layer before the
output */
this_layer_outputs -= no;
layer_weights -= ni * w_stride;
/* We keep the outputs of the next layer here to avoid needing to reload
them. */
float prev_outputs[N];
FOR_ALL_X(prev_outputs[x] = scratch[x * scratch_stride + tid]);
__syncthreads();
/* Calculate the error terms for the output units. Scratch contains the
outputs of output layer, so we can access this instead of
this_layer_outputs. */
for (int x = 0; x < N && x < valid_examples; ++x) {
bool correct = (labels[x] == tid);
float wanted = (correct ? fire : inhibit);
scratch[x * scratch_stride + tid]
= (tid < no ? wanted - prev_outputs[x] : 0.0);
}
/* Let everything catch up. Scratch now contains the errors for the
output layer. */
__syncthreads();
    /* Backpropagate. */
for (int l = num_layers - 1; l >= 0;
--l,
__syncthreads(),
layer_weights -= (l == -1 ? 0 : architecture[l] * w_strides[l]),
this_layer_outputs -= architecture[l + 1]) {
// Get information about the layer:
ni = architecture[l];
no = architecture[l + 1];
w_stride = w_strides[l];
UpdateFloat * layer_updates = w_updates[l];
UpdateFloat * layer_bias_updates = b_updates[l];
const float * last_layer_outputs = this_layer_outputs - ni;
//float * d = (scratch + N * scratch_stride);
float d[N];
/* Calculate the deltas for the current layer. Scratch contains the
errors from the previous layer on input, and the deltas from the
current layer on output. */
for (int x = 0; x < N && x < valid_examples; ++x) {
float error = scratch[x * scratch_stride + tid];
d[x] = (tid >= no ? 0.0 : delta(prev_outputs[x], error,
activation));
}
if (l > 0) {
// Make sure all threads have caught up so that we can modify error
// without affecting them
__syncthreads();
// Broadcast the d values so that we can use them to calculate the
// errors
for (int x = 0; x < N && x < valid_examples; ++x)
scratch[x * scratch_stride + tid] = d[x];
// Make sure everything can get its d value
__syncthreads();
double total[N];
FOR_ALL_X(total[x] = 0.0);
if (tid < ni) {
for (unsigned o = 0; o < no; ++o) {
float w = layer_weights[tid * w_stride + o];
for (int x = 0; x < N && x < valid_examples; ++x) {
float d = scratch[x * scratch_stride + o];
float update = d * w;
total[x] += update;
}
}
}
// Wait for everything to finish so that we can overwrite the d
// values with the new errors
__syncthreads();
for (int x = 0; x < N && x < valid_examples; ++x)
scratch[x * scratch_stride + tid] = total[x];
/* Scratch now contains the errors from the current layer. */
}
if (tid >= no) {
if (tid <= ni && l > 0) {
// Need to set up prev_outputs
for (int x = 0; x < N && x < valid_examples; ++x) {
float prev = last_layer_outputs[x * total_neurons + tid];
prev_outputs[x] = prev;
}
}
else {
//FOR_ALL_X(prev_outputs[x] = 0.0f);
}
// threads indexed too low just leave
continue;
}
/* Update the weights. */
float k[N];
FOR_ALL_X(k[x] = (x < valid_examples) * example_weights[x] * learning_rate);
/* Now for the updates. In order to avoid trying to write the same
memory over and over, we stagger the starting points so that
each example will start at a different place, thus minimising
conflicting atomic updates when we have multiple multiprocessors
working on the same thing. */
int thread_stride = ni / num_threads_in_block;
if (thread_stride == 0) thread_stride = 1;
int start_at = (block_num * thread_stride) % ni;
for (unsigned i_ = start_at; i_ < ni + start_at; ++i_) {
// Get the real index of i
unsigned i = i_ - (i_ >= ni) * ni;
double total_update = 0.0;
for (int x = 0; x < N && x < valid_examples; ++x) {
// All threads read the same value
float prev
= (l == 0
? input[x * input_stride + i]
: last_layer_outputs[x * total_neurons + i]);
float update = prev * k[x] * d[x];
total_update += update;
// If we've just read the value we need for the next
// block, then store it.
if (i == tid) prev_outputs[x] = prev;
}
atomic_add(layer_updates[i * w_stride + tid], total_update);
}
/* Update the bias */
double total_update = 0.0;
FOR_ALL_X(total_update += k[x] * d[x]);
//layer_bias_updates[tid] += update;
atomic_add(layer_bias_updates[tid], total_update);
}
}
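#if 0
// CPU reference sketch (not compiled) of the per-layer forward pass that the
// kernel above computes for each example:
//     out[o] = act(b[o] + sum_i in[i] * W[i * w_stride + o]).
// `activation_fn` stands in for the device-side transform(); it and this helper
// are assumptions for illustration, not part of this file.
static void fprop_layer_reference(const float * in, int ni,
                                  const float * weights, int w_stride,
                                  const float * biases, int no,
                                  float (*activation_fn)(double),
                                  float * out)
{
    for (int o = 0;  o < no;  ++o) {
        double accum = biases[o];              // start from the bias term
        for (int i = 0;  i < ni;  ++i)
            accum += in[i] * weights[i * w_stride + o];  // same layout as the kernel
        out[o] = activation_fn(accum);
    }
}
#endif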
|
the_stack
|
#define WARP_SIZE 32
__device__
unsigned int scanwarp(unsigned int val, volatile unsigned int* sData, const int maxlevel)
{
// The following is the same as 2 * RadixSort::WARP_SIZE * warpId + threadInWarp =
// 64*(threadIdx.x >> 5) + (threadIdx.x & (RadixSort::WARP_SIZE - 1))
int localId = threadIdx.x;
int idx = 2 * localId - (localId & (WARP_SIZE - 1));
sData[idx] = 0;
idx += WARP_SIZE;
sData[idx] = val;
if (0 <= maxlevel) { sData[idx] += sData[idx - 1]; }
if (1 <= maxlevel) { sData[idx] += sData[idx - 2]; }
if (2 <= maxlevel) { sData[idx] += sData[idx - 4]; }
if (3 <= maxlevel) { sData[idx] += sData[idx - 8]; }
if (4 <= maxlevel) { sData[idx] += sData[idx -16]; }
return sData[idx] - val; // convert inclusive -> exclusive
}
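// Worked example of the index arithmetic above: for threadIdx.x = 37,
// idx = 2 * 37 - (37 & 31) = 74 - 5 = 69, which equals
// 64 * (37 >> 5) + (37 & 31) = 64 + 5; after idx += WARP_SIZE it becomes 101.
// Each warp therefore owns a 64-entry window of sData whose first 32 entries
// are zeroed as identity padding for the scan.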
//----------------------------------------------------------------------------
// scan4 scans 4*RadixSort::CTA_SIZE numElements in a block (4 per thread), using
// a warp-scan algorithm
//----------------------------------------------------------------------------
__device__
uint4 scan4(const uint4 idata, unsigned int* ptr)
{
unsigned int idx = threadIdx.x;
uint4 val4 = idata;
unsigned int sum[3];
sum[0] = val4.x;
sum[1] = val4.y + sum[0];
sum[2] = val4.z + sum[1];
unsigned int val = val4.w + sum[2];
val = scanwarp(val, ptr, 4);
__syncthreads();
if ((idx & (WARP_SIZE - 1)) == WARP_SIZE - 1)
{
ptr[idx >> 5] = val + val4.w + sum[2];
}
__syncthreads();
if (idx < WARP_SIZE)
ptr[idx] = scanwarp(ptr[idx], ptr, 2);
__syncthreads();
val += ptr[idx >> 5];
val4.x = val;
val4.y = val + sum[0];
val4.z = val + sum[1];
val4.w = val + sum[2];
return val4;
}
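// Worked example for scan4 (illustrative values): if a thread holds
// idata = (1, 0, 2, 1), then sum = {1, 1, 3} and val = 4 is fed to scanwarp;
// with an exclusive block offset of, say, 10 for this thread, the returned
// value is val4 = (10, 11, 11, 13), i.e. the exclusive prefix sums of the
// thread's four flags within the block.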
__device__
uint4 rank4(const uint4 preds, unsigned int* sMem, unsigned int* numtrue)
{
int localId = threadIdx.x;
int localSize = blockDim.x;
uint4 address = scan4(preds, sMem);
if (localId == localSize - 1)
{
numtrue[0] = address.w + preds.w;
}
__syncthreads();
uint4 rank;
int idx = localId*4;
rank.x = (preds.x) ? address.x : numtrue[0] + idx - address.x;
rank.y = (preds.y) ? address.y : numtrue[0] + idx + 1 - address.y;
rank.z = (preds.z) ? address.z : numtrue[0] + idx + 2 - address.z;
rank.w = (preds.w) ? address.w : numtrue[0] + idx + 3 - address.w;
return rank;
}
__global__ void radixSortBlocksKeysK(
// __global uint4* keysIn,
unsigned int* keysIn,
//__global uint4* keysOut,
unsigned int* keysOut,
const unsigned int nbits,
const unsigned int startbit)
{
int globalId = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ unsigned int numtrue[1];
__shared__ unsigned int sMem[4*CTA_SIZE];
uint4 key = reinterpret_cast<uint4*>(keysIn)[globalId];
__syncthreads();
// radixSortBlockKeysOnly(&key, nbits, startbit, sMem, numtrue);
int localId = threadIdx.x;
int localSize = blockDim.x;
for(unsigned int shift = startbit; shift < (startbit + nbits); ++shift)
{
uint4 lsb;
lsb.x = !((key.x >> shift) & 0x1);
lsb.y = !((key.y >> shift) & 0x1);
lsb.z = !((key.z >> shift) & 0x1);
lsb.w = !((key.w >> shift) & 0x1);
uint4 r;
r = rank4(lsb, sMem, numtrue);
// This arithmetic strides the ranks across 4 CTA_SIZE regions
sMem[(r.x & 3) * localSize + (r.x >> 2)] = key.x;
sMem[(r.y & 3) * localSize + (r.y >> 2)] = key.y;
sMem[(r.z & 3) * localSize + (r.z >> 2)] = key.z;
sMem[(r.w & 3) * localSize + (r.w >> 2)] = key.w;
__syncthreads();
// The above allows us to read without 4-way bank conflicts:
key.x = sMem[localId];
key.y = sMem[localId + localSize];
key.z = sMem[localId + 2 * localSize];
key.w = sMem[localId + 3 * localSize];
__syncthreads();
}
//keysOut[globalId] = key;
reinterpret_cast<uint4*>(keysOut)[globalId] = key;
}
//----------------------------------------------------------------------------
// Given an array with blocks sorted according to a 4-bit radix group, each
// block counts the number of keys that fall into each radix in the group, and
// finds the starting offset of each radix in the block. It then writes the radix
// counts to the counters array, and the starting offsets to the blockOffsets array.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//
//----------------------------------------------------------------------------
__global__ void findRadixOffsetsK(
//__global uint2* keys,
unsigned int* keys,
unsigned int* counters,
unsigned int* blockOffsets,
const unsigned int startbit,
const unsigned int totalBlocks)
{
__shared__ unsigned int sStartPointers[16];
__shared__ unsigned int sRadix1[2*CTA_SIZE];
unsigned int groupId = blockIdx.x;
unsigned int localId = threadIdx.x;
unsigned int groupSize = blockDim.x;
unsigned int globalId = groupId * groupSize + localId;
// uint2 radix2;
// radix2 = keys[get_global_id(0)];
uint2 radix2 = reinterpret_cast<uint2*>(keys)[globalId];
sRadix1[2 * localId] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * localId + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(localId < 16)
{
sStartPointers[localId] = 0;
}
__syncthreads();
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId]] = localId;
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1])
{
sStartPointers[sRadix1[localId + groupSize]] = localId + groupSize;
}
__syncthreads();
if(localId < 16)
{
blockOffsets[groupId*16 + localId] = sStartPointers[localId];
}
__syncthreads();
// Compute the sizes of each block.
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId - 1]] =
localId - sStartPointers[sRadix1[localId - 1]];
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1] )
{
sStartPointers[sRadix1[localId + groupSize - 1]] =
localId + groupSize - sStartPointers[sRadix1[localId + groupSize - 1]];
}
if(localId == groupSize - 1)
{
sStartPointers[sRadix1[2 * groupSize - 1]] =
2 * groupSize - sStartPointers[sRadix1[2 * groupSize - 1]];
}
__syncthreads();
if(localId < 16)
{
counters[localId * totalBlocks + groupId] = sStartPointers[localId];
}
}
//----------------------------------------------------------------------------
// reorderData shuffles data in the array globally after the radix offsets
// have been found. On compute version 1.1 and earlier GPUs, this code depends
// on RadixSort::CTA_SIZE being 16 * number of radices (i.e. 16 * 2^nbits).
//
// On compute version 1.1 GPUs ("manualCoalesce=true") this function ensures
// that all writes are coalesced using extra work in the kernel. On later
// GPUs coalescing rules have been relaxed, so this extra overhead hurts
// performance. On these GPUs we set manualCoalesce=false and directly store
// the results.
//
// Template parameters are used to generate efficient code for various special cases
// For example, we have to handle arrays that are a multiple of the block size
// (fullBlocks) differently than arrays that are not. "loop" is used when persistent
// CTAs are used.
//
// By persistent CTAs we mean that we launch only as many thread blocks as can
// be resident in the GPU and no more, rather than launching as many threads as
// we have elements. Persistent CTAs loop over blocks of elements until all work
// is complete. This can be faster in some cases. In our tests it is faster
// for large sorts (and the threshold is higher on compute version 1.1 and earlier
// GPUs than it is on compute version 1.2 GPUs).
//----------------------------------------------------------------------------
__global__ void reorderDataKeysOnlyK(
//__global unsigned int *outKeys,
unsigned int* outKeys,
//__global uint2 *keys,
unsigned int* keys,
//__global unsigned int *blockOffsets,
unsigned int* blockOffsets,
//__global unsigned int *offsets,
unsigned int* offsets,
const unsigned int startbit,
const unsigned int numElements,
const unsigned int totalBlocks)
{
__shared__ unsigned int sOffsets[16];
__shared__ unsigned int sBlockOffsets[16];
__shared__ uint2 sKeys2[CTA_SIZE];
//__local unsigned int *sKeys1 = (__local unsigned int*)sKeys2;
unsigned int *sKeys1 = (unsigned int*)sKeys2;
unsigned int groupId = blockIdx.x;
unsigned int localId = threadIdx.x;
unsigned int groupSize = blockDim.x;
unsigned int globalId = groupId * groupSize + localId;
sKeys2[localId] = reinterpret_cast<uint2*>(keys)[globalId];
if(localId < 16)
{
sOffsets[localId] = offsets[localId * totalBlocks + groupId];
sBlockOffsets[localId] = blockOffsets[groupId * 16 + localId];
}
__syncthreads();
unsigned int radix = (sKeys1[localId] >> startbit) & 0xF;
unsigned int globalOffset = sOffsets[radix] + localId - sBlockOffsets[radix];
if (globalOffset < numElements)
{
outKeys[globalOffset] = sKeys1[localId];
}
radix = (sKeys1[localId + groupSize] >> startbit) & 0xF;
globalOffset = sOffsets[radix] + localId + groupSize - sBlockOffsets[radix];
if (globalOffset < numElements)
{
outKeys[globalOffset] = sKeys1[localId + groupSize];
}
}
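#if 0
// Host-side sketch (not compiled) of how one 4-bit pass of the key-only radix
// sort above could be composed. The buffer ownership, grid sizes, the
// assumption that numElements is a multiple of 4 * CTA_SIZE, and the host-side
// exclusive scan of `counters` are all illustrative assumptions, not code from
// this file; a real implementation scans on the device.
static void radixSortKeysPass(unsigned int* d_keysIn, unsigned int* d_keysOut,
                              unsigned int* d_temp, unsigned int* d_counters,
                              unsigned int* d_blockOffsets, unsigned int* d_offsets,
                              unsigned int numElements, unsigned int startbit)
{
    // radixSortBlocksKeysK sorts 4 keys per thread within each block.
    unsigned int sortBlocks = numElements / (4 * CTA_SIZE);
    radixSortBlocksKeysK<<<sortBlocks, CTA_SIZE>>>(d_keysIn, d_temp, 4, startbit);
    // findRadixOffsetsK reads 2 keys per thread.
    unsigned int offsetBlocks = numElements / (2 * CTA_SIZE);
    findRadixOffsetsK<<<offsetBlocks, CTA_SIZE>>>(d_temp, d_counters,
                                                  d_blockOffsets, startbit,
                                                  offsetBlocks);
    // Exclusive scan of the radix-major counters into d_offsets (done on the
    // host here purely for illustration).
    unsigned int n = 16 * offsetBlocks;
    unsigned int *h = new unsigned int[n];
    cudaMemcpy(h, d_counters, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    unsigned int running = 0;
    for (unsigned int i = 0; i < n; ++i) {
        unsigned int c = h[i];
        h[i] = running;
        running += c;
    }
    cudaMemcpy(d_offsets, h, n * sizeof(unsigned int), cudaMemcpyHostToDevice);
    delete [] h;
    // Scatter the block-sorted keys to their final positions for this pass.
    reorderDataKeysOnlyK<<<offsetBlocks, CTA_SIZE>>>(d_keysOut, d_temp,
                                                     d_blockOffsets, d_offsets,
                                                     startbit, numElements,
                                                     offsetBlocks);
}
#endif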
|
the_stack
|
#include "ImageMatch.h"
// 宏:DEF_BLOCK_X 和 DEF_BLOCK_Y
// 定义了默认的线程块尺寸
#define DEF_BLOCK_X 32
#define DEF_BLOCK_Y 8
// 宏:FAST_RUN
// 打开该开关,在 Kernel 中将不会搜索当前点的领域,可以加快运行时间,但匹配的
// 准确度将没有那么高
#define FAST_RUN
#ifdef FAST_RUN
// Device 数据,_tpl1x1Gpu
// 为了加快算法的运行时间,用此数据来替代 _tpl3x3Gpu
static __device__ int _tpl1x1Gpu[] = {0, 0};
#else
// Device 数据:_tpl3x3Gpu
// 3 * 3 的模版,为了加快算法的速度,暂时不用
static __device__ int _tpl3x3Gpu[] = { -1, -1, 0, -1, 1, -1,
-1, 0, 0, 0, 1, 0,
-1, 1, 0, 1, 1, 1 };
#endif
// Device 函数:_rotateXYDev(计算旋转表坐标对应的在 TEST 图像上的坐标)
// 计算 TEMPLATE 坐标对应的旋转表坐标在 TEST 图像上所对应的坐标
static __device__ int // 返回值:函数是否正确执行,如果函数
// 正确执行,返回 NO_ERROR
_rotateXYDev(
int x, int y, // TEMPLATE 对应的旋转表的横坐标和纵
// 坐标,旋转中心为原点
int xc, int yc, // 现在匹配的横坐标和纵坐标,以左上角为原点
RotateTable rotatetable, // 旋转表
float angle, // 旋转角
int *rx, // 转换后得到对应 TEST 图像的横坐标
int *ry // 转换后得到对应 TEST 图像的纵坐标
);
// Device function: _getSuitValueFromNormalTplDev (find a suitable value in the
// normalized result)
// Within a neighborhood of the given TEMPLATE coordinate, find the normalized
// value that is closest to flag.
static __device__ float      // Return value: the normalized value closest to flag
_getSuitValueFromNormalTplDev(
        int x, int y,            // coordinate in the TEMPLATE
        float *normalizedata,    // normalized result of the TEMPLATE
        size_t pitch,            // pitch of normalizedata
        int width, int height,   // width and height of the TEMPLATE
        float flag               // value to compare against
);
// Host function: _getCormapMaxIndex (get the index of the largest value in
// cormapsum)
// Finds the largest value among the matching results.
static __host__ int          // Return value: index of the largest value in cormapsum
_getCormapMaxIndex(
        float *cormapcpu,        // cormapsum data
        int count                // number of entries in cormapsum
);
// Kernel function: _calCorMapSumKer (compute the sum of cormap over each
// point's neighborhood)
// Computes the sum of cormap over a neighborhood centered at each point.
static __global__ void       // Return value: kernels return no value
_calCorMapSumKer(
        float *cormap,            // cormap data
        int dwidth, int dheight,  // width and height of the perturbation range
        int scope,                // neighborhood size; the sum is taken over a
                                  // scope * scope window
        float *cormapsumgpu       // summed result
);
// Kernel function: _matchKer (match a group of TEMPLATEs against the TEST image)
// Normalizes a group of TEMPLATE images and matches them against the TEST
// image at different rotation angles, producing a correlation coefficient for
// every point.
static __global__ void       // Return value: kernels return no value
_matchKer(
        float **tplnormalization,       // normalized result of each TEMPLATE
        size_t *tplpitch,               // pitch of each TEMPLATE's normalized array
        int tplcount,                   // number of TEMPLATEs
        int tplwidth, int tplheight,    // width and height of each TEMPLATE
        float *testnormalization,       // normalized result of TEST
        size_t testpitch,               // pitch of TEST's normalized result
        int testwidth, int testheight,  // width and height of the TEST image
        RotateTable rotatetable,        // rotate table
        float *cormap,                  // stores the correlation coefficient
                                        // obtained at every matched point
        int offsetx, int offsety,       // offset of the perturbation range
        int dwidth, int dheight,        // width and height of the perturbation range
        int tploffx, int tploffy        // offset of the TEMPLATE
);
// Kernel function: _localCheckErrKer (perform a local anomaly check)
// Performs a local anomaly check on the matching result.
static __global__ void       // Return value: kernels return no value
_localCheckErrKer(
        float *besttplnor,              // normalized data of the matched TEMPLATE
        size_t besttplpitch,            // pitch of besttplnor
        int tplwidth, int tplheight,    // width and height of the TEMPLATE
        float *testnormalization,       // normalized data of the TEST image
        size_t testpitch,               // pitch of testnormalization
        int testwidth, int testheight,  // width and height of the TEST image
        RotateTable rotatetable,        // rotate table
        int *errmap,                    // array recording anomalies
        size_t errmappitch,             // pitch of errmap
        int errmapwidth, int errmapheight,  // size of the errmap array
        float errthreshold,             // threshold for the anomaly check
        int mx, int my,                 // match center obtained from matching
        float angle,                    // rotation angle obtained from matching
        int tploffx, int tploffy        // offset of the TEMPLATE
);
// Kernel function: _binarizeKer (binarize errmap)
// Binarizes errMap.
static __global__ void       // Return value: kernels return no value
_binarizeKer(
        int *errmap,                        // errmap data
        size_t errmappitch,                 // pitch of errmap
        int errmapwidth, int errmapheight,  // width and height of errmap
        ImageCuda out                       // binarized output image
);
// Kernel function: _getMaxWinKer (count the high-value points of each window)
// Scans every window and counts the high-value points it contains.
static __global__ void       // Return value: whether the function executed
                             // correctly; returns NO_ERROR on success
_getMaxWinKer(
        ImageCuda errmapimg,  // binarized errmap image
        Template wintpl,      // window template
        int *wincountcud      // stores the per-window scan results
);
// Host function: _getDirectRectForErrMap (get the minimum directed rectangle
// of errmap)
// Gets the minimum directed rectangle of the islands in errmap.
static __host__ int          // Return value: whether the function executed
                             // correctly; returns NO_ERROR on success
_getDirectRectForErrMap(
        int *errmap,          // errmap data
        size_t errmappitch,   // pitch of errmap
        int errmapwidth,      // width of errmap
        int errmapheight,     // height of errmap
        int errwinwidth,      // width of the window
        int errwinheight,     // height of the window
        int errwinthreshold,  // threshold of the window
        DirectedRect *dirrect // resulting minimum directed rectangle
);
// Device function: _getSuitValueFromNormalTplDev (find a suitable value in the
// normalized result)
static __device__ float _getSuitValueFromNormalTplDev(int x, int y,
                                                      float *normalizedata,
                                                      size_t pitch, int width,
                                                      int height, float flag)
{
#ifdef FAST_RUN
    int *tpl = _tpl1x1Gpu;  // pointer to the 1 * 1 template
#else
    int *tpl = _tpl3x3Gpu;  // pointer to the 3 * 3 template
#endif
    int currx = x, curry = y;  // current coordinate in the TEMPLATE
    float currvalue;           // value currently being examined
    float currdiff;            // absolute difference between currvalue and flag
    // Initialize currvalue to the normalized value at (x, y).
    currvalue = *((float *)((char *)normalizedata + curry * pitch) + currx);
    // Smallest difference from flag seen so far, initialized to the absolute
    // difference between the normalized value at (x, y) and flag.
    float mindiff = fabsf(currvalue - flag);
    // Value closest to flag seen so far, initialized to the normalized value at (x, y).
    float minvalue = currvalue;
    // Scan every point of the template to find the value closest to flag.
#ifdef FAST_RUN
    for (int i = 0; i < 1; i++) {
        for (int j = 0; j < 1; j++) {
#else
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
#endif
            // Compute the coordinate of the current point.
            currx = x + *(tpl++);
            curry = y + *(tpl++);
            // Skip the current point if its coordinate is out of bounds.
            if (currx >= 0 && currx < width && curry >= 0 && curry < height) {
                // Compute the normalized value at the current coordinate.
                currvalue = *((float *)((char *)normalizedata + curry * pitch) +
                              currx);
                // Compute the absolute difference between currvalue and flag.
                currdiff = fabsf(currvalue - flag);
                // If currdiff is smaller than mindiff, record currvalue as the
                // new minvalue and update mindiff.
                if (currdiff < mindiff) {
                    mindiff = currdiff;
                    minvalue = currvalue;
                }
            }
        }
    }
    // Return the value closest to flag.
    return minvalue;
}
// Device function: _rotateXYDev (map a rotate-table coordinate to a coordinate
// on the TEST image)
static __device__ int _rotateXYDev(int x, int y, int xc, int yc,
                                   RotateTable rotatetable, float angle,
                                   int *rx, int *ry)
{
    int errcode;   // local variable, error code
    float tx, ty;  // rotated coordinates obtained from the rotate table
    // Get the rotated coordinates from the rotate table.
    errcode = rotatetable.getRotatePos(x, y, angle, tx, ty);
    if (errcode != NO_ERROR)
        return errcode;
    // Compute the corresponding coordinate on the TEST image; round tx and ty
    // to the nearest integer first.
    *rx = (int)(tx + 0.5f) + xc;
    *ry = (int)(ty + 0.5f) + yc;
    // Done, return NO_ERROR.
    return NO_ERROR;
}
// Member method: initNormalizeData (initialize the configured TEMPLATEs)
__host__ int ImageMatch::initNormalizeData()
{
    int cudaerr;  // local variable, error code returned by CUDA API calls
    // Delete the data associated with the previous TEMPLATEs.
    deleteNormalizeData();
    // Copy tplTmpCount, which temporarily records the TEMPLATE count, to tplCount.
    this->tplCount = this->tplTmpCount;
    // Allocate space for tplNormalization.
    tplNormalization = new float *[this->tplCount];
    if (tplNormalization == NULL)
        return OUT_OF_MEM;
    // Allocate space for pitch.
    pitch = new size_t[this->tplCount];
    if (pitch == NULL)
        return OUT_OF_MEM;
    // Allocate device space for each element of tplNormalization in turn.
    for (int i = 0; i < tplCount; i++) {
        // Allocate device space.
        cudaerr = cudaMallocPitch((void **)&(tplNormalization[i]), &(pitch[i]),
                                  tplWidth * sizeof (float), tplHeight);
        if (cudaerr != cudaSuccess)
            return CUDA_ERROR;
    }
    // Done, return NO_ERROR.
    return NO_ERROR;
}
// Member method: deleteNormalizeData (delete the normalized TEMPLATE data)
__host__ int ImageMatch::deleteNormalizeData()
{
    // If tplNormalization is not NULL, free the device space of every
    // tplNormalization entry and then the array pointed to by tplNormalization.
    if (tplNormalization != NULL) {
        // Scan every tplNormalization entry.
        for(int i = 0; i < tplCount; i++)
            // Free the device memory pointed to by each tplNormalization entry.
            cudaFree(tplNormalization[i]);
        // Free the array pointed to by tplNormalization.
        delete [] tplNormalization;
        // Set tplNormalization to NULL to avoid a dangling pointer.
        tplNormalization = NULL;
    }
    // If pitch is NULL, skip it.
    if (pitch != NULL) {
        // Otherwise free the memory pointed to by pitch.
        delete [] pitch;
        // Set pitch to NULL to avoid a dangling pointer.
        pitch = NULL;
    }
    // Done, return NO_ERROR.
    return NO_ERROR;
}
// Member method: normalizeForTpl (normalize the configured TEMPLATEs)
__host__ int ImageMatch::normalizeForTpl()
{
    int errcode;  // local variable, error code
    // First check whether the TEMPLATEs need to be normalized; if not, return
    // immediately, which improves efficiency.
    if (this->needNormalization) {
        // Initialize the normalization data and allocate space for the results.
        errcode = initNormalizeData();
        if (errcode != NO_ERROR)
            return errcode;
        // Create an object used for the normalization operation.
        Normalization normal(3);
        // Scan every TEMPLATE.
        for (int i = 0; i < tplCount; i++) {
            // Normalize each TEMPLATE separately.
            errcode = normal.normalize(tplImages[i], tplNormalization[i],
                                       pitch[i], tplWidth, tplHeight, false);
            if (errcode != NO_ERROR) {
                return errcode;
            }
        }
        // Set needNormalization to false so normalization is not repeated next time.
        this->needNormalization = false;
    }
    // Done, return NO_ERROR.
    return NO_ERROR;
}
// Host function: _getCormapMaxIndex (get the index of the largest value in cormap)
static __host__ int _getCormapMaxIndex(float *cormapcpu, int count)
{
    // Largest value in cormap, initialized to the first entry.
    float max = cormapcpu[0];
    // Index of the largest value in cormap, initialized to the first entry.
    int maxindex = 0;
    // Scan every entry of cormap.
    for (int i = 1; i < count; i++) {
        // If the current entry is larger than max, replace max and record the index.
        if (max < cormapcpu[i]) {
            max = cormapcpu[i];
            maxindex = i;
        }
    }
    // Done, return the index of the largest value in cormap.
    return maxindex;
}
// Binarize errMap.
static __global__ void _binarizeKer(int *errmap, size_t errmappitch,
                                    int errmapwidth, int errmapheight,
                                    ImageCuda out)
{
    // Compute the coordinate of the point handled by the current thread.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Return immediately if the coordinate is out of bounds.
    if (x < 0 || x >= errmapwidth || y < 0 || y >= errmapheight)
        return;
    // Get the pointer into errmap for the current coordinate.
    int *perr = (int *)((char *)errmap + y * errmappitch) + x;
    // If the errmap value at the current coordinate is above zero, set the
    // output pixel to 255, otherwise to 0.
    *(out.imgMeta.imgData + y * out.pitchBytes + x) = (*perr > 0) ? 255 : 0;
}
// Find the center of the window that contains the most high-value points.
static __global__ void _getMaxWinKer(ImageCuda errmapimg, Template wintpl,
                                     int *wincountcud)
{
    // Compute the coordinate of the point handled by the current thread.
    int dstc = blockIdx.x * blockDim.x + threadIdx.x;
    int dstr = blockIdx.y * blockDim.y + threadIdx.y;
    // Return immediately if the coordinate is out of bounds.
    if (dstc < 0 || dstc >= errmapimg.imgMeta.width
        || dstr < 0 || dstr >= errmapimg.imgMeta.height)
        return;
    // Temporary x and y components of a pixel coordinate.
    int dx, dy;
    // Pointer recording the current position in the template.
    int *curtplptr = wintpl.tplData;
    // Counter for the number of high-value points in the window.
    int count = 0;
    // Holds a pixel value read from the image.
    unsigned char pix;
    // Scan every pixel covered by the template.
    for (int i = 0; i < wintpl.count; i++) {
        // Compute the x and y components of the pixel at the current template
        // position; the template stores each point as two adjacent array
        // entries, so the template pointer is incremented after each read.
        dx = dstc + *(curtplptr++);
        dy = dstr + *(curtplptr++);
        // Check whether dx and dy are within bounds.
        if (dx >= 0 && dx < errmapimg.imgMeta.width
            && dy >= 0 && dy < errmapimg.imgMeta.height) {
            // Read the image value at (dx, dy).
            pix = *(errmapimg.imgMeta.imgData + dy * errmapimg.pitchBytes + dx);
            // If the current pixel is greater than 0, increment count.
            (pix > 0) ? (count++) : 0;
        }
    }
    // Store the count into wincountcud.
    *(wincountcud + dstr * errmapimg.imgMeta.width + dstc) = count;
}
// Get the minimum bounding directed rectangle of each dense island in errMap.
static __host__ int _getDirectRectForErrMap(int *errmap, size_t errmappitch,
                                            int errmapwidth, int errmapheight,
                                            int errwinwidth, int errwinheight,
                                            int errwinthreshold,
                                            DirectedRect *dirrect)
{
    // If errmap or dirrect is NULL, return NULL_POINTER.
    if (errmap == NULL || dirrect == NULL)
        return NULL_POINTER;
    int errcode;          // local variable, error code
    cudaError_t cudaerr;  // local variable, error code returned by CUDA calls
    dim3 blocksize;
    dim3 gridsize;
    // Window template.
    Template *wintpl;
    // Get a box-shaped template from the TemplateFactory.
    errcode = TemplateFactory::getTemplate(&wintpl, TF_SHAPE_BOX,
                                           dim3(errwinwidth, errwinheight));
    // If the template could not be obtained, return the error.
    if (errcode != NO_ERROR)
        return errcode;
    // Copy the template data to the device.
    TemplateBasicOp::copyToCurrentDevice(wintpl);
    // Device-side storage for the number of high-value points in each window.
    int *wincountcud;
    // Size of wincount.
    int wincountsize = sizeof (int) * errmapwidth * errmapheight;
    // Allocate device memory for wincountcud.
    cudaerr = cudaMalloc((void **)&wincountcud, wincountsize);
    // If the allocation failed, return an error.
    if (cudaerr != cudaSuccess) {
        TemplateFactory::putTemplate(wintpl);
        return CUDA_ERROR;
    }
    // Compute the thread block dimensions.
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    gridsize.x = (errmapwidth + blocksize.x - 1) / blocksize.x;
    gridsize.y = (errmapheight + blocksize.y - 1) / blocksize.y;
    // Image obtained by binarizing errmap.
    Image *errmapimg;
    errcode = ImageBasicOp::newImage(&errmapimg);
    if (errcode != NO_ERROR)
        return errcode;
    // Allocate device space for errmapimg.
    errcode = ImageBasicOp::makeAtCurrentDevice(errmapimg,
                                                errmapwidth, errmapheight);
    if (errcode != NO_ERROR) {
        ImageBasicOp::deleteImage(errmapimg);
        return errcode;
    }
    // Convert errmapimg to the ImageCuda format.
    ImageCuda *errmapimgcud;
    errmapimgcud = IMAGE_CUDA(errmapimg);
    // Call the binarization kernel to turn errmap into the binary image errmapimg.
    _binarizeKer<<<gridsize, blocksize>>>(errmap, errmappitch,
                                          errmapwidth, errmapheight,
                                          *errmapimgcud);
    // If binarization failed, free the memory and return CUDA_ERROR.
    if (cudaGetLastError() != cudaSuccess) {
        TemplateFactory::putTemplate(wintpl);
        cudaFree(wincountcud);
        return CUDA_ERROR;
    }
    // Count the high-value points of every window.
    _getMaxWinKer<<<gridsize, blocksize>>>(*errmapimgcud, *wintpl, wincountcud);
    // If the kernel failed, free the memory and return CUDA_ERROR.
    if (cudaGetLastError() != cudaSuccess) {
        TemplateFactory::putTemplate(wintpl);
        cudaFree(wincountcud);
        return CUDA_ERROR;
    }
    // Return the template to the template factory.
    TemplateFactory::putTemplate(wintpl);
    // Allocate host memory to hold the per-window counts of high-value points.
    int *wincount;
    wincount = (int *)malloc(wincountsize);
    if (wincount == NULL) {
        cudaFree(wincountcud);
        return OUT_OF_MEM;
    }
    // Copy the data in wincountcud from the device to the host.
    cudaerr = cudaMemcpy(wincount, wincountcud, wincountsize,
                         cudaMemcpyDeviceToHost);
    if (cudaerr != cudaSuccess) {
        cudaFree(wincountcud);
        free(wincount);
        return CUDA_ERROR;
    }
    // Center coordinates of the window with the most high-value points.
    int maxwinx = 0, maxwiny = 0;
    // Largest number of high-value points in a window, initialized to the threshold.
    int maxwinvalue = errwinthreshold;
    for (int i = 0; i < errmapheight; i++) {
        for (int j = 0; j < errmapwidth; j++) {
            if (maxwinvalue < *(wincount + i * errmapwidth + j)) {
                // Record the current center with the most high-value points.
                maxwinvalue = *(wincount + i * errmapwidth + j);
                maxwinx = j;
                maxwiny = i;
            }
        }
    }
    // If the maximum still equals the threshold, there is no local anomaly.
    if (maxwinvalue == errwinthreshold) {
        // Zero all members of dirrect.
        dirrect->angle = 0.0f;
        dirrect->centerPoint[0] = 0;
        dirrect->centerPoint[1] = 0;
        dirrect->length1 = 0;
        dirrect->length2 = 0;
    } else {
        // Object used to compute the minimum directed rectangle.
        SmallestDirRect sdr;
        // Set the ROI of errmapimg.
        errmapimg->roiX1 = maxwinx - errwinwidth / 2;
        errmapimg->roiY1 = maxwiny - errwinheight / 2;
        errmapimg->roiX2 = maxwinx + errwinwidth / 2;
        errmapimg->roiY2 = maxwiny + errwinheight / 2;
        RoiCopy roicopy;
        Image *timg;
        ImageBasicOp::newImage(&timg);
        roicopy.roiCopyAtHost(errmapimg, timg);
        // Use the smallest-directed-rectangle algorithm to get the minimum
        // directed rectangle of errmapimg.
        errcode = sdr.smallestDirRect(timg, dirrect);
        // On failure, free the memory and return the error.
        if (errcode != NO_ERROR) {
            cudaFree(wincountcud);
            free(wincount);
            return errcode;
        }
    }
    // Free the memory.
    cudaFree(wincountcud);
    free(wincount);
    // Done, return.
    return NO_ERROR;
}
// Kernel function: _calCorMapSumKer (compute the sum of cormap over each
// point's neighborhood)
static __global__ void _calCorMapSumKer(float *cormap,
                                        int dwidth, int dheight,
                                        int scope, float *cormapsumgpu)
{
    // The kernel uses a 3-D launch: the first two dimensions index the
    // perturbation range, and the third dimension enumerates all templates at
    // all angles, where z % (number of templates) gives the template index and
    // z / (number of templates) gives the rotation-angle index.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z;
    // Return immediately if the coordinate is out of bounds.
    if (x < 0 || x >= dwidth || y < 0 || y >= dheight)
        return;
    // Coordinate currently being processed.
    int currx, curry;
    // Sum of cormap over the neighborhood of (x, y), initialized to 0.0.
    float sum = 0.0f;
    // Scan every point of the neighborhood and accumulate the sum.
    for (int i = 0; i < scope; i++) {
        for (int j = 0; j < scope; j++) {
            // Compute the coordinate of the point to process.
            currx = x + j - scope / 2;
            curry = y + i - scope / 2;
            // Check whether the coordinate is within bounds.
            if (currx >= 0 && currx < dwidth && curry >= 0 && curry < dheight) {
                // Add the cormap value at the current coordinate to sum.
                sum += *(cormap + z * dwidth * dheight + curry * dwidth +
                         currx);
            }
        }
    }
    // Store the result in cormapsumgpu.
    *(cormapsumgpu + z * dwidth * dheight + y * dwidth + x) = sum;
}
// Kernel function: _matchKer (match a group of TEMPLATEs against the TEST image)
static __global__ void _matchKer(float **tplnormalization, size_t *tplpitch,
                                 int tplcount, int tplwidth, int tplheight,
                                 float *testnormalization, size_t testpitch,
                                 int testwidth, int testheight,
                                 RotateTable rotatetable, float *cormap,
                                 int offsetx, int offsety,
                                 int dwidth, int dheight, int tploffx,
                                 int tploffy)
{
    // The kernel uses a 3-D launch: the first two dimensions index the
    // perturbation range, and the third dimension enumerates all templates at
    // all angles, where z % tplcount gives the template index and z / tplcount
    // gives the rotation-angle index.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z;
    // Return immediately if the coordinate is out of bounds.
    if (x < 0 || x >= dwidth || y < 0 || y >= dheight)
        return;
    // Record the original (x, y) coordinates, which are needed for addressing later.
    int tx = x;
    int ty = y;
    // Compute the actual (x, y) coordinates on the TEST image, which only
    // requires adding an offset.
    x += offsetx;
    y += offsety;
    int errcode;           // local variable, error code
    int testx, testy;      // local variables, coordinates on the TEST image
    float *testnorptr;     // local variable, pointer into the normalized TEST result
    float tcormap = 0.0f;  // local variable, correlation coefficient at (x, y)
    // Get the rotation angle.
    float angle = rotatetable.getAngleVal(z / tplcount);
    // Compute the index of the current TEMPLATE.
    int tplindex = z % tplcount;
    // Scan the TEMPLATE that is currently being processed.
    for (int i = 0; i < tplheight; i++) {
        for (int j = 0; j < tplwidth; j++) {
            // Compute the coordinate on the TEST image of each TEMPLATE point
            // after rotating it by angle.
            errcode = _rotateXYDev(j - tploffx, i - tploffy,
                                   x, y, rotatetable, angle,
                                   &testx, &testy);
            // On error, skip this point and process the next one.
            if (errcode != NO_ERROR)
                continue;
            // If testx and testy fall outside the TEST image, skip this point.
            if (testx < 0 || testx >= testwidth ||
                testy < 0 || testy >= testheight)
                continue;
            // Get the pointer into the normalized TEST result at the rotated point.
            testnorptr = (float *)((char *)testnormalization +
                                   testy * testpitch) + testx;
            // Read the corresponding normalized TEST value.
            float testnor = *testnorptr;
            // Find the value closest to testnor within the neighborhood of the
            // TEMPLATE's normalized result.
            float tplnor = _getSuitValueFromNormalTplDev(
                                   j, i,
                                   tplnormalization[tplindex],
                                   tplpitch[tplindex],
                                   tplwidth, tplheight,
                                   testnor);
            // Accumulate the correlation contribution of this TEMPLATE point.
            tcormap += testnor * tplnor;
        }
    }
    // Write the computed correlation coefficient into cormap.
    *(cormap + z * dwidth * dheight + ty * dwidth + tx) = tcormap;
}
// Kernel function: _localCheckErrKer (perform a local anomaly check)
static __global__ void _localCheckErrKer(float *besttplnor, size_t besttplpitch,
                                         int tplwidth, int tplheight,
                                         float *testnormalization,
                                         size_t testpitch,
                                         int testwidth, int testheight,
                                         RotateTable rotatetable,
                                         int *errmap, size_t errmappitch,
                                         int errmapwidth, int errmapheight,
                                         float errthreshold, int mx, int my,
                                         float angle, int tploffx, int tploffy)
{
    // The kernel uses a 2-D launch: the first dimension is the TEMPLATE x
    // coordinate and the second is the TEMPLATE y coordinate.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Return immediately if the coordinate is out of bounds.
    if (x < 0 || x >= tplwidth || y < 0 || y >= tplheight)
        return;
    int errcode;       // local variable, error code
    int testx, testy;  // local variables, coordinates on the TEST image
    // Compute the coordinate on the TEST image of (x, y) rotated by angle.
    errcode = _rotateXYDev(x - tploffx, y - tploffy, mx, my, rotatetable,
                           angle, &testx, &testy);
    // Return immediately on error.
    if (errcode != NO_ERROR)
        return;
    // If the rotated TEMPLATE point falls outside the TEST image, return.
    if (testx < 0 || testx >= testwidth || testy < 0 || testy >= testheight)
        return;
    // Read the normalized TEST value at (testx, testy).
    float testnor =
            *((float *)((char *)testnormalization + testy * testpitch) + testx);
    // Get the TEMPLATE value closest to testnor within the neighborhood of (x, y).
    float tplnor = _getSuitValueFromNormalTplDev(x, y, besttplnor, besttplpitch,
                                                 tplwidth, tplheight, testnor);
    // Compute the squared difference between the TEMPLATE point and the
    // corresponding TEST point.
    float v = (testnor - tplnor) * (testnor - tplnor);
    // If the difference exceeds the threshold, scale it by 200 and store it in
    // errmap; otherwise store 0.
    *((int *)((char *)errmap + testy * errmappitch) + testx) =
            (v > errthreshold) ? (int)(200.0f * v) : 0;
}
// Macro: FAIL_MEM_FREE
// Frees the temporarily allocated memory.
#define FAIL_MEM_FREE  do {                       \
        if (testnormalization != NULL) {          \
            cudaFree(testnormalization);          \
            testnormalization = NULL;             \
        }                                         \
        if (bigmem != NULL) {                     \
            cudaFree(bigmem);                     \
            bigmem = NULL;                        \
        }                                         \
        if (errmap != NULL) {                     \
            cudaFree(errmap);                     \
        }                                         \
        if (cormapcpu != NULL) {                  \
            delete [] cormapcpu;                  \
            cormapcpu = NULL;                     \
        }                                         \
    } while (0)
// Member method: imageMatch (match the image to be matched against the given
// images at different rotation angles)
__host__ int ImageMatch::imageMatch(Image *matchimage, MatchRes *matchres,
                                    DirectedRect *dirrect)
{
    int errcode;          // local variable, error code
    cudaError_t cudaerr;  // local variable, error code returned by CUDA calls
    dim3 gridsize;
    dim3 blocksize;
    // Check whether the rotate table is NULL.
    if (rotateTable == NULL)
        return NULL_POINTER;
    // Check whether the image to be matched and the matchres structure that
    // stores the matching result are NULL.
    if (matchimage == NULL || matchres == NULL)
        return NULL_POINTER;
    // Initialize the rotate table, computing the table for the given set of points.
    if (rotateTable->getCurrentState() == NULL_RTT) {
        errcode = rotateTable->initRotateTable();
        if (errcode != NO_ERROR) {
            return errcode;
        }
    }
    // Copy the TEST image data to device memory.
    errcode = ImageBasicOp::copyToCurrentDevice(matchimage);
    if (errcode != NO_ERROR)
        return errcode;
    // Normalize the configured TEMPLATE images.
    errcode = normalizeForTpl();
    if (errcode != NO_ERROR)
        return errcode;
    // Extract the ROI sub-image of the image to be matched.
    ImageCuda matchimageCud;
    errcode = ImageBasicOp::roiSubImage(matchimage, &matchimageCud);
    if (errcode != NO_ERROR)
        return errcode;
    // Compute the width and height of the TEST image.
    int testwidth = matchimageCud.imgMeta.width;
    int testheight = matchimageCud.imgMeta.height;
    // Local variable, stores the normalized result of the TEST image.
    float *testnormalization = NULL;
    // Local variable, pitch of testnormalization.
    size_t testpitch;
    // One sufficiently large block of memory; the space needed later is taken
    // directly from it.
    char *bigmem = NULL;
    // Host memory used to copy the data in cormapsumgpu back to the host.
    float *cormapcpu = NULL;
    // Temporary variable for the errmap space.
    int *errmap = NULL;
    // Allocate device space for testnormalization.
    cudaerr = cudaMallocPitch((void **)&testnormalization, &testpitch,
                              testwidth * sizeof (float), testheight);
    if (cudaerr != cudaSuccess)
        return CUDA_ERROR;
    // Create an object for the normalization operation.
    Normalization normal(3);
    // Normalize the TEST image.
    errcode = normal.normalize(&(matchimageCud.imgMeta), testnormalization,
                               testpitch, testwidth, testheight, false);
    if (errcode != NO_ERROR) {
        FAIL_MEM_FREE;
        return errcode;
    }
    // Compute the number of rotation angles.
    int anglecount = rotateTable->getAngleCount();
    int offsetx, offsety;  // offset of the perturbation range
    // Offset of the perturbation range relative to its center (dx, dy).
    offsetx = dx - dWidth / 2;
    offsety = dy - dHeight / 2;
    // Cursor into bigmem, marking the start of the remaining memory.
    char *cursor;
    cudaerr = cudaMalloc((void **)&bigmem,
                         tplCount * sizeof (float *) +
                         tplCount * sizeof (size_t) +
                         2 * dWidth * dHeight * tplCount * anglecount *
                         sizeof (float));
    // If the allocation failed, free the earlier allocations to avoid a memory
    // leak and return an error.
    if (cudaerr != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // The cursor initially points to bigmem.
    cursor = bigmem;
    // Device pointer that stores the normalized results of the group of TEMPLATEs.
    float **tplnormalizationCud;
    // Device pointer that stores the pitch values of the normalized TEMPLATE results.
    size_t *tplpitchCud;
    // Take memory from bigmem.
    tplnormalizationCud = (float **)cursor;
    // Advance the cursor.
    cursor += tplCount * sizeof (float *);
    // Copy the pointers to each TEMPLATE's normalized data to device memory.
    cudaerr = cudaMemcpy(tplnormalizationCud, tplNormalization,
                         sizeof (float *) * tplCount, cudaMemcpyHostToDevice);
    // If the copy failed, free the previously allocated space to avoid a
    // memory leak and return an error.
    if (cudaerr != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // Take another block of memory from bigmem.
    tplpitchCud = (size_t *)cursor;
    // Advance the cursor.
    cursor += tplCount * sizeof (size_t);
    // Copy the pitch values to device memory.
    cudaerr = cudaMemcpy(tplpitchCud, pitch, sizeof (size_t) * tplCount,
                         cudaMemcpyHostToDevice);
    // If the copy failed, free the previously allocated space to avoid a
    // memory leak and return an error.
    if (cudaerr != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // Stores the correlation coefficient of every point in the perturbation
    // range matched against every TEMPLATE at every rotation angle. The layout
    // is: all perturbation points for one TEMPLATE first, then the TEMPLATEs
    // sharing a rotation angle, and finally the different rotation angles.
    float *cormapgpu = NULL;
    // Take memory from bigmem.
    cormapgpu = (float *)cursor;
    // Advance the cursor.
    cursor += dWidth * dHeight * tplCount * anglecount * sizeof (float);
    // Compute the offsets of the TEMPLATE.
    int tploffx = tplWidth / 2;
    int tploffy = tplHeight / 2;
    // Compute the thread block dimensions.
    // The block uses the default thread dimensions.
    blocksize.x = DEF_BLOCK_X;
    blocksize.y = DEF_BLOCK_Y;
    blocksize.z = 1;
    // Use the basic tiling scheme.
    gridsize.x = (dWidth + blocksize.x - 1) / blocksize.x;
    gridsize.y = (dHeight + blocksize.y - 1) / blocksize.y;
    // The third dimension enumerates all TEMPLATEs, including all rotation angles.
    gridsize.z = anglecount * tplCount;
    // Call the matching kernel to match every TEMPLATE over the perturbation range.
    _matchKer<<<gridsize, blocksize>>>(tplnormalizationCud, tplpitchCud,
                                       tplCount, tplWidth, tplHeight,
                                       testnormalization, testpitch,
                                       testwidth, testheight,
                                       *rotateTable, cormapgpu,
                                       offsetx, offsety, dWidth, dHeight,
                                       tploffx, tploffy);
    // If the kernel failed, free the previously allocated memory to avoid a
    // memory leak and return.
    if (cudaGetLastError() != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // Stores the sum of the correlation coefficients over each point's neighborhood.
    float *cormapsumgpu = NULL;
    // Take memory from bigmem.
    cormapsumgpu = (float *)cursor;
    // Call the kernel that sums the correlation coefficients over a scope
    // neighborhood of each point; the result is stored in cormapsumgpu.
    _calCorMapSumKer<<<gridsize, blocksize>>>(cormapgpu, dWidth, dHeight,
                                              scope, cormapsumgpu);
    // If the kernel failed, free the previously allocated memory to avoid a
    // memory leak and return.
    if (cudaGetLastError() != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // Allocate host memory to copy the data in cormapsumgpu back to the host.
    cormapcpu = new float[dWidth * dHeight * tplCount * anglecount];
    // If the allocation failed, free the earlier allocations and return an error.
    if (cormapcpu == NULL) {
        FAIL_MEM_FREE;
        return OUT_OF_MEM;
    }
    // Copy the data in cormapsumgpu into cormapcpu.
    cudaerr = cudaMemcpy(cormapcpu, cormapsumgpu,
                         dWidth * dHeight * tplCount * anglecount *
                         sizeof (float), cudaMemcpyDeviceToHost);
    // If the copy failed, free the previously allocated space to avoid a
    // memory leak and return an error.
    if (cudaerr != cudaSuccess) {
        FAIL_MEM_FREE;
        return CUDA_ERROR;
    }
    // Get the index of the largest value in cormapcpu.
    int maxindex = _getCormapMaxIndex(cormapcpu,
                                      dWidth * dHeight * tplCount * anglecount);
    // Compute the best-matching rotation angle.
    int angleindex = maxindex / (dWidth * dHeight) / tplCount;
    matchres->angle = rotateTable->getAngleVal(angleindex);
    // Compute the index of the best-matching TEMPLATE.
    matchres->tplIndex = (maxindex / (dWidth * dHeight)) % tplCount;
    // Compute the x coordinate of the best match on the TEST image.
    matchres->matchX = maxindex % dWidth + offsetx;
    // Compute the y coordinate of the best match on the TEST image.
    matchres->matchY = (maxindex % (dWidth * dHeight)) / dWidth + offsety;
    // Compute the correlation coefficient of the best match.
    matchres->coefficient = cormapcpu[maxindex] /
                            (scope * scope);
    // If dirrect is not NULL, perform the local anomaly check.
    if (dirrect != NULL) {
        size_t errmappitch;
        // Width and height of errmap.
        int errMapWidth = testwidth, errMapHeight = testheight;
        // Create the errMap space on the device.
        cudaerr = cudaMallocPitch((void **)&errmap, &errmappitch,
                                  sizeof (int) * errMapWidth, errMapHeight);
        // If the allocation failed, free the memory and return.
        if (cudaerr != cudaSuccess) {
            FAIL_MEM_FREE;
            return CUDA_ERROR;
        }
        // Zero the data in errmap.
        cudaMemset2D(errmap, errmappitch, 0, sizeof (int) * errMapWidth,
                     errMapHeight);
        // Compute the grid dimensions first.
        gridsize.x = (tplWidth + blocksize.x - 1) / blocksize.x;
        gridsize.y = (tplHeight + blocksize.y - 1) / blocksize.y;
        // Perform the local anomaly check.
        _localCheckErrKer<<<gridsize, blocksize>>>(
                tplNormalization[matchres->tplIndex],
                pitch[matchres->tplIndex], tplWidth, tplHeight,
                testnormalization, testpitch, testwidth, testheight,
                *rotateTable, errmap, errmappitch, errMapWidth,
                errMapHeight, errThreshold, matchres->matchX,
                matchres->matchY, matchres->angle, tploffx, tploffy);
        // If the kernel reported an error, free the memory and return the error.
        if (cudaGetLastError() != cudaSuccess) {
            FAIL_MEM_FREE;
            return CUDA_ERROR;
        }
        // Get the minimum directed rectangle of errmap.
        errcode = _getDirectRectForErrMap(errmap, errmappitch,
                                          errMapWidth, errMapHeight,
                                          errWinWidth, errWinHeight,
                                          errWinThreshold, dirrect);
        if (errcode != NO_ERROR) {
            FAIL_MEM_FREE;
            return errcode;
        }
    }
    // Free the previously allocated memory to avoid a memory leak.
    FAIL_MEM_FREE;
    // Done, return NO_ERROR.
    return NO_ERROR;
}
#undef FAIL_MEM_FREE
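// Worked example of the maxindex decomposition in imageMatch above
// (illustrative numbers): with dWidth = 8, dHeight = 4, tplCount = 3 and
// maxindex = 200, maxindex / (dWidth * dHeight) = 6, so angleindex = 6 / 3 = 2
// and tplIndex = 6 % 3 = 0; within the perturbation window,
// maxindex % dWidth = 0 gives matchX - offsetx, and
// (maxindex % (dWidth * dHeight)) / dWidth = (200 % 32) / 8 = 1 gives
// matchY - offsety.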
|
the_stack
|