hip_filename (string, length 5–84) | hip_content (string, length 79–9.69M) | cuda_filename (string, length 4–83) | cuda_content (string, length 19–9.69M) |
---|---|---|---|
93f2767f6353305f8daa66326d1d123864fb694a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<real>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<real>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<real>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::min(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<real, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::max(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<real, int64_t>());
}
#endif
| 93f2767f6353305f8daa66326d1d123864fb694a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim<real>(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCTensor_kernel_renorm<real, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll<real>(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim<real>(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim<real>(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll<real>(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll<real>(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll<real>(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::min(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<real, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<real, int64_t>
init =
thrust::make_pair<real, int64_t>(
THCNumerics<real>::max(), 0);
return THC_reduceDimIndex<real, int64_t>(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<real, int64_t>());
}
#endif
|
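The THC reduction pair above illustrates the two rewrites hipify applies throughout this dataset: runtime symbols are renamed one-for-one (`cudaGetLastError` → `hipGetLastError`, `thrust::cuda::par` → `thrust::hip::par`, `CUDA_VERSION` → `TORCH_HIP_VERSION`) and triple-chevron kernel launches are rewritten as `hipLaunchKernelGGL` calls. The sketch below only illustrates that launch mapping; `scale_kernel` and its arguments are made up for the example and are not taken from the file pair.

```cpp
// Minimal HIP sketch of the launch rewrite shown in the pair above.
// scale_kernel is an illustrative kernel, not part of the THC sources.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, float value, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= value;
}

void launch_scale(float* d_data, float value, int n, hipStream_t stream) {
  dim3 threads(256);
  dim3 grid((n + threads.x - 1) / threads.x);
  // CUDA form: scale_kernel<<<grid, threads, 0, stream>>>(d_data, value, n);
  hipLaunchKernelGGL(scale_kernel, grid, threads, 0, stream, d_data, value, n);
}
```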
8ddf3521c148822c51221f573413942a6b54b73a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 10
#define num_threads 10000
__global__ void increment_naive(int *d)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
tid = tid % N;
d[tid] += 1;
}
__global__ void increment_atomic(int *d)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
tid = tid % N;
atomicAdd(&d[tid], 1);
}
int main()
{
int h[N], *d;
hipMalloc(&d, sizeof(int)*N);
hipMemset(d, 0, sizeof(int)*N);
hipLaunchKernelGGL(( increment_naive), dim3((num_threads/N)), dim3(N), 0, 0, d);
hipMemcpy(h, d, sizeof(int)*N, hipMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
hipMemset(d, 0, sizeof(int)*N);
hipLaunchKernelGGL(( increment_atomic), dim3((num_threads/N)), dim3(N), 0, 0, d);
hipMemcpy(h, d, sizeof(int)*N, hipMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
}
//12
//12
//12
//12
//12
//12
//12
//12
//12
//12
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
| 8ddf3521c148822c51221f573413942a6b54b73a.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10
#define num_threads 10000
__global__ void increment_naive(int *d)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
tid = tid % N;
d[tid] += 1;
}
__global__ void increment_atomic(int *d)
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
tid = tid % N;
atomicAdd(&d[tid], 1);
}
int main()
{
int h[N], *d;
cudaMalloc(&d, sizeof(int)*N);
cudaMemset(d, 0, sizeof(int)*N);
increment_naive<<<(num_threads/N), N>>>(d);
cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
cudaMemset(d, 0, sizeof(int)*N);
increment_atomic<<<(num_threads/N), N>>>(d);
cudaMemcpy(h, d, sizeof(int)*N, cudaMemcpyDeviceToHost);
for(int i=0; i<N; i++)
std::cout << h[i] << "\n";
}
//12
//12
//12
//12
//12
//12
//12
//12
//12
//12
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
//1000
|
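The increment pair above is a pure-rename case: `cudaMalloc`, `cudaMemset` and `cudaMemcpy` become their `hip*` counterparts while device code such as `atomicAdd` is left untouched. Below is a minimal sketch of the same renamed runtime calls with error checking added; the `HIP_CHECK` helper is an assumption of the example, not part of either file.

```cpp
// Minimal HIP sketch of the renamed runtime calls from the pair above,
// with error checking added. HIP_CHECK is an illustrative helper.
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                   \
  do {                                                                    \
    hipError_t err_ = (call);                                             \
    if (err_ != hipSuccess) {                                             \
      std::fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err_));   \
      std::exit(EXIT_FAILURE);                                            \
    }                                                                     \
  } while (0)

int main() {
  int h[10];
  int* d = nullptr;
  HIP_CHECK(hipMalloc(&d, sizeof(h)));                            // was cudaMalloc
  HIP_CHECK(hipMemset(d, 0, sizeof(h)));                          // was cudaMemset
  HIP_CHECK(hipMemcpy(h, d, sizeof(h), hipMemcpyDeviceToHost));   // was cudaMemcpy
  HIP_CHECK(hipFree(d));                                          // was cudaFree
  return 0;
}
```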
6aa10299a14494d125766d9e0f30d29bff297e0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 79872
__global__ void add(int *a, int *b, int *c) {
int tid = threadIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
double start, end;
// allocate the memory on the GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, N * sizeof(int)));
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i<N; i++) {
a[i] = i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(int),
hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(int),
hipMemcpyHostToDevice));
start = __anElapsedTimeNSEC__;
add << <1, N >> >(dev_a, dev_b, dev_c);
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(hipMemcpy(c, dev_c, N * sizeof(int),
hipMemcpyDeviceToHost));
end = __anElapsedTimeNSEC__;
// display the results
//for (int i = 0; i<N; i++) {
// printf("%d + %d = %d\n", a[i], b[i], c[i]);
//}
// free the memory allocated on the GPU
HANDLE_ERROR(hipFree(dev_a));
HANDLE_ERROR(hipFree(dev_b));
HANDLE_ERROR(hipFree(dev_c));
printf("Need %lf \n",end-start);
system("pause");
return 0;
}
| 6aa10299a14494d125766d9e0f30d29bff297e0b.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "../common/book.h"
#define N 79872
__global__ void add(int *a, int *b, int *c) {
int tid = threadIdx.x;
if (tid < N)
c[tid] = a[tid] + b[tid];
}
int main(void) {
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
double start, end;
// allocate the memory on the GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * sizeof(int)));
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i<N; i++) {
a[i] = i;
b[i] = i * i;
}
// copy the arrays 'a' and 'b' to the GPU
HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int),
cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int),
cudaMemcpyHostToDevice));
start = __anElapsedTimeNSEC__;
add << <1, N >> >(dev_a, dev_b, dev_c);
// copy the array 'c' back from the GPU to the CPU
HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int),
cudaMemcpyDeviceToHost));
end = __anElapsedTimeNSEC__;
// display the results
//for (int i = 0; i<N; i++) {
// printf("%d + %d = %d\n", a[i], b[i], c[i]);
//}
// free the memory allocated on the GPU
HANDLE_ERROR(cudaFree(dev_a));
HANDLE_ERROR(cudaFree(dev_b));
HANDLE_ERROR(cudaFree(dev_c));
printf("Need %lf \n",end-start);
system("pause");
return 0;
}
|
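A detail worth noting in the vector-add pair above: the launch `add << <1, N >> >` survives unchanged in the .hip file, and with `N` defined as 79872 a single block exceeds the usual 1024-threads-per-block limit, so on most GPUs the launch simply fails. The sketch below is not the file's code; it only shows, under that assumption about the limit, how the same element-wise add is normally split across a grid of blocks.

```cpp
// Illustrative grid/block split for an element-wise add; not taken from
// the file pair above, and assumes the usual 1024-threads-per-block cap.
#include <hip/hip_runtime.h>

__global__ void add(const int* a, const int* b, int* c, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) c[tid] = a[tid] + b[tid];  // guard covers the ragged last block
}

void launch_add(const int* d_a, const int* d_b, int* d_c, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  hipLaunchKernelGGL(add, dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, n);
}
```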
61901eb1e4fcf1c0b586b883dae79743efcfdf01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Matrix is stored as 1d array in row-major order
typedef struct {
int width;
int height;
int stride;
float *elements;
} Matrix;
#define BLOCK_SIZE 16
#define A_WIDTH 2048
#define A_HEIGHT 2048
#define B_WIDTH 2048
#define B_HEIGHT 2048
#define C_WIDTH 2048
#define C_HEIGHT 2048
__device__ float getElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
__device__ void setElement(const Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
__device__ Matrix getSubMatrix(const Matrix A, int row, int col)
{
Matrix ASub;
ASub.width = BLOCK_SIZE;
ASub.height = BLOCK_SIZE;
ASub.stride = A.stride;
ASub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return ASub;
}
__global__ void matmul(const Matrix A, const Matrix B, const Matrix C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix CSub = getSubMatrix(C, blockRow, blockCol);
float CValue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
Matrix ASub = getSubMatrix(A, blockRow, m);
Matrix BSub = getSubMatrix(B, m, blockCol);
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
as[row][col] = getElement(ASub, row, col);
bs[row][col] = getElement(BSub, row, col);
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e) {
CValue += as[row][e] * bs[e][col];
}
__syncthreads();
}
setElement(CSub, row, col, CValue);
}
void matmulDriver(const Matrix A, const Matrix B, const Matrix C)
{
// Load matrix A into device.
Matrix dA;
dA.width = A.width;
dA.height = A.height;
dA.stride = A.width;
size_t sizeOfA = A.width * A.height * sizeof(float);
hipMalloc(&dA.elements, sizeOfA);
hipMemcpy(dA.elements, A.elements, sizeOfA, hipMemcpyHostToDevice);
// Load matrix B into device.
Matrix dB;
dB.width = B.width;
dB.height = B.height;
dB.stride = B.width;
size_t sizeOfB = B.width * B.height * sizeof(float);
hipMalloc(&dB.elements, sizeOfB);
hipMemcpy(dB.elements, B.elements, sizeOfB, hipMemcpyHostToDevice);
// Allocate matrix C on device.
Matrix dC;
dC.width = C.width;
dC.height = C.height;
dC.stride = C.width;
size_t sizeOfC = C.width * C.height * sizeof(float);
hipMalloc(&dC.elements, sizeOfC);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
hipLaunchKernelGGL(( matmul), dim3(dimGrid), dim3(dimBlock), 0, 0, dA, dB, dC); // pass the device copies, not the host matrices
hipMemcpy(C.elements, dC.elements, sizeOfC, hipMemcpyDeviceToHost);
hipFree(dA.elements);
hipFree(dB.elements);
hipFree(dC.elements);
}
int main()
{
Matrix A;
A.width = A_WIDTH;
A.height = A_HEIGHT;
size_t sizeOfA = A.width * A.height * sizeof(float);
A.elements = (float *) malloc(sizeOfA);
Matrix B;
B.width = B_WIDTH;
B.height = B_HEIGHT;
size_t sizeOfB = B.width * B.height * sizeof(float);
B.elements = (float *) malloc(sizeOfB);
Matrix C;
C.width = C_WIDTH;
C.height = C_HEIGHT;
size_t sizeOfC = C.width * C.height * sizeof(float);
C.elements = (float *) malloc(sizeOfC);
Matrix C_check;
C_check.width = C_WIDTH;
C_check.height = C_HEIGHT;
C_check.elements = (float *) malloc(sizeOfC);
for (int i = 0; i < A.height; i++) {
for (int j = 0; j < A.width; j++) {
A.elements[i * A.width + j] = i + j;
}
}
for (int i = 0; i < B.height; i++) {
for (int j = 0; j < B.width; j++) {
B.elements[i * B.width + j] = i + j;
}
}
int value;
for (int i = 0; i < C_check.height; i++) {
for (int j = 0; j < C_check.width; j++) {
value = 0.0;
for (int k = 0; k < A.width; k++) {
value += A.elements[i * A.width + k] * B.elements[k * B.width + j];
}
C_check.elements[i * C_check.width + j] = value;
}
}
matmulDriver(A, B, C);
int cmp = memcmp(C_check.elements, C.elements, sizeOfC);
if (cmp == 0) {
printf("Arrays are equal.\n");
} else {
printf("Arrays are equal.\n");
}
return 0;
}
| 61901eb1e4fcf1c0b586b883dae79743efcfdf01.cu | #include <stdio.h>
// Matrix is stored as 1d array in row-major order
typedef struct {
int width;
int height;
int stride;
float *elements;
} Matrix;
#define BLOCK_SIZE 16
#define A_WIDTH 2048
#define A_HEIGHT 2048
#define B_WIDTH 2048
#define B_HEIGHT 2048
#define C_WIDTH 2048
#define C_HEIGHT 2048
__device__ float getElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
__device__ void setElement(const Matrix A, int row, int col, float value)
{
A.elements[row * A.stride + col] = value;
}
__device__ Matrix getSubMatrix(const Matrix A, int row, int col)
{
Matrix ASub;
ASub.width = BLOCK_SIZE;
ASub.height = BLOCK_SIZE;
ASub.stride = A.stride;
ASub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return ASub;
}
__global__ void matmul(const Matrix A, const Matrix B, const Matrix C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix CSub = getSubMatrix(C, blockRow, blockCol);
float CValue = 0;
int row = threadIdx.y;
int col = threadIdx.x;
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
Matrix ASub = getSubMatrix(A, blockRow, m);
Matrix BSub = getSubMatrix(B, m, blockCol);
__shared__ float as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE][BLOCK_SIZE];
as[row][col] = getElement(ASub, row, col);
bs[row][col] = getElement(BSub, row, col);
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e) {
CValue += as[row][e] * bs[e][col];
}
__syncthreads();
}
setElement(CSub, row, col, CValue);
}
void matmulDriver(const Matrix A, const Matrix B, const Matrix C)
{
// Load matrix A into device.
Matrix dA;
dA.width = A.width;
dA.height = A.height;
dA.stride = A.width;
size_t sizeOfA = A.width * A.height * sizeof(float);
cudaMalloc(&dA.elements, sizeOfA);
cudaMemcpy(dA.elements, A.elements, sizeOfA, cudaMemcpyHostToDevice);
// Load matrix B into device.
Matrix dB;
dB.width = B.width;
dB.height = B.height;
dB.stride = B.width;
size_t sizeOfB = B.width * B.height * sizeof(float);
cudaMalloc(&dB.elements, sizeOfB);
cudaMemcpy(dB.elements, B.elements, sizeOfB, cudaMemcpyHostToDevice);
// Allocate matrix C on device.
Matrix dC;
dC.width = C.width;
dC.height = C.height;
dC.stride = C.width;
size_t sizeOfC = C.width * C.height * sizeof(float);
cudaMalloc(&dC.elements, sizeOfC);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
matmul<<<dimGrid, dimBlock>>>(dA, dB, dC); // pass the device copies, not the host matrices
cudaMemcpy(C.elements, dC.elements, sizeOfC, cudaMemcpyDeviceToHost);
cudaFree(dA.elements);
cudaFree(dB.elements);
cudaFree(dC.elements);
}
int main()
{
Matrix A;
A.width = A_WIDTH;
A.height = A_HEIGHT;
size_t sizeOfA = A.width * A.height * sizeof(float);
A.elements = (float *) malloc(sizeOfA);
Matrix B;
B.width = B_WIDTH;
B.height = B_HEIGHT;
size_t sizeOfB = B.width * B.height * sizeof(float);
B.elements = (float *) malloc(sizeOfB);
Matrix C;
C.width = C_WIDTH;
C.height = C_HEIGHT;
size_t sizeOfC = C.width * C.height * sizeof(float);
C.elements = (float *) malloc(sizeOfC);
Matrix C_check;
C_check.width = C_WIDTH;
C_check.height = C_HEIGHT;
C_check.elements = (float *) malloc(sizeOfC);
for (int i = 0; i < A.height; i++) {
for (int j = 0; j < A.width; j++) {
A.elements[i * A.width + j] = i + j;
}
}
for (int i = 0; i < B.height; i++) {
for (int j = 0; j < B.width; j++) {
B.elements[i * B.width + j] = i + j;
}
}
int value;
for (int i = 0; i < C_check.height; i++) {
for (int j = 0; j < C_check.width; j++) {
value = 0.0;
for (int k = 0; k < A.width; k++) {
value += A.elements[i * A.width + k] * B.elements[k * B.width + j];
}
C_check.elements[i * C_check.width + j] = value;
}
}
matmulDriver(A, B, C);
int cmp = memcmp(C_check.elements, C.elements, sizeOfC);
if (cmp == 0) {
printf("Arrays are equal.\n");
} else {
printf("Arrays are equal.\n");
}
return 0;
}
|
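The matmul pair above tiles both inputs into BLOCK_SIZE x BLOCK_SIZE shared-memory blocks, and `getSubMatrix` locates a tile by the flat offset `stride * BLOCK_SIZE * blockRow + BLOCK_SIZE * blockCol` into the row-major array. The host-only sketch below just evaluates that offset for one illustrative tile; the numbers are examples, not values taken from the files.

```cpp
// Host-only sketch of the tile offset used by getSubMatrix in the pair above.
// stride, BLOCK and the tile coordinates are illustrative values.
#include <cstdio>

int main() {
  const int stride = 2048;             // row length of the full matrix
  const int BLOCK = 16;                // BLOCK_SIZE in the kernel
  const int blockRow = 3, blockCol = 5;
  // Flat index of element (0,0) of tile (blockRow, blockCol):
  const int offset = stride * BLOCK * blockRow + BLOCK * blockCol;
  std::printf("tile (%d,%d) starts at flat index %d\n",
              blockRow, blockCol, offset);  // 98384 for these values
  return 0;
}
```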
e49e0fe67abc4f88eab4853054b24f941177b6a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "userFitness.h"
#include "hip/hip_runtime_api.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
__device__ float fitnessOfSearchAgent_SudokuWOA(float* searchAgent, int startPoint) {
float nilai = 0.0f;
int i_d = startPoint;
float* searchAgentTemp = searchAgent;
for (int j = 0; j < 9; j++)
{
bool baris = true, kolom = true, kotak = true;
bool angkaBaris[9], angkaKolom[9], angkaKotak[9];
for (int k = 0; k < 9; k++)
{
angkaBaris[k] = angkaKolom[k] = angkaKotak[k] = false;
}
for (int k = 0; k < 9; k++)
{
int baris_i = i_d + (j * 9) + k;
int kolom_i = i_d + (k * 9) + j;
int kotak_i = i_d + ((3 * j) + (18 * int(j / 3))) + (k + (int(k / 3) * 6));
if (angkaBaris[(abs((int)searchAgentTemp[baris_i]) % 9)]) baris = false;
if (angkaKolom[(abs((int)searchAgentTemp[kolom_i]) % 9)]) kolom = false;
if (angkaKotak[(abs((int)searchAgentTemp[kotak_i]) % 9)]) kotak = false;
angkaBaris[(abs((int)searchAgentTemp[baris_i]) % 9)] = true;
angkaKolom[(abs((int)searchAgentTemp[kolom_i]) % 9)] = true;
angkaKotak[(abs((int)searchAgentTemp[kotak_i]) % 9)] = true;
}
if (baris) nilai += 1.0f;
if (kolom) nilai += 1.0f;
if (kotak) nilai += 1.0f;
}
return nilai;
};
__global__ void fitnessCheckGPU_SudokuWOA(int size, float* searchAgent, float* fitness)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
fitness[i] = fitnessOfSearchAgent_SudokuWOA(searchAgent, i * size);
}
void callFitnessCheckGPU_SudokuWOA(int size, float* searchAgent, float* fitness, long chromosomeAmount) {
fitnessCheckGPU_SudokuWOA << < 1, chromosomeAmount >> >(size, searchAgent, fitness);
} | e49e0fe67abc4f88eab4853054b24f941177b6a7.cu | #include "userFitness.h"
#include "cuda_runtime_api.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
__device__ float fitnessOfSearchAgent_SudokuWOA(float* searchAgent, int startPoint) {
float nilai = 0.0f;
int i_d = startPoint;
float* searchAgentTemp = searchAgent;
for (int j = 0; j < 9; j++)
{
bool baris = true, kolom = true, kotak = true;
bool angkaBaris[9], angkaKolom[9], angkaKotak[9];
for (int k = 0; k < 9; k++)
{
angkaBaris[k] = angkaKolom[k] = angkaKotak[k] = false;
}
for (int k = 0; k < 9; k++)
{
int baris_i = i_d + (j * 9) + k;
int kolom_i = i_d + (k * 9) + j;
int kotak_i = i_d + ((3 * j) + (18 * int(j / 3))) + (k + (int(k / 3) * 6));
if (angkaBaris[(abs((int)searchAgentTemp[baris_i]) % 9)]) baris = false;
if (angkaKolom[(abs((int)searchAgentTemp[kolom_i]) % 9)]) kolom = false;
if (angkaKotak[(abs((int)searchAgentTemp[kotak_i]) % 9)]) kotak = false;
angkaBaris[(abs((int)searchAgentTemp[baris_i]) % 9)] = true;
angkaKolom[(abs((int)searchAgentTemp[kolom_i]) % 9)] = true;
angkaKotak[(abs((int)searchAgentTemp[kotak_i]) % 9)] = true;
}
if (baris) nilai += 1.0f;
if (kolom) nilai += 1.0f;
if (kotak) nilai += 1.0f;
}
return nilai;
};
__global__ void fitnessCheckGPU_SudokuWOA(int size, float* searchAgent, float* fitness)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
fitness[i] = fitnessOfSearchAgent_SudokuWOA(searchAgent, i * size);
}
void callFitnessCheckGPU_SudokuWOA(int size, float* searchAgent, float* fitness, long chromosomeAmount) {
fitnessCheckGPU_SudokuWOA << < 1, chromosomeAmount >> >(size, searchAgent, fitness);
} |
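The fitness kernel in the pair above scores a flattened 9x9 Sudoku board by visiting each row, column and 3x3 box; the box walk relies on the index formula `(3*j + 18*(j/3)) + (k + (k/3)*6)` for box `j`, cell `k`. The host-only sketch below evaluates that formula for one illustrative (box, cell) pair to show which grid position it selects.

```cpp
// Host-only check of the 3x3-box index formula used in
// fitnessOfSearchAgent_SudokuWOA above; j and k are illustrative values.
#include <cstdio>

int main() {
  const int j = 4, k = 7;  // box 4 (the centre box), cell 7 inside it
  const int idx = (3 * j + 18 * (j / 3)) + (k + (k / 3) * 6);
  std::printf("box %d, cell %d -> flat index %d (row %d, col %d)\n",
              j, k, idx, idx / 9, idx % 9);  // row 5, col 4
  return 0;
}
```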
d1dc2d57ff8ba184df582c03d704c833f2ed63d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "trackerKCFparallel.hpp"
#include <opencv2/cudaarithm.hpp>
#include "dft.cu"
#include "mulspectrums.cu"
#define returnFromUpdate() {fprintf(stderr, "Error in %s line %d while updating frame %d\n", __FILE__, __LINE__, frame); return false;}
/*---------------------------
| TrackerKCFModel
|---------------------------*/
namespace cv{
/**
* \brief Implementation of TrackerModel for KCF algorithm
*/
class TrackerKCFModel : public TrackerModel{
public:
TrackerKCFModel(TrackerKCF::Params /*params*/){}
~TrackerKCFModel(){}
protected:
void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
void modelUpdateImpl(){}
};
} /* namespace cv */
namespace helper {
void MatType( Mat inputMat )
{
int inttype = inputMat.type();
std::string r, a;
uchar depth = inttype & CV_MAT_DEPTH_MASK;
uchar chans = 1 + (inttype >> CV_CN_SHIFT);
switch ( depth ) {
case CV_8U: r = "8U"; a = "Mat.at<uchar>(y,x)"; break;
case CV_8S: r = "8S"; a = "Mat.at<schar>(y,x)"; break;
case CV_16U: r = "16U"; a = "Mat.at<ushort>(y,x)"; break;
case CV_16S: r = "16S"; a = "Mat.at<short>(y,x)"; break;
case CV_32S: r = "32S"; a = "Mat.at<int>(y,x)"; break;
case CV_32F: r = "32F"; a = "Mat.at<float>(y,x)"; break;
case CV_64F: r = "64F"; a = "Mat.at<double>(y,x)"; break;
case CV_32FC2: r = "32FC2"; a = "Mat.at<complex float>(y,x)"; break;
case CV_64FC2: r = "64FC2"; a = "Mat.at<complex double>(y,x)"; break;
default: r = "User"; a = "Mat.at<UKNOWN>(y,x)"; break;
}
r += "C";
r += (chans+'0');
std::cout << "Mat is of type " << r << " and should be accessed with " << a << std::endl;
}
}
namespace cv {
/*
* Constructor
*/
TackerKCFImplParallel::TackerKCFImplParallel( const TrackerKCF::Params ¶meters ) :
params( parameters )
{
isInit = false;
resizeImage = false;
use_custom_extractor_pca = false;
use_custom_extractor_npca = false;
#if TIME
total_lines = num_steps;
for (int i = 0; i < num_steps; i++) {
cumulated_times[i] = 0;
}
#if TIME == 2
for (int i = 0; i < num_steps - 1; i++) {
total_lines += num_steps_details[i];
for (int j = 0; j < max_num_details; j++) {
cumulated_details_times[i][j] = 0;
}
}
#endif
#endif
}
void TackerKCFImplParallel::read( const cv::FileNode& fn ){
params.read( fn );
}
void TackerKCFImplParallel::write( cv::FileStorage& fs ) const {
params.write( fs );
}
/*
* Initialization:
* - creating hann window filter
* - ROI padding
* - creating a gaussian response for the training ground-truth
* - perform FFT to the gaussian response
*/
bool TackerKCFImplParallel::initImpl( const Mat& image, const Rect2d& boundingBox ){
#if TIME
double startInit = CycleTimer::currentSeconds();
#endif
frame=0;
roi = boundingBox;
//calclulate output sigma
output_sigma=sqrt(roi.width*roi.height)*params.output_sigma_factor;
output_sigma=-0.5/(output_sigma*output_sigma);
//resize the ROI whenever needed
if(params.resize && roi.width*roi.height>params.max_patch_size){
resizeImage=true;
roi.x/=2.0;
roi.y/=2.0;
roi.width/=2.0;
roi.height/=2.0;
}
// add padding to the roi
roi.x-=roi.width/2;
roi.y-=roi.height/2;
roi.width*=2;
roi.height*=2;
// initialize the hann window filter
createHanningWindow(hann, roi.size(), CV_64F);
// hann window filter for CN feature
Mat _layer[] = {hann, hann, hann, hann, hann, hann, hann, hann, hann, hann};
merge(_layer, 10, hann_cn);
// create gaussian response
y=Mat::zeros((int)roi.height,(int)roi.width,CV_64F);
for(unsigned i=0;i<roi.height;i++){
for(unsigned j=0;j<roi.width;j++){
y.at<double>(i,j)=(i-roi.height/2+1)*(i-roi.height/2+1)+(j-roi.width/2+1)*(j-roi.width/2+1);
}
}
y*=(double)output_sigma;
cv::exp(y,y);
// perform fourier transfor to the gaussian response
fft2(y,yf);
model=Ptr<TrackerKCFModel>(new TrackerKCFModel(params));
// record the non-compressed descriptors
if((params.desc_npca & GRAY) == GRAY)descriptors_npca.push_back(GRAY);
if((params.desc_npca & CN) == CN)descriptors_npca.push_back(CN);
if(use_custom_extractor_npca)descriptors_npca.push_back(CUSTOM);
features_npca.resize(descriptors_npca.size());
// record the compressed descriptors
if((params.desc_pca & GRAY) == GRAY)descriptors_pca.push_back(GRAY);
if((params.desc_pca & CN) == CN)descriptors_pca.push_back(CN);
if(use_custom_extractor_pca)descriptors_pca.push_back(CUSTOM);
features_pca.resize(descriptors_pca.size());
// accept only the available descriptor modes
CV_Assert(
(params.desc_pca & GRAY) == GRAY
|| (params.desc_npca & GRAY) == GRAY
|| (params.desc_pca & CN) == CN
|| (params.desc_npca & CN) == CN
|| use_custom_extractor_pca
|| use_custom_extractor_npca
);
// Initialize ExtractCN GpuMats
cuda::createContinuous(roi.size(), CV_8UC3, patch_data_gpu);
cuda::createContinuous(roi.size(), CV_16U, indexes_gpu);
hann_cn_gpu.upload(hann_cn);
// Initialize pca_data_gpu GpuMat
cuda::createContinuous(roi.size(), CV_64F, pca_data_gpu);
// Initialize fft2 GpuMats
Size complex_size(roi.size().width/2+1, roi.size().height);
int num_channels = image.channels();
cuda::createContinuous(complex_size, CV_64FC2, xyf_c_gpu);
cuda::createContinuous(roi.size(), CV_64F, xyf_r_gpu);
xf_data_gpu.resize(num_channels);
yf_data_gpu.resize(num_channels);
layers_data_gpu.resize(num_channels);
xyf_v_gpu.resize(num_channels);
for (int i = 0; i < num_channels; i++){
cuda::createContinuous(roi.size(), CV_64F, layers_data_gpu[i]);
cuda::createContinuous(complex_size, CV_64FC2, xf_data_gpu[i]);
cuda::createContinuous(complex_size, CV_64FC2, yf_data_gpu[i]);
}
// Initialize ColorNames
size_t ColorNames_size = 32768 * 10 * sizeof(double); //2^15 * 10
cudaSafeCall(hipMalloc((void**) &ColorNames_gpu, ColorNames_size));
cudaSafeCall(hipMemcpy(ColorNames_gpu, ColorNames, ColorNames_size, hipMemcpyHostToDevice));
#if TIME
printInitializationTime(startInit);
#endif
// TODO: return true only if roi inside the image
return true;
}
/*
* Main part of the KCF algorithm
*/
bool TackerKCFImplParallel::updateImpl( const Mat& image, Rect2d& boundingBox ){
#if TIME
double startUpdate = CycleTimer::currentSeconds();
#endif
double minVal, maxVal; // min-max response
Point minLoc,maxLoc; // min-max location
Mat img=image.clone();
// check the channels of the input image, grayscale is preferred
CV_Assert(img.channels() == 1 || img.channels() == 3);
// resize the image whenever needed
if(resizeImage)resize(img,img,Size(img.cols/2,img.rows/2));
#if TIME
double startDetection = CycleTimer::currentSeconds();
#endif
// detection part
if(frame>0){
#if TIME == 2
double startDetectionDetail = CycleTimer::currentSeconds();
#endif
// extract and pre-process the patch
// get non compressed descriptors
for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 0);
#endif
//get non-compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
}
if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 1);
#endif
// get compressed descriptors
for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 2);
#endif
//get compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
}
if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 3);
#endif
//compress the features and the KRSL model
if(params.desc_pca !=0){
compress(proj_mtx,X[0],X[0],data_temp,compress_data);
compress(proj_mtx,Z[0],Zc[0],data_temp,compress_data);
}
// copy the compressed KRLS model
Zc[1] = Z[1];
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 4);
#endif
// merge all features
if(features_npca.size()==0){
x = X[0];
z = Zc[0];
}else if(features_pca.size()==0){
x = X[1];
z = Z[1];
}else{
merge(X,2,x);
merge(Zc,2,z);
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 5);
#endif
//compute the gaussian kernel
denseGaussKernel(params.sigma,x,z,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 6);
#endif
// compute the fourier transform of the kernel
fft2(k,kf);
if(frame==1)spec2=Mat_<Vec2d >(kf.rows, kf.cols);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 7);
#endif
// calculate filter response
if(params.split_coeff)
calcResponse(alphaf,alphaf_den,kf,response, spec, spec2);
else
calcResponse(alphaf,kf,response, spec);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 8);
#endif
// extract the maximum response
minMaxLoc( response, &minVal, &maxVal, &minLoc, &maxLoc );
roi.x+=(maxLoc.x-roi.width/2+1);
roi.y+=(maxLoc.y-roi.height/2+1);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 9);
#endif
}
#if TIME
updateTime(startDetection, 0);
double startPatches = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startPatchesDetail = startPatches;
#endif
// update the bounding box
boundingBox.x=(resizeImage?roi.x*2:roi.x)+(resizeImage?roi.width*2:roi.width)/4;
boundingBox.y=(resizeImage?roi.y*2:roi.y)+(resizeImage?roi.height*2:roi.height)/4;
boundingBox.width = (resizeImage?roi.width*2:roi.width)/2;
boundingBox.height = (resizeImage?roi.height*2:roi.height)/2;
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 0);
#endif
// extract the patch for learning purpose
// get non compressed descriptors
for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 1);
#endif
//get non-compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
}
if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 2);
#endif
// get compressed descriptors
for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 3);
#endif
//get compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
}
if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 4);
#endif
//update the training data
if(frame==0){
Z[0] = X[0].clone();
Z[1] = X[1].clone();
}else{
Z[0]=(1.0-params.interp_factor)*Z[0]+params.interp_factor*X[0];
Z[1]=(1.0-params.interp_factor)*Z[1]+params.interp_factor*X[1];
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 5);
#endif
#if TIME
updateTime(startPatches, 1);
double startCompression = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startCompressionDetail = startCompression;
#endif
if(params.desc_pca !=0 || use_custom_extractor_pca){
// initialize the vector of Mat variables
if(frame==0){
layers_pca_data.resize(Z[0].channels());
average_data.resize(Z[0].channels());
}
// feature compression
updateProjectionMatrix(Z[0],old_cov_mtx,proj_mtx,params.pca_learning_rate,params.compressed_size,layers_pca_data,average_data,data_pca, new_covar,w_data,u_data,vt_data);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 0);
#endif
compress(proj_mtx,X[0],X[0],data_temp,compress_data);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 1);
#endif
}
// merge all features
if(features_npca.size()==0)
x = X[0];
else if(features_pca.size()==0)
x = X[1];
else
merge(X,2,x);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 2);
#endif
#if TIME
updateTime(startCompression, 2);
double startLeastSquares = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startLeastSquaresDetail = startLeastSquares;
#endif
// initialize some required Mat variables
if(frame==0){
layers.resize(x.channels());
vxf.resize(x.channels());
vyf.resize(x.channels());
vxyf.resize(vyf.size());
new_alphaf=Mat_<Vec2d >(yf.rows, yf.cols);
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 0);
#endif
// Kernel Regularized Least-Squares, calculate alphas
denseGaussKernel(params.sigma,x,x,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 1);
#endif
// compute the fourier transform of the kernel and add a small value
fft2(k,kf);
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 2);
#endif
kf_lambda=kf+params.lambda;
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 3);
#endif
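  // Kernel ridge regression solved in the Fourier domain: alphaf = yf / (kf + lambda).
  // With split_coeff the numerator (yf*kf) and denominator (kf*(kf+lambda)) are kept separate;
  // otherwise the complex division (a+bi)/(c+di) = [(ac+bd)+i(bc-ad)]/(c^2+d^2) is expanded below.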
double den;
if(params.split_coeff){
mulSpectrums(yf,kf,new_alphaf,0);
mulSpectrums(kf,kf_lambda,new_alphaf_den,0);
}else{
for(int i=0;i<yf.rows;i++){
for(int j=0;j<yf.cols;j++){
den = 1.0/(kf_lambda.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+kf_lambda.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1]);
new_alphaf.at<Vec2d>(i,j)[0]=
(yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1])*den;
new_alphaf.at<Vec2d>(i,j)[1]=
(yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[0]-yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[1])*den;
}
}
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 4);
#endif
// update the RLS model
if(frame==0){
alphaf=new_alphaf.clone();
if(params.split_coeff)alphaf_den=new_alphaf_den.clone();
}else{
alphaf=(1.0-params.interp_factor)*alphaf+params.interp_factor*new_alphaf;
if(params.split_coeff)alphaf_den=(1.0-params.interp_factor)*alphaf_den+params.interp_factor*new_alphaf_den;
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 5);
#endif
#if TIME
updateTime(startLeastSquares, 3);
updateTime(startUpdate, 4);
printAverageTimes();
#endif
frame++;
return true;
}
/*-------------------------------------
| implementation of the KCF functions
|-------------------------------------*/
/*
* hann window filter
*/
void TackerKCFImplParallel::createHanningWindow(OutputArray dest, const cv::Size winSize, const int type) const {
CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
dest.create(winSize, type);
Mat dst = dest.getMat();
int rows = dst.rows, cols = dst.cols;
AutoBuffer<double> _wc(cols);
double * const wc = (double *)_wc;
double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1);
for(int j = 0; j < cols; j++)
wc[j] = 0.5 * (1.0 - cos(coeff0 * j));
if(dst.depth() == CV_32F){
for(int i = 0; i < rows; i++){
float* dstData = dst.ptr<float>(i);
double wr = 0.5 * (1.0 - cos(coeff1 * i));
for(int j = 0; j < cols; j++)
dstData[j] = (float)(wr * wc[j]);
}
}else{
for(int i = 0; i < rows; i++){
double* dstData = dst.ptr<double>(i);
double wr = 0.5 * (1.0 - cos(coeff1 * i));
for(int j = 0; j < cols; j++)
dstData[j] = wr * wc[j];
}
}
// perform batch sqrt for SSE performance gains
  //cv::sqrt(dst, dst); // Matlab does not use the square-rooted version
}
/*
* simplification of fourier transform function in opencv
*/
void inline TackerKCFImplParallel::fft2(const Mat src, Mat & dest) const {
dft(src,dest,DFT_COMPLEX_OUTPUT);
}
void inline TackerKCFImplParallel::fft2(const Mat src, std::vector<Mat> & dest, std::vector<Mat> & layers_data) const {
split(src, layers_data);
for(int i=0;i<src.channels();i++){
dft(layers_data[i],dest[i],DFT_COMPLEX_OUTPUT);
}
}
void inline TackerKCFImplParallel::cudafft2(int num_channels, std::vector<cuda::GpuMat> & dest, std::vector<cuda::GpuMat> & layers_data) {
for (int i = 0; i < num_channels; i++) {
cuda::dft(layers_data[i], dest[i], layers_data[i].size(), DFT_DOUBLE);
}
}
/*
* simplification of inverse fourier transform function in opencv
*/
void inline TackerKCFImplParallel::ifft2(const Mat src, Mat & dest) const {
idft(src,dest,DFT_SCALE+DFT_REAL_OUTPUT);
}
void inline TackerKCFImplParallel::cudaifft2(const cuda::GpuMat src, cuda::GpuMat & dest) {
cuda::GpuMat src_cce;
src_cce = src;
  // The size correction is necessary to account for the CCE format
cv::Size dest_size((src.size().width -1)*2,src.size().height);
cuda::dft(src_cce, dest, dest_size,
(DFT_SCALE + DFT_REAL_OUTPUT) | DFT_INVERSE | DFT_DOUBLE);
}
// Expand half a matrix by inferring the complex conjugates of the cols to
// complete the second half
void inline TackerKCFImplParallel::cce2full(const Mat src, Mat & dest) {
// Assume that the original size of the matrix was divisible by 2
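  // CCE (complex-conjugate-even) packing stores only the first width/2+1 spectrum columns of a
  // real signal; the remaining columns are their complex conjugates mirrored around the last
  // stored column, which is what the else-branch below reconstructs.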
Mat result(cv::Size((src.size().width-1)*2,src.size().height),src.type());
for (int j=0; j < (src.size().width-1)*2;j++) {
for (int i = 0; i < src.size().height;i++) {
if (j <src.size().width-1) {
result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,j)[0];
result.at<Vec2d>(i,j)[1] = src.at<Vec2d>(i,j)[1];
} else {
// Complex conjugate
result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,2*(src.size().width - 1) - j)[0];
result.at<Vec2d>(i,j)[1] = - src.at<Vec2d>(i,2*(src.size().width -1) - j)[1];
}
}
}
dest = result;
}
void inline TackerKCFImplParallel::full2cce(const Mat src, Mat & dest) {
//We take the first half of the matrix
cv::Rect roi(0, 0, src.size().width/2+1, src.size().height);
dest = src(roi);
}
/*
 * Point-wise multiplication of two multi-channel Mat data
*/
void inline TackerKCFImplParallel::pixelWiseMult(const std::vector<cuda::GpuMat> src1, const std::vector<cuda::GpuMat> src2, std::vector<cuda::GpuMat> & dest, const int flags, const bool conjB) const {
for(unsigned i=0;i<src1.size();i++){
cv::cuda::mulSpectrums(src1[i], src2[i], dest[i],flags,conjB);
}
}
/*
 * Combines all channels of a multi-channel Mat into a single channel
*/
void inline TackerKCFImplParallel::sumChannels(std::vector<cuda::GpuMat> src, cuda::GpuMat & dest) const {
src[0].copyTo(dest);
for(unsigned i=1;i<src.size();i++){
cuda::add(src[i],dest,dest);
}
}
//void inline
/*
* obtains the projection matrix using PCA
*/
void inline TackerKCFImplParallel::updateProjectionMatrix(const Mat src, Mat & old_cov,Mat & proj_matrix, double pca_rate, int compressed_sz,
std::vector<Mat> & layers_pca,std::vector<Scalar> & average, Mat pca_data, Mat new_cov, Mat w, Mat u, Mat vt) {
GpuMat new_cov_gpu;
double start = CycleTimer::currentSeconds();
CV_Assert(compressed_sz<=src.channels());
split(src,layers_pca);
for (int i=0;i<src.channels();i++){
average[i]=mean(layers_pca[i]);
layers_pca[i]-=average[i];
}
// calc covariance matrix
merge(layers_pca,pca_data);
pca_data=pca_data.reshape(1,src.rows*src.cols);
pca_data_gpu.upload(pca_data);
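  // covariance of the mean-centred feature samples, computed on the GPU:
  // new_cov = pca_data^T * pca_data / (rows*cols - 1)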
GpuMat src3;
cuda::gemm(pca_data_gpu, pca_data_gpu, 1.0/(double)(src.rows*src.cols-1),
src3, 0, new_cov_gpu, GEMM_1_T);
new_cov_gpu.download(new_cov);
if(old_cov.rows==0)old_cov=new_cov.clone();
// calc PCA
SVD::compute((1.0-pca_rate)*old_cov+pca_rate*new_cov, w, u, vt);
// extract the projection matrix
proj_matrix=u(Rect(0,0,compressed_sz,src.channels())).clone();
Mat proj_vars=Mat::eye(compressed_sz,compressed_sz,proj_matrix.type());
for(int i=0;i<compressed_sz;i++){
proj_vars.at<double>(i,i)=w.at<double>(i);
}
// update the covariance matrix
old_cov=(1.0-pca_rate)*old_cov+pca_rate*proj_matrix*proj_vars*proj_matrix.t();
}
/*
* compress the features
*/
void inline TackerKCFImplParallel::compress(const Mat proj_matrix, const Mat src, Mat & dest, Mat & data, Mat & compressed) const {
data=src.reshape(1,src.rows*src.cols);
compressed=data*proj_matrix;
dest=compressed.reshape(proj_matrix.cols,src.rows).clone();
}
/*
* obtain the patch and apply hann window filter to it
*/
bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, Mat& patch, TrackerKCF::MODE desc) {
Rect region=_roi;
// return false if roi is outside the image
if((_roi.x+_roi.width<0)
||(_roi.y+_roi.height<0)
||(_roi.x>=img.cols)
||(_roi.y>=img.rows)
)return false;
// extract patch inside the image
if(_roi.x<0){region.x=0;region.width+=_roi.x;}
if(_roi.y<0){region.y=0;region.height+=_roi.y;}
if(_roi.x+_roi.width>img.cols)region.width=img.cols-_roi.x;
if(_roi.y+_roi.height>img.rows)region.height=img.rows-_roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-_roi.y;
addBottom=(_roi.height+_roi.y>img.rows?_roi.height+_roi.y-img.rows:0);
addLeft=region.x-_roi.x;
addRight=(_roi.width+_roi.x>img.cols?_roi.width+_roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
if(patch.rows==0 || patch.cols==0)return false;
// extract the desired descriptors
switch(desc){
case CN:
CV_Assert(img.channels() == 3);
extractCN(patch,feat);
//feat=feat.mul(hann_cn); // hann window filter
break;
default: // GRAY
if(img.channels()>1)
cvtColor(patch,feat, CV_BGR2GRAY);
else
feat=patch;
feat.convertTo(feat,CV_64F);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
feat=feat.mul(hann); // hann window filter
break;
}
return true;
}
/*
* get feature using external function
*/
bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, void (*f)(const Mat, const Rect, Mat& )) const{
// return false if roi is outside the image
if((_roi.x+_roi.width<0)
||(_roi.y+_roi.height<0)
||(_roi.x>=img.cols)
||(_roi.y>=img.rows)
)return false;
f(img, _roi, feat);
if(_roi.width != feat.cols || _roi.height != feat.rows){
printf("error in customized function of features extractor!\n");
printf("Rules: roi.width==feat.cols && roi.height = feat.rows \n");
}
Mat hann_win;
std::vector<Mat> _layers;
for(int i=0;i<feat.channels();i++)
_layers.push_back(hann);
merge(_layers, hann_win);
feat=feat.mul(hann_win); // hann window filter
return true;
}
__global__ void extractIndexKernel(const cuda::PtrStepSz<uchar3> input,
cuda::PtrStep<ushort> output) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
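  // quantize each 8-bit B/G/R channel into 32 bins and pack the three bin indices into a
  // single 15-bit lookup index (32*32*32 = 32768 ColorNames table entries)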
if (x >= 0 && x < input.cols && y >= 0 && y < input.rows) {
uchar3 pixel = input(y,x);
output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8));
}
}
__global__ void extractCNKernel(const cuda::PtrStepSz<ushort> input,
cuda::PtrStep<double[10]> output, const double *ColorNames) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= 0 && x < input.cols && y >= 0 && y < input.rows && k >= 0
&& k < 10) {
short index = input(y,x);
output.ptr(y)[x][k] = ColorNames[10*index + k];
//output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8));
}
}
/* Convert BGR to ColorNames
*/
void TackerKCFImplParallel::extractCN(Mat patch_data, Mat & cnFeatures) {
if(cnFeatures.type() != CV_64FC(10)) {
cnFeatures = Mat::zeros(patch_data.rows,patch_data.cols,CV_64FC(10));
}
patch_data_gpu.upload(patch_data);
dim3 cthreads2d(32, 32);
dim3 cblocks2d(
static_cast<int>(::ceil(patch_data_gpu.size().width /
static_cast<double>(cthreads2d.x))),
static_cast<int>(::ceil(patch_data_gpu.size().height /
static_cast<double>(cthreads2d.y))));
hipLaunchKernelGGL(( extractIndexKernel), dim3(cblocks2d), dim3(cthreads2d), 0, 0, patch_data_gpu, indexes_gpu);
cudaSafeCall(hipGetLastError());
cuda::GpuMat cnFeatures_gpu;
cuda::createContinuous(patch_data.size(), CV_64FC(10), cnFeatures_gpu);
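  // launch a 3D grid: x/y cover the patch pixels, z covers the 10 ColorNames channels,
  // so each thread copies a single channel value from the lookup table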
dim3 cthreads3d(32, 32, 1);
dim3 cblocks3d(
static_cast<int>(::ceil(patch_data_gpu.size().width /
static_cast<double>(cthreads3d.x))),
static_cast<int>(::ceil(patch_data_gpu.size().height /
static_cast<double>(cthreads3d.y))),
static_cast<int>(::ceil(10 /
static_cast<double>(cthreads3d.z))));
hipLaunchKernelGGL(( extractCNKernel), dim3(cblocks3d), dim3(cthreads3d), 0, 0, indexes_gpu, cnFeatures_gpu, ColorNames_gpu);
cudaSafeCall(hipGetLastError());
cuda::multiply(cnFeatures_gpu, hann_cn_gpu, cnFeatures_gpu);
cnFeatures_gpu.download(cnFeatures);
}
/*
* dense gauss kernel function
*/
void TackerKCFImplParallel::denseGaussKernel(const double sigma, const Mat x_data, const Mat y_data, Mat & k_data,
std::vector<Mat> & layers_data,std::vector<Mat> & xf_data,std::vector<Mat> & yf_data, std::vector<Mat> xyf_v, Mat xy, Mat xyf ) {
  // First we upload all the data to the GPU
int num_channels = x_data.channels();
double normX = norm(x_data, NORM_L2SQR);
double normY = norm(y_data, NORM_L2SQR);
cv::cuda::Stream stream;
split(x_data, layers_data);
for (int i = 0; i < x_data.channels(); i++){
layers_data_gpu[i].upload(layers_data[i], stream);
}
stream.waitForCompletion();
cudafft2(x_data.channels(),xf_data_gpu,layers_data_gpu);
split(y_data, layers_data);
for (int i = 0; i < x_data.channels(); i++){
layers_data_gpu[i].upload(layers_data[i], stream);
}
stream.waitForCompletion();
cudafft2(y_data.channels(),yf_data_gpu,layers_data_gpu);
pixelWiseMult(xf_data_gpu,yf_data_gpu,xyf_v_gpu,0,true);
sumChannels(xyf_v_gpu,xyf_c_gpu);
cudaifft2(xyf_c_gpu,xyf_r_gpu);
xyf_r_gpu.download(xyf);
if(params.wrap_kernel){
shiftRows(xyf, x_data.rows/2);
shiftCols(xyf, x_data.cols/2);
}
//(xx + yy - 2 * xy) / numel(x)
xy=(normX+normY-2*xyf)/(x_data.rows*x_data.cols*x_data.channels());
  // TODO: check whether we really need thresholding or not
//threshold(xy,xy,0.0,0.0,THRESH_TOZERO);//max(0, (xx + yy - 2 * xy) / numel(x))
for(int i=0;i<xy.rows;i++){
for(int j=0;j<xy.cols;j++){
if(xy.at<double>(i,j)<0.0)xy.at<double>(i,j)=0.0;
}
}
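  // Gaussian kernel: k = exp( -(||x||^2 + ||y||^2 - 2*x.y) / (sigma^2 * numel(x)) ),
  // with negative distance values (numerical noise) clamped to zero above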
double sig=-1.0/(sigma*sigma);
xy=sig*xy;
exp(xy, k_data);
}
/* CIRCULAR SHIFT Function
* http://stackoverflow.com/questions/10420454/shift-like-matlab-function-rows-or-columns-of-a-matrix-in-opencv
*/
// circular shift one row from up to down
void TackerKCFImplParallel::shiftRows(Mat& mat) const {
Mat temp;
Mat m;
int _k = (mat.rows-1);
mat.row(_k).copyTo(temp);
for(; _k > 0 ; _k-- ) {
m = mat.row(_k);
mat.row(_k-1).copyTo(m);
}
m = mat.row(0);
temp.copyTo(m);
}
// circular shift n rows from up to down if n > 0, -n rows from down to up if n < 0
void TackerKCFImplParallel::shiftRows(Mat& mat, int n) const {
if( n < 0 ) {
n = -n;
flip(mat,mat,0);
for(int _k=0; _k < n;_k++) {
shiftRows(mat);
}
flip(mat,mat,0);
}else{
for(int _k=0; _k < n;_k++) {
shiftRows(mat);
}
}
}
//circular shift n columns from left to right if n > 0, -n columns from right to left if n < 0
void TackerKCFImplParallel::shiftCols(Mat& mat, int n) const {
if(n < 0){
n = -n;
flip(mat,mat,1);
transpose(mat,mat);
shiftRows(mat,n);
transpose(mat,mat);
flip(mat,mat,1);
}else{
transpose(mat,mat);
shiftRows(mat,n);
transpose(mat,mat);
}
}
/*
* calculate the detection response
*/
void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat kf_data, Mat & response_data, Mat & spec_data) {
  // alphaf --> 2 channels ; k --> 1 channel
mulSpectrums(alphaf_data,kf_data,spec_data,0,false);
ifft2(spec_data,response_data);
}
/*
 * calculate the detection response for the split form
*/
void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat _alphaf_den, const Mat kf_data, Mat & response_data, Mat & spec_data, Mat & spec2_data) {
mulSpectrums(alphaf_data,kf_data,spec_data,0,false);
//z=(a+bi)/(c+di)=[(ac+bd)+i(bc-ad)]/(c^2+d^2)
double den;
for(int i=0;i<kf_data.rows;i++){
for(int j=0;j<kf_data.cols;j++){
den=1.0/(_alphaf_den.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+_alphaf_den.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1]);
spec2_data.at<Vec2d>(i,j)[0]=
(spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
spec2_data.at<Vec2d>(i,j)[1]=
(spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[0]-spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
}
}
ifft2(spec2_data,response_data);
}
void TackerKCFImplParallel::setFeatureExtractor(void (*f)(const Mat, const Rect, Mat&), bool pca_func){
if(pca_func){
extractor_pca.push_back(f);
use_custom_extractor_pca = true;
}else{
extractor_npca.push_back(f);
use_custom_extractor_npca = true;
}
}
/*----------------------------------------------------------------------*/
}
| d1dc2d57ff8ba184df582c03d704c833f2ed63d0.cu | #include "trackerKCFparallel.hpp"
#include <opencv2/cudaarithm.hpp>
#include "dft.cu"
#include "mulspectrums.cu"
#define returnFromUpdate() {fprintf(stderr, "Error in %s line %d while updating frame %d\n", __FILE__, __LINE__, frame);}
/*---------------------------
| TrackerKCFModel
|---------------------------*/
namespace cv{
/**
* \brief Implementation of TrackerModel for KCF algorithm
*/
class TrackerKCFModel : public TrackerModel{
public:
TrackerKCFModel(TrackerKCF::Params /*params*/){}
~TrackerKCFModel(){}
protected:
void modelEstimationImpl( const std::vector<Mat>& /*responses*/ ){}
void modelUpdateImpl(){}
};
} /* namespace cv */
namespace helper {
void MatType( Mat inputMat )
{
int inttype = inputMat.type();
std::string r, a;
uchar depth = inttype & CV_MAT_DEPTH_MASK;
uchar chans = 1 + (inttype >> CV_CN_SHIFT);
switch ( depth ) {
case CV_8U: r = "8U"; a = "Mat.at<uchar>(y,x)"; break;
case CV_8S: r = "8S"; a = "Mat.at<schar>(y,x)"; break;
case CV_16U: r = "16U"; a = "Mat.at<ushort>(y,x)"; break;
case CV_16S: r = "16S"; a = "Mat.at<short>(y,x)"; break;
case CV_32S: r = "32S"; a = "Mat.at<int>(y,x)"; break;
case CV_32F: r = "32F"; a = "Mat.at<float>(y,x)"; break;
case CV_64F: r = "64F"; a = "Mat.at<double>(y,x)"; break;
case CV_32FC2: r = "32FC2"; a = "Mat.at<complex float>(y,x)"; break;
case CV_64FC2: r = "64FC2"; a = "Mat.at<complex double>(y,x)"; break;
    default: r = "User"; a = "Mat.at<UNKNOWN>(y,x)"; break;
}
r += "C";
r += (chans+'0');
std::cout << "Mat is of type " << r << " and should be accessed with " << a << std::endl;
}
}
namespace cv {
/*
* Constructor
*/
TackerKCFImplParallel::TackerKCFImplParallel( const TrackerKCF::Params ¶meters ) :
params( parameters )
{
isInit = false;
resizeImage = false;
use_custom_extractor_pca = false;
use_custom_extractor_npca = false;
#if TIME
total_lines = num_steps;
for (int i = 0; i < num_steps; i++) {
cumulated_times[i] = 0;
}
#if TIME == 2
for (int i = 0; i < num_steps - 1; i++) {
total_lines += num_steps_details[i];
for (int j = 0; j < max_num_details; j++) {
cumulated_details_times[i][j] = 0;
}
}
#endif
#endif
}
void TackerKCFImplParallel::read( const cv::FileNode& fn ){
params.read( fn );
}
void TackerKCFImplParallel::write( cv::FileStorage& fs ) const {
params.write( fs );
}
/*
* Initialization:
* - creating hann window filter
* - ROI padding
* - creating a gaussian response for the training ground-truth
* - perform FFT to the gaussian response
*/
bool TackerKCFImplParallel::initImpl( const Mat& image, const Rect2d& boundingBox ){
#if TIME
double startInit = CycleTimer::currentSeconds();
#endif
frame=0;
roi = boundingBox;
  //calculate output sigma
output_sigma=sqrt(roi.width*roi.height)*params.output_sigma_factor;
output_sigma=-0.5/(output_sigma*output_sigma);
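  // store -1/(2*sigma^2) so that exp(output_sigma * distance^2) below yields the Gaussian target response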
//resize the ROI whenever needed
if(params.resize && roi.width*roi.height>params.max_patch_size){
resizeImage=true;
roi.x/=2.0;
roi.y/=2.0;
roi.width/=2.0;
roi.height/=2.0;
}
// add padding to the roi
roi.x-=roi.width/2;
roi.y-=roi.height/2;
roi.width*=2;
roi.height*=2;
// initialize the hann window filter
createHanningWindow(hann, roi.size(), CV_64F);
// hann window filter for CN feature
Mat _layer[] = {hann, hann, hann, hann, hann, hann, hann, hann, hann, hann};
merge(_layer, 10, hann_cn);
// create gaussian response
y=Mat::zeros((int)roi.height,(int)roi.width,CV_64F);
for(unsigned i=0;i<roi.height;i++){
for(unsigned j=0;j<roi.width;j++){
y.at<double>(i,j)=(i-roi.height/2+1)*(i-roi.height/2+1)+(j-roi.width/2+1)*(j-roi.width/2+1);
}
}
y*=(double)output_sigma;
cv::exp(y,y);
  // perform the Fourier transform of the gaussian response
fft2(y,yf);
model=Ptr<TrackerKCFModel>(new TrackerKCFModel(params));
// record the non-compressed descriptors
if((params.desc_npca & GRAY) == GRAY)descriptors_npca.push_back(GRAY);
if((params.desc_npca & CN) == CN)descriptors_npca.push_back(CN);
if(use_custom_extractor_npca)descriptors_npca.push_back(CUSTOM);
features_npca.resize(descriptors_npca.size());
// record the compressed descriptors
if((params.desc_pca & GRAY) == GRAY)descriptors_pca.push_back(GRAY);
if((params.desc_pca & CN) == CN)descriptors_pca.push_back(CN);
if(use_custom_extractor_pca)descriptors_pca.push_back(CUSTOM);
features_pca.resize(descriptors_pca.size());
// accept only the available descriptor modes
CV_Assert(
(params.desc_pca & GRAY) == GRAY
|| (params.desc_npca & GRAY) == GRAY
|| (params.desc_pca & CN) == CN
|| (params.desc_npca & CN) == CN
|| use_custom_extractor_pca
|| use_custom_extractor_npca
);
// Initialize ExtractCN GpuMats
cuda::createContinuous(roi.size(), CV_8UC3, patch_data_gpu);
cuda::createContinuous(roi.size(), CV_16U, indexes_gpu);
hann_cn_gpu.upload(hann_cn);
// Initialize pca_data_gpu GpuMat
cuda::createContinuous(roi.size(), CV_64F, pca_data_gpu);
// Initialize fft2 GpuMats
Size complex_size(roi.size().width/2+1, roi.size().height);
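  // forward real-to-complex DFTs store only width/2+1 columns (CCE format), hence the reduced buffer width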
int num_channels = image.channels();
cuda::createContinuous(complex_size, CV_64FC2, xyf_c_gpu);
cuda::createContinuous(roi.size(), CV_64F, xyf_r_gpu);
xf_data_gpu.resize(num_channels);
yf_data_gpu.resize(num_channels);
layers_data_gpu.resize(num_channels);
xyf_v_gpu.resize(num_channels);
for (int i = 0; i < num_channels; i++){
cuda::createContinuous(roi.size(), CV_64F, layers_data_gpu[i]);
cuda::createContinuous(complex_size, CV_64FC2, xf_data_gpu[i]);
cuda::createContinuous(complex_size, CV_64FC2, yf_data_gpu[i]);
}
// Initialize ColorNames
size_t ColorNames_size = 32768 * 10 * sizeof(double); //2^15 * 10
cudaSafeCall(cudaMalloc((void**) &ColorNames_gpu, ColorNames_size));
cudaSafeCall(cudaMemcpy(ColorNames_gpu, ColorNames, ColorNames_size, cudaMemcpyHostToDevice));
#if TIME
printInitializationTime(startInit);
#endif
// TODO: return true only if roi inside the image
return true;
}
/*
* Main part of the KCF algorithm
*/
bool TackerKCFImplParallel::updateImpl( const Mat& image, Rect2d& boundingBox ){
#if TIME
double startUpdate = CycleTimer::currentSeconds();
#endif
double minVal, maxVal; // min-max response
Point minLoc,maxLoc; // min-max location
Mat img=image.clone();
// check the channels of the input image, grayscale is preferred
CV_Assert(img.channels() == 1 || img.channels() == 3);
// resize the image whenever needed
if(resizeImage)resize(img,img,Size(img.cols/2,img.rows/2));
#if TIME
double startDetection = CycleTimer::currentSeconds();
#endif
// detection part
if(frame>0){
#if TIME == 2
double startDetectionDetail = CycleTimer::currentSeconds();
#endif
// extract and pre-process the patch
// get non compressed descriptors
for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 0);
#endif
//get non-compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
}
if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 1);
#endif
// get compressed descriptors
for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 2);
#endif
//get compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
}
if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 3);
#endif
    //compress the features and the KRLS model
if(params.desc_pca !=0){
compress(proj_mtx,X[0],X[0],data_temp,compress_data);
compress(proj_mtx,Z[0],Zc[0],data_temp,compress_data);
}
// copy the compressed KRLS model
Zc[1] = Z[1];
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 4);
#endif
// merge all features
if(features_npca.size()==0){
x = X[0];
z = Zc[0];
}else if(features_pca.size()==0){
x = X[1];
z = Z[1];
}else{
merge(X,2,x);
merge(Zc,2,z);
}
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 5);
#endif
//compute the gaussian kernel
denseGaussKernel(params.sigma,x,z,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 6);
#endif
// compute the fourier transform of the kernel
fft2(k,kf);
if(frame==1)spec2=Mat_<Vec2d >(kf.rows, kf.cols);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 7);
#endif
// calculate filter response
if(params.split_coeff)
calcResponse(alphaf,alphaf_den,kf,response, spec, spec2);
else
calcResponse(alphaf,kf,response, spec);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 8);
#endif
// extract the maximum response
minMaxLoc( response, &minVal, &maxVal, &minLoc, &maxLoc );
roi.x+=(maxLoc.x-roi.width/2+1);
roi.y+=(maxLoc.y-roi.height/2+1);
#if TIME == 2
updateTimeDetail(&startDetectionDetail, 0, 9);
#endif
}
#if TIME
updateTime(startDetection, 0);
double startPatches = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startPatchesDetail = startPatches;
#endif
// update the bounding box
boundingBox.x=(resizeImage?roi.x*2:roi.x)+(resizeImage?roi.width*2:roi.width)/4;
boundingBox.y=(resizeImage?roi.y*2:roi.y)+(resizeImage?roi.height*2:roi.height)/4;
boundingBox.width = (resizeImage?roi.width*2:roi.width)/2;
boundingBox.height = (resizeImage?roi.height*2:roi.height)/2;
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 0);
#endif
// extract the patch for learning purpose
// get non compressed descriptors
for(unsigned i=0;i<descriptors_npca.size()-extractor_npca.size();i++){
if(!getSubWindow(img,roi, features_npca[i], img_Patch, descriptors_npca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 1);
#endif
//get non-compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_npca.size()-extractor_npca.size());i<extractor_npca.size();i++,j++){
if(!getSubWindow(img,roi, features_npca[j], extractor_npca[i]))returnFromUpdate();
}
if(features_npca.size()>0)merge(features_npca,X[1]);
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 2);
#endif
// get compressed descriptors
for(unsigned i=0;i<descriptors_pca.size()-extractor_pca.size();i++){
if(!getSubWindow(img,roi, features_pca[i], img_Patch, descriptors_pca[i]))returnFromUpdate();
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 3);
#endif
//get compressed custom descriptors
for(unsigned i=0,j=(unsigned)(descriptors_pca.size()-extractor_pca.size());i<extractor_pca.size();i++,j++){
if(!getSubWindow(img,roi, features_pca[j], extractor_pca[i]))returnFromUpdate();
}
if(features_pca.size()>0)merge(features_pca,X[0]);
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 4);
#endif
//update the training data
if(frame==0){
Z[0] = X[0].clone();
Z[1] = X[1].clone();
}else{
Z[0]=(1.0-params.interp_factor)*Z[0]+params.interp_factor*X[0];
Z[1]=(1.0-params.interp_factor)*Z[1]+params.interp_factor*X[1];
}
#if TIME == 2
updateTimeDetail(&startPatchesDetail, 1, 5);
#endif
#if TIME
updateTime(startPatches, 1);
double startCompression = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startCompressionDetail = startCompression;
#endif
if(params.desc_pca !=0 || use_custom_extractor_pca){
// initialize the vector of Mat variables
if(frame==0){
layers_pca_data.resize(Z[0].channels());
average_data.resize(Z[0].channels());
}
// feature compression
updateProjectionMatrix(Z[0],old_cov_mtx,proj_mtx,params.pca_learning_rate,params.compressed_size,layers_pca_data,average_data,data_pca, new_covar,w_data,u_data,vt_data);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 0);
#endif
compress(proj_mtx,X[0],X[0],data_temp,compress_data);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 1);
#endif
}
// merge all features
if(features_npca.size()==0)
x = X[0];
else if(features_pca.size()==0)
x = X[1];
else
merge(X,2,x);
#if TIME == 2
updateTimeDetail(&startCompressionDetail, 2, 2);
#endif
#if TIME
updateTime(startCompression, 2);
double startLeastSquares = CycleTimer::currentSeconds();
#endif
#if TIME == 2
double startLeastSquaresDetail = startLeastSquares;
#endif
// initialize some required Mat variables
if(frame==0){
layers.resize(x.channels());
vxf.resize(x.channels());
vyf.resize(x.channels());
vxyf.resize(vyf.size());
new_alphaf=Mat_<Vec2d >(yf.rows, yf.cols);
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 0);
#endif
// Kernel Regularized Least-Squares, calculate alphas
denseGaussKernel(params.sigma,x,x,k,layers,vxf,vyf,vxyf,xy_data,xyf_data);
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 1);
#endif
// compute the fourier transform of the kernel and add a small value
fft2(k,kf);
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 2);
#endif
kf_lambda=kf+params.lambda;
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 3);
#endif
double den;
if(params.split_coeff){
mulSpectrums(yf,kf,new_alphaf,0);
mulSpectrums(kf,kf_lambda,new_alphaf_den,0);
}else{
for(int i=0;i<yf.rows;i++){
for(int j=0;j<yf.cols;j++){
den = 1.0/(kf_lambda.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+kf_lambda.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1]);
new_alphaf.at<Vec2d>(i,j)[0]=
(yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[0]+yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[1])*den;
new_alphaf.at<Vec2d>(i,j)[1]=
(yf.at<Vec2d>(i,j)[1]*kf_lambda.at<Vec2d>(i,j)[0]-yf.at<Vec2d>(i,j)[0]*kf_lambda.at<Vec2d>(i,j)[1])*den;
}
}
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 4);
#endif
// update the RLS model
if(frame==0){
alphaf=new_alphaf.clone();
if(params.split_coeff)alphaf_den=new_alphaf_den.clone();
}else{
alphaf=(1.0-params.interp_factor)*alphaf+params.interp_factor*new_alphaf;
if(params.split_coeff)alphaf_den=(1.0-params.interp_factor)*alphaf_den+params.interp_factor*new_alphaf_den;
}
#if TIME == 2
updateTimeDetail(&startLeastSquaresDetail, 3, 5);
#endif
#if TIME
updateTime(startLeastSquares, 3);
updateTime(startUpdate, 4);
printAverageTimes();
#endif
frame++;
return true;
}
/*-------------------------------------
| implementation of the KCF functions
|-------------------------------------*/
/*
* hann window filter
*/
void TackerKCFImplParallel::createHanningWindow(OutputArray dest, const cv::Size winSize, const int type) const {
CV_Assert( type == CV_32FC1 || type == CV_64FC1 );
dest.create(winSize, type);
Mat dst = dest.getMat();
int rows = dst.rows, cols = dst.cols;
AutoBuffer<double> _wc(cols);
double * const wc = (double *)_wc;
double coeff0 = 2.0 * CV_PI / (double)(cols - 1), coeff1 = 2.0f * CV_PI / (double)(rows - 1);
for(int j = 0; j < cols; j++)
wc[j] = 0.5 * (1.0 - cos(coeff0 * j));
if(dst.depth() == CV_32F){
for(int i = 0; i < rows; i++){
float* dstData = dst.ptr<float>(i);
double wr = 0.5 * (1.0 - cos(coeff1 * i));
for(int j = 0; j < cols; j++)
dstData[j] = (float)(wr * wc[j]);
}
}else{
for(int i = 0; i < rows; i++){
double* dstData = dst.ptr<double>(i);
double wr = 0.5 * (1.0 - cos(coeff1 * i));
for(int j = 0; j < cols; j++)
dstData[j] = wr * wc[j];
}
}
// perform batch sqrt for SSE performance gains
  //cv::sqrt(dst, dst); // Matlab does not use the square-rooted version
}
/*
* simplification of fourier transform function in opencv
*/
void inline TackerKCFImplParallel::fft2(const Mat src, Mat & dest) const {
dft(src,dest,DFT_COMPLEX_OUTPUT);
}
void inline TackerKCFImplParallel::fft2(const Mat src, std::vector<Mat> & dest, std::vector<Mat> & layers_data) const {
split(src, layers_data);
for(int i=0;i<src.channels();i++){
dft(layers_data[i],dest[i],DFT_COMPLEX_OUTPUT);
}
}
void inline TackerKCFImplParallel::cudafft2(int num_channels, std::vector<cuda::GpuMat> & dest, std::vector<cuda::GpuMat> & layers_data) {
for (int i = 0; i < num_channels; i++) {
cuda::dft(layers_data[i], dest[i], layers_data[i].size(), DFT_DOUBLE);
}
}
/*
* simplification of inverse fourier transform function in opencv
*/
void inline TackerKCFImplParallel::ifft2(const Mat src, Mat & dest) const {
idft(src,dest,DFT_SCALE+DFT_REAL_OUTPUT);
}
void inline TackerKCFImplParallel::cudaifft2(const cuda::GpuMat src, cuda::GpuMat & dest) {
cuda::GpuMat src_cce;
src_cce = src;
  // The size correction is necessary to account for the CCE format
cv::Size dest_size((src.size().width -1)*2,src.size().height);
cuda::dft(src_cce, dest, dest_size,
(DFT_SCALE + DFT_REAL_OUTPUT) | DFT_INVERSE | DFT_DOUBLE);
}
// Expand half a matrix by inferring the complex conjugates of the cols to
// complete the second half
void inline TackerKCFImplParallel::cce2full(const Mat src, Mat & dest) {
// Assume that the original size of the matrix was divisible by 2
Mat result(cv::Size((src.size().width-1)*2,src.size().height),src.type());
for (int j=0; j < (src.size().width-1)*2;j++) {
for (int i = 0; i < src.size().height;i++) {
if (j <src.size().width-1) {
result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,j)[0];
result.at<Vec2d>(i,j)[1] = src.at<Vec2d>(i,j)[1];
} else {
// Complex conjugate
result.at<Vec2d>(i,j)[0] = src.at<Vec2d>(i,2*(src.size().width - 1) - j)[0];
result.at<Vec2d>(i,j)[1] = - src.at<Vec2d>(i,2*(src.size().width -1) - j)[1];
}
}
}
dest = result;
}
void inline TackerKCFImplParallel::full2cce(const Mat src, Mat & dest) {
//We take the first half of the matrix
cv::Rect roi(0, 0, src.size().width/2+1, src.size().height);
dest = src(roi);
}
/*
 * Point-wise multiplication of two multi-channel Mat data
*/
void inline TackerKCFImplParallel::pixelWiseMult(const std::vector<cuda::GpuMat> src1, const std::vector<cuda::GpuMat> src2, std::vector<cuda::GpuMat> & dest, const int flags, const bool conjB) const {
for(unsigned i=0;i<src1.size();i++){
cv::cuda::mulSpectrums(src1[i], src2[i], dest[i],flags,conjB);
}
}
/*
 * Combines all channels of a multi-channel Mat into a single channel
*/
void inline TackerKCFImplParallel::sumChannels(std::vector<cuda::GpuMat> src, cuda::GpuMat & dest) const {
src[0].copyTo(dest);
for(unsigned i=1;i<src.size();i++){
cuda::add(src[i],dest,dest);
}
}
//void inline
/*
* obtains the projection matrix using PCA
*/
void inline TackerKCFImplParallel::updateProjectionMatrix(const Mat src, Mat & old_cov,Mat & proj_matrix, double pca_rate, int compressed_sz,
std::vector<Mat> & layers_pca,std::vector<Scalar> & average, Mat pca_data, Mat new_cov, Mat w, Mat u, Mat vt) {
GpuMat new_cov_gpu;
double start = CycleTimer::currentSeconds();
CV_Assert(compressed_sz<=src.channels());
split(src,layers_pca);
for (int i=0;i<src.channels();i++){
average[i]=mean(layers_pca[i]);
layers_pca[i]-=average[i];
}
// calc covariance matrix
merge(layers_pca,pca_data);
pca_data=pca_data.reshape(1,src.rows*src.cols);
pca_data_gpu.upload(pca_data);
GpuMat src3;
cuda::gemm(pca_data_gpu, pca_data_gpu, 1.0/(double)(src.rows*src.cols-1),
src3, 0, new_cov_gpu, GEMM_1_T);
new_cov_gpu.download(new_cov);
if(old_cov.rows==0)old_cov=new_cov.clone();
// calc PCA
SVD::compute((1.0-pca_rate)*old_cov+pca_rate*new_cov, w, u, vt);
// extract the projection matrix
proj_matrix=u(Rect(0,0,compressed_sz,src.channels())).clone();
Mat proj_vars=Mat::eye(compressed_sz,compressed_sz,proj_matrix.type());
for(int i=0;i<compressed_sz;i++){
proj_vars.at<double>(i,i)=w.at<double>(i);
}
// update the covariance matrix
old_cov=(1.0-pca_rate)*old_cov+pca_rate*proj_matrix*proj_vars*proj_matrix.t();
}
/*
* compress the features
*/
void inline TackerKCFImplParallel::compress(const Mat proj_matrix, const Mat src, Mat & dest, Mat & data, Mat & compressed) const {
data=src.reshape(1,src.rows*src.cols);
compressed=data*proj_matrix;
dest=compressed.reshape(proj_matrix.cols,src.rows).clone();
}
/*
* obtain the patch and apply hann window filter to it
*/
bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, Mat& patch, TrackerKCF::MODE desc) {
Rect region=_roi;
// return false if roi is outside the image
if((_roi.x+_roi.width<0)
||(_roi.y+_roi.height<0)
||(_roi.x>=img.cols)
||(_roi.y>=img.rows)
)return false;
// extract patch inside the image
if(_roi.x<0){region.x=0;region.width+=_roi.x;}
if(_roi.y<0){region.y=0;region.height+=_roi.y;}
if(_roi.x+_roi.width>img.cols)region.width=img.cols-_roi.x;
if(_roi.y+_roi.height>img.rows)region.height=img.rows-_roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-_roi.y;
addBottom=(_roi.height+_roi.y>img.rows?_roi.height+_roi.y-img.rows:0);
addLeft=region.x-_roi.x;
addRight=(_roi.width+_roi.x>img.cols?_roi.width+_roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
if(patch.rows==0 || patch.cols==0)return false;
// extract the desired descriptors
switch(desc){
case CN:
CV_Assert(img.channels() == 3);
extractCN(patch,feat);
//feat=feat.mul(hann_cn); // hann window filter
break;
default: // GRAY
if(img.channels()>1)
cvtColor(patch,feat, CV_BGR2GRAY);
else
feat=patch;
feat.convertTo(feat,CV_64F);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
feat=feat.mul(hann); // hann window filter
break;
}
return true;
}
/*
* get feature using external function
*/
bool TackerKCFImplParallel::getSubWindow(const Mat img, const Rect _roi, Mat& feat, void (*f)(const Mat, const Rect, Mat& )) const{
// return false if roi is outside the image
if((_roi.x+_roi.width<0)
||(_roi.y+_roi.height<0)
||(_roi.x>=img.cols)
||(_roi.y>=img.rows)
)return false;
f(img, _roi, feat);
if(_roi.width != feat.cols || _roi.height != feat.rows){
printf("error in customized function of features extractor!\n");
printf("Rules: roi.width==feat.cols && roi.height = feat.rows \n");
}
Mat hann_win;
std::vector<Mat> _layers;
for(int i=0;i<feat.channels();i++)
_layers.push_back(hann);
merge(_layers, hann_win);
feat=feat.mul(hann_win); // hann window filter
return true;
}
__global__ void extractIndexKernel(const cuda::PtrStepSz<uchar3> input,
cuda::PtrStep<ushort> output) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= 0 && x < input.cols && y >= 0 && y < input.rows) {
uchar3 pixel = input(y,x);
output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8));
}
}
__global__ void extractCNKernel(const cuda::PtrStepSz<ushort> input,
cuda::PtrStep<double[10]> output, const double *ColorNames) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
if (x >= 0 && x < input.cols && y >= 0 && y < input.rows && k >= 0
&& k < 10) {
short index = input(y,x);
output.ptr(y)[x][k] = ColorNames[10*index + k];
//output.ptr(y)[x] = (floor((float)pixel.z/8)+32*floor((float)pixel.y/8)+32*32*floor((float)pixel.x/8));
}
}
/* Convert BGR to ColorNames
*/
void TackerKCFImplParallel::extractCN(Mat patch_data, Mat & cnFeatures) {
if(cnFeatures.type() != CV_64FC(10)) {
cnFeatures = Mat::zeros(patch_data.rows,patch_data.cols,CV_64FC(10));
}
patch_data_gpu.upload(patch_data);
dim3 cthreads2d(32, 32);
dim3 cblocks2d(
static_cast<int>(std::ceil(patch_data_gpu.size().width /
static_cast<double>(cthreads2d.x))),
static_cast<int>(std::ceil(patch_data_gpu.size().height /
static_cast<double>(cthreads2d.y))));
extractIndexKernel<<<cblocks2d, cthreads2d>>>(patch_data_gpu, indexes_gpu);
cudaSafeCall(cudaGetLastError());
cuda::GpuMat cnFeatures_gpu;
cuda::createContinuous(patch_data.size(), CV_64FC(10), cnFeatures_gpu);
dim3 cthreads3d(32, 32, 1);
dim3 cblocks3d(
static_cast<int>(std::ceil(patch_data_gpu.size().width /
static_cast<double>(cthreads3d.x))),
static_cast<int>(std::ceil(patch_data_gpu.size().height /
static_cast<double>(cthreads3d.y))),
static_cast<int>(std::ceil(10 /
static_cast<double>(cthreads3d.z))));
extractCNKernel<<<cblocks3d, cthreads3d>>>(indexes_gpu, cnFeatures_gpu, ColorNames_gpu);
cudaSafeCall(cudaGetLastError());
cuda::multiply(cnFeatures_gpu, hann_cn_gpu, cnFeatures_gpu);
cnFeatures_gpu.download(cnFeatures);
}
/*
* dense gauss kernel function
*/
void TackerKCFImplParallel::denseGaussKernel(const double sigma, const Mat x_data, const Mat y_data, Mat & k_data,
std::vector<Mat> & layers_data,std::vector<Mat> & xf_data,std::vector<Mat> & yf_data, std::vector<Mat> xyf_v, Mat xy, Mat xyf ) {
  // First we upload all the data to the GPU
int num_channels = x_data.channels();
double normX = norm(x_data, NORM_L2SQR);
double normY = norm(y_data, NORM_L2SQR);
cv::cuda::Stream stream;
split(x_data, layers_data);
for (int i = 0; i < x_data.channels(); i++){
layers_data_gpu[i].upload(layers_data[i], stream);
}
stream.waitForCompletion();
cudafft2(x_data.channels(),xf_data_gpu,layers_data_gpu);
split(y_data, layers_data);
for (int i = 0; i < x_data.channels(); i++){
layers_data_gpu[i].upload(layers_data[i], stream);
}
stream.waitForCompletion();
cudafft2(y_data.channels(),yf_data_gpu,layers_data_gpu);
pixelWiseMult(xf_data_gpu,yf_data_gpu,xyf_v_gpu,0,true);
sumChannels(xyf_v_gpu,xyf_c_gpu);
cudaifft2(xyf_c_gpu,xyf_r_gpu);
xyf_r_gpu.download(xyf);
if(params.wrap_kernel){
shiftRows(xyf, x_data.rows/2);
shiftCols(xyf, x_data.cols/2);
}
//(xx + yy - 2 * xy) / numel(x)
xy=(normX+normY-2*xyf)/(x_data.rows*x_data.cols*x_data.channels());
  // TODO: check whether we really need thresholding or not
//threshold(xy,xy,0.0,0.0,THRESH_TOZERO);//max(0, (xx + yy - 2 * xy) / numel(x))
for(int i=0;i<xy.rows;i++){
for(int j=0;j<xy.cols;j++){
if(xy.at<double>(i,j)<0.0)xy.at<double>(i,j)=0.0;
}
}
double sig=-1.0/(sigma*sigma);
xy=sig*xy;
exp(xy, k_data);
}
/* CIRCULAR SHIFT Function
* http://stackoverflow.com/questions/10420454/shift-like-matlab-function-rows-or-columns-of-a-matrix-in-opencv
*/
// circular shift one row from up to down
void TackerKCFImplParallel::shiftRows(Mat& mat) const {
Mat temp;
Mat m;
int _k = (mat.rows-1);
mat.row(_k).copyTo(temp);
for(; _k > 0 ; _k-- ) {
m = mat.row(_k);
mat.row(_k-1).copyTo(m);
}
m = mat.row(0);
temp.copyTo(m);
}
// circular shift n rows from up to down if n > 0, -n rows from down to up if n < 0
void TackerKCFImplParallel::shiftRows(Mat& mat, int n) const {
if( n < 0 ) {
n = -n;
flip(mat,mat,0);
for(int _k=0; _k < n;_k++) {
shiftRows(mat);
}
flip(mat,mat,0);
}else{
for(int _k=0; _k < n;_k++) {
shiftRows(mat);
}
}
}
//circular shift n columns from left to right if n > 0, -n columns from right to left if n < 0
void TackerKCFImplParallel::shiftCols(Mat& mat, int n) const {
if(n < 0){
n = -n;
flip(mat,mat,1);
transpose(mat,mat);
shiftRows(mat,n);
transpose(mat,mat);
flip(mat,mat,1);
}else{
transpose(mat,mat);
shiftRows(mat,n);
transpose(mat,mat);
}
}
/*
* calculate the detection response
*/
void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat kf_data, Mat & response_data, Mat & spec_data) {
  // alphaf --> 2 channels ; k --> 1 channel
mulSpectrums(alphaf_data,kf_data,spec_data,0,false);
ifft2(spec_data,response_data);
}
/*
 * calculate the detection response for the split form
*/
void TackerKCFImplParallel::calcResponse(const Mat alphaf_data, const Mat _alphaf_den, const Mat kf_data, Mat & response_data, Mat & spec_data, Mat & spec2_data) {
mulSpectrums(alphaf_data,kf_data,spec_data,0,false);
//z=(a+bi)/(c+di)=[(ac+bd)+i(bc-ad)]/(c^2+d^2)
double den;
for(int i=0;i<kf_data.rows;i++){
for(int j=0;j<kf_data.cols;j++){
den=1.0/(_alphaf_den.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+_alphaf_den.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1]);
spec2_data.at<Vec2d>(i,j)[0]=
(spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[0]+spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
spec2_data.at<Vec2d>(i,j)[1]=
(spec_data.at<Vec2d>(i,j)[1]*_alphaf_den.at<Vec2d>(i,j)[0]-spec_data.at<Vec2d>(i,j)[0]*_alphaf_den.at<Vec2d>(i,j)[1])*den;
}
}
ifft2(spec2_data,response_data);
}
void TackerKCFImplParallel::setFeatureExtractor(void (*f)(const Mat, const Rect, Mat&), bool pca_func){
if(pca_func){
extractor_pca.push_back(f);
use_custom_extractor_pca = true;
}else{
extractor_npca.push_back(f);
use_custom_extractor_npca = true;
}
}
/*----------------------------------------------------------------------*/
}
|
06bff03943921be0c165e4c1509a8c6082b148f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "gputimer.h"
#define BLOCK_SIZE (1024)
double get_walltime()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec*1e-6);
}
void Write(float* buffer, int np, char* output){
FILE *f;
f=fopen(output,"w");
for(int i=0;i<np;++i){
fprintf(f,"%15f \n",buffer[i]);
}
fclose(f);
}
void force_repulsion(int np, const float *pos, float L, float krepulsion,
float *forces)
{
int i, j;
float posi[4];
float rvec[4];
float s2, s, f;
// initialize forces to zero
for (i=0; i<3*np; i++)
forces[i] = 0.;
// loop over all pairs
for (i=0; i<np; i++)
{
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++)
{
// compute minimum image difference
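      // remainder() maps each displacement into [-L/2, L/2], i.e. picks the nearest periodic image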
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4)
{
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
__global__ void gpu_find_repulsion(int np, float*pos, float L, float krepulsion, float* forces){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<np){
int j;
float posi[3];
float rvec[3];
float s2, s, f;
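    // each thread owns particle i and loops over partners j > i; several threads can touch the
    // same force entry concurrently, so all global accumulations below use atomicAdd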
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; ++j){
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4){
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
        // particle i's own force can also receive contributions from other threads,
        // so accumulate it atomically as well to avoid lost updates
        atomicAdd(&forces[3*i ], f*rvec[0]);
        atomicAdd(&forces[3*i+1], f*rvec[1]);
        atomicAdd(&forces[3*i+2], f*rvec[2]);
atomicAdd(&forces[3*j],-f*rvec[0]);
atomicAdd(&forces[3*j+1],-f*rvec[1]);
atomicAdd(&forces[3*j+2],-f*rvec[2]);
}
}
}
}
int main(int argc, char *argv[])
{
int i;
int np = 100; // default number of particles
float phi = 0.3; // volume fraction
float krepulsion = 125.; // force constant
float *pos;
float *forces;
double time0, time1;
if (argc > 1)
np = atoi(argv[1]);
// compute simulation box width
float L = pow(4./3.*3.1415926536*np/phi, 1./3.);
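  // (np spheres of unit radius then occupy a volume fraction phi of the box volume L^3)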
// generate random particle positions inside simulation box
forces = (float *) malloc(3*np*sizeof(float));
pos = (float *) malloc(3*np*sizeof(float));
for (i=0; i<3*np; i++)
pos[i] = rand()/(float)RAND_MAX*L;
time0 = get_walltime();
force_repulsion(np, pos, L, krepulsion, forces);
time1 = get_walltime();
//print performance and write to file
printf("number of particles: %d\n", np);
printf("elapsed time of cpu program: %f seconds\n", time1-time0);
Write(forces,3*np,"cpu_output");
//reinitialization of forces
// for(int i=0;i<np*3;++i) forces[i]=0.;
//gpu program
float *gpu_pos;
float *gpu_forces;
int bytes=3*np*sizeof(float);
GpuTimer timer;
hipMalloc((void**)&gpu_pos,bytes);
hipMalloc((void**)&gpu_forces,bytes);
hipMemcpy(gpu_pos, pos, bytes, hipMemcpyHostToDevice);
hipMemset(gpu_forces, 0, bytes);
timer.Start();
hipLaunchKernelGGL(( gpu_find_repulsion), dim3((3*np+BLOCK_SIZE-1)/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, np, gpu_pos, L, krepulsion, gpu_forces);
timer.Stop();
hipMemcpy(forces, gpu_forces, bytes, hipMemcpyDeviceToHost);
printf("number of particles: %d\n", np);
printf("elapsed time of gpu program: %f seconds\n", timer.Elapsed()/1000);
Write(forces,3*np,"gpu_output");
printf("speed up of gpu is %f \n",(time1-time0)/(timer.Elapsed()/1000));
hipFree(gpu_pos);
hipFree(gpu_forces);
free(forces);
free(pos);
return 0;
}
| 06bff03943921be0c165e4c1509a8c6082b148f0.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include "gputimer.h"
#define BLOCK_SIZE (1024)
double get_walltime()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec*1e-6);
}
void Write(float* buffer, int np, char* output){
FILE *f;
f=fopen(output,"w");
for(int i=0;i<np;++i){
fprintf(f,"%15f \n",buffer[i]);
}
fclose(f);
}
void force_repulsion(int np, const float *pos, float L, float krepulsion,
float *forces)
{
int i, j;
float posi[4];
float rvec[4];
float s2, s, f;
// initialize forces to zero
for (i=0; i<3*np; i++)
forces[i] = 0.;
// loop over all pairs
for (i=0; i<np; i++)
{
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; j++)
{
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4)
{
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
forces[3*i ] += f*rvec[0];
forces[3*i+1] += f*rvec[1];
forces[3*i+2] += f*rvec[2];
forces[3*j ] += -f*rvec[0];
forces[3*j+1] += -f*rvec[1];
forces[3*j+2] += -f*rvec[2];
}
}
}
}
__global__ void gpu_find_repulsion(int np, float*pos, float L, float krepulsion, float* forces){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i<np){
int j;
float posi[3];
float rvec[3];
float s2, s, f;
posi[0] = pos[3*i ];
posi[1] = pos[3*i+1];
posi[2] = pos[3*i+2];
for (j=i+1; j<np; ++j){
// compute minimum image difference
rvec[0] = remainder(posi[0] - pos[3*j ], L);
rvec[1] = remainder(posi[1] - pos[3*j+1], L);
rvec[2] = remainder(posi[2] - pos[3*j+2], L);
s2 = rvec[0]*rvec[0] + rvec[1]*rvec[1] + rvec[2]*rvec[2];
if (s2 < 4){
s = sqrt(s2);
rvec[0] /= s;
rvec[1] /= s;
rvec[2] /= s;
f = krepulsion*(2.-s);
        // particle i's own force can also receive contributions from other threads,
        // so accumulate it atomically as well to avoid lost updates
        atomicAdd(&forces[3*i ], f*rvec[0]);
        atomicAdd(&forces[3*i+1], f*rvec[1]);
        atomicAdd(&forces[3*i+2], f*rvec[2]);
atomicAdd(&forces[3*j],-f*rvec[0]);
atomicAdd(&forces[3*j+1],-f*rvec[1]);
atomicAdd(&forces[3*j+2],-f*rvec[2]);
}
}
}
}
int main(int argc, char *argv[])
{
int i;
int np = 100; // default number of particles
float phi = 0.3; // volume fraction
float krepulsion = 125.; // force constant
float *pos;
float *forces;
double time0, time1;
if (argc > 1)
np = atoi(argv[1]);
// compute simulation box width
float L = pow(4./3.*3.1415926536*np/phi, 1./3.);
// generate random particle positions inside simulation box
forces = (float *) malloc(3*np*sizeof(float));
pos = (float *) malloc(3*np*sizeof(float));
for (i=0; i<3*np; i++)
pos[i] = rand()/(float)RAND_MAX*L;
time0 = get_walltime();
force_repulsion(np, pos, L, krepulsion, forces);
time1 = get_walltime();
//print performance and write to file
printf("number of particles: %d\n", np);
printf("elapsed time of cpu program: %f seconds\n", time1-time0);
Write(forces,3*np,"cpu_output");
//reinitialization of forces
// for(int i=0;i<np*3;++i) forces[i]=0.;
//gpu program
float *gpu_pos;
float *gpu_forces;
int bytes=3*np*sizeof(float);
GpuTimer timer;
cudaMalloc((void**)&gpu_pos,bytes);
cudaMalloc((void**)&gpu_forces,bytes);
cudaMemcpy(gpu_pos, pos, bytes, cudaMemcpyHostToDevice);
cudaMemset(gpu_forces, 0, bytes);
timer.Start();
gpu_find_repulsion<<<(3*np+BLOCK_SIZE-1)/BLOCK_SIZE,BLOCK_SIZE>>>(np, gpu_pos, L, krepulsion, gpu_forces);
timer.Stop();
cudaMemcpy(forces, gpu_forces, bytes, cudaMemcpyDeviceToHost);
printf("number of particles: %d\n", np);
printf("elapsed time of gpu program: %f seconds\n", timer.Elapsed()/1000);
Write(forces,3*np,"gpu_output");
printf("speed up of gpu is %f \n",(time1-time0)/(timer.Elapsed()/1000));
cudaFree(gpu_pos);
cudaFree(gpu_forces);
free(forces);
free(pos);
return 0;
}
|
2046965ad57889be01cf2f88387c6c7becaf9936.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define BOOST_GPU_ENABLED __host__ __device__
#include "config.h"
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "util/cuda_util.hpp"
#include "util/boost/boost_array_openfpm.hpp"
#include "Vector/map_vector.hpp"
#include "scan_cuda.cuh"
#ifndef SCAN_WITH_CUB
#define SCAN_WITH_CUB
#endif
#include "scan_ofp.cuh"
BOOST_AUTO_TEST_SUITE( scan_tests )
template<typename cnt_type, typename ids_type>
void test_compress()
{
openfpm::vector<aggregate<cnt_type,cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type,cnt_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> compressed;
	// fill in some counts
cl_n.resize(12);
cl_n.template get<0>(0) = 3;
cl_n.template get<0>(1) = 5;
cl_n.template get<0>(2) = 8;
cl_n.template get<0>(3) = 1;
cl_n.template get<0>(4) = 0;
cl_n.template get<0>(5) = 0;
cl_n.template get<0>(6) = 21;
cl_n.template get<0>(7) = 4;
cl_n.template get<0>(8) = 4;
cl_n.template get<0>(9) = 6;
cl_n.template get<0>(10) = 10;
compressed.resize(cl_n.size());
auto ite = cl_n.getGPUIterator();
ite.thr.x /= 4;
cl_n.template hostToDevice<0>();
hipLaunchKernelGGL(( compress4<cnt_type,ids_type>), dim3(ite.wthr),dim3(ite.thr), 0, 0, cl_n.size(),
static_cast<cnt_type *>(cl_n.template getDeviceBuffer<0>()),
static_cast<ids_type *>(compressed.template getDeviceBuffer<0>()));
compressed.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(compressed.template get<0>(0),3);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(1),5);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(2),8);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(3),1);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(4),0);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(5),0);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(6),21);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(7),4);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(8),4);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(9),6);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(10),10);
}
template<typename cnt_type, typename ids_type>
void test_breduce()
{
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> red;
cl_n.resize(8192);
constexpr int THREADS = 128;
constexpr int ratio = 4*sizeof(cnt_type)/sizeof(ids_type);
constexpr int red_tot = THREADS * ratio;
int nblocks = ((cl_n.size() / (ratio) ) + THREADS - 1 ) / THREADS;
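	// each block reduces THREADS*ratio packed ids_type counters into one cnt_type partial sum,
	// so block_red holds the expected per-block totals computed on the host below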
// fill with some data
openfpm::vector<cnt_type> block_red;
block_red.resize(nblocks);
for (size_t i = 0 ; i < cl_n.size() ; i++)
{
if ((i % red_tot)/256 == 0)
{
cl_n.template get<0>(i) = i%128;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 1)
{
cl_n.template get<0>(i) = i%7;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 2)
{
cl_n.template get<0>(i) = i%13;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 3)
{
cl_n.template get<0>(i) = i%17;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 4)
{
cl_n.template get<0>(i) = i%128;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 5)
{
cl_n.template get<0>(i) = i%7;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 6)
{
cl_n.template get<0>(i) = i%13;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 7)
{
cl_n.template get<0>(i) = i%17;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
}
red.resize(nblocks);
cl_n.template hostToDevice<0>();
hipLaunchKernelGGL(( breduce<THREADS/32,cnt_type,ids_type,ratio_reduction<cnt_type,ids_type>>), dim3(nblocks),dim3(THREADS), 0, 0, cl_n.size()/ratio*4,
static_cast<cnt_type *>(cl_n.template getDeviceBuffer<0>()),
static_cast<cnt_type *>(red.template getDeviceBuffer<0>()));
red.template deviceToHost<0>();
for (size_t i = 0 ; i < red.size() ; i++)
{
BOOST_REQUIRE_EQUAL(red.template get<0>(i),block_red.get(i));
}
}
template<typename cnt_type>
void test_bexscan()
{
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> base;
base.resize(500);
constexpr int THREADS = 128;
// fill with some data
for (size_t i = 0 ; i < base.size() ; i++)
{base.template get<0>(i) = 1;}
int nblocks = base.size();
base.template hostToDevice<0>();
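	// with every counter initialised to 1, the in-place block scan must produce the
	// inclusive prefix sums 1,2,3,... checked below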
hipLaunchKernelGGL(( bexscan<THREADS,cnt_type>), dim3(1),dim3(THREADS),nblocks*sizeof(unsigned int), 0, nblocks,
static_cast<cnt_type *>(base.template getDeviceBuffer<0>()));
base.template deviceToHost<0>();
for (size_t i = 0 ; i < base.size() ; i++)
{
BOOST_REQUIRE_EQUAL(base.template get<0>(i),i+1);
}
}
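// test_gexscan: run gexscan on the packed counters plus per-block bases and
// check the output against a host-side exclusive prefix sum of cl_n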
template<typename cnt_type, typename ids_type>
void test_gexscan()
{
size_t nb = 16;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> base;
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n_scan;
constexpr int ratio = sizeof(cnt_type)/sizeof(ids_type);
constexpr int THREADS = 128;
constexpr int expand = THREADS * ratio;
base.resize(nb);
cl_n.resize(expand*nb);
cl_n_scan.resize(expand*nb);
// fill with some data
for (size_t i = 0 ; i < base.size() ; i++)
{base.template get<0>(i) = (i+1)*120*THREADS/4*ratio;}
for (size_t i = 0 ; i < cl_n.size() ; i++)
{cl_n.template get<0>(i) = i%16;}
int nblocks = cl_n.size() / ratio;
cl_n.template hostToDevice<0>();
base.template hostToDevice<0>();
hipLaunchKernelGGL(( gexscan<THREADS/32,ratio_extend<cnt_type,ids_type>>) , dim3(cl_n.size() / ratio / THREADS), dim3(THREADS) , 0, 0, nblocks,
static_cast<typename ratio_extend<cnt_type,ids_type>::cnt_type4 *>(cl_n.template getDeviceBuffer<0>()),
static_cast<cnt_type *>(base.template getDeviceBuffer<0>()),
static_cast<typename ratio_extend<cnt_type,ids_type>::cnt_type4 *>(cl_n_scan.template getDeviceBuffer<0>()));
cl_n_scan.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(0,cl_n_scan.template get<0>(0));
size_t scan = 0;
for (size_t i = 1 ; i < cl_n_scan.size() ; i++)
{
scan += cl_n.template get<0>(i-1);
BOOST_REQUIRE_EQUAL(cl_n_scan.template get<0>(i),scan);
}
}
BOOST_AUTO_TEST_CASE (test_breduce_func )
{
test_breduce<unsigned int, unsigned char>();
test_breduce<unsigned int, unsigned short>();
test_breduce<unsigned int, unsigned int>();
test_breduce<unsigned int, char>();
test_breduce<unsigned int, short>();
test_breduce<unsigned int, int>();
test_breduce<int, unsigned char>();
test_breduce<int, unsigned short>();
test_breduce<int, unsigned int>();
test_breduce<int, char>();
test_breduce<int, short>();
test_breduce<int, int>();
}
BOOST_AUTO_TEST_CASE(test_compress_functions)
{
test_compress<unsigned int, unsigned char>();
test_compress<unsigned int, unsigned short>();
test_compress<unsigned int, unsigned int>();
test_compress<unsigned int, char>();
test_compress<unsigned int, short>();
test_compress<unsigned int, int>();
test_compress<int, unsigned char>();
test_compress<int, unsigned short>();
test_compress<int, unsigned int>();
test_compress<int, char>();
test_compress<int, short>();
test_compress<int, int>();
}
BOOST_AUTO_TEST_CASE(test_bexscan_functions)
{
test_bexscan<unsigned int>();
test_bexscan<int>();
}
BOOST_AUTO_TEST_CASE( test_gexscan_funcs )
{
std::cout << "Test cell list GPU base func" << "\n";
test_gexscan<unsigned int, unsigned char>();
test_gexscan<unsigned int, unsigned short>();
test_gexscan<unsigned int, unsigned int>();
test_gexscan<int, unsigned char>();
test_gexscan<int, unsigned short>();
test_gexscan<int, unsigned int>();
std::cout << "End cell list GPU" << "\n";
// Test the cell list
}
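// test_scan: run the (obsolete) scan helper on random counts and verify the
// result is an exclusive prefix sum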
template<typename cnt_type, typename ids_type>
void test_scan(size_t num)
{
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n_scan;
cl_n.resize(num);
// fill with some data
for (size_t i = 0 ; i < cl_n.size() ; i++)
{cl_n.template get<0>(i) = 255.0*rand()/RAND_MAX;}
cl_n.template hostToDevice<0>();
scan<cnt_type,ids_type> sc;
sc.scan_(cl_n,cl_n_scan);
cl_n_scan.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(0,cl_n_scan.template get<0>(0));
size_t scan = 0;
for (size_t i = 1 ; i < cl_n_scan.size() ; i++)
{
scan += cl_n.template get<0>(i-1);
BOOST_REQUIRE_EQUAL(cl_n_scan.template get<0>(i),scan);
}
}
BOOST_AUTO_TEST_CASE( test_scan_algo )
{
std::cout << "Test GPU obsolete scan" << "\n";
test_scan<unsigned int, unsigned char>(8192);
test_scan<unsigned int, unsigned char>(25);
test_scan<unsigned int, unsigned char>(139);
test_scan<unsigned int, unsigned char>(1025);
test_scan<unsigned int, unsigned short>(8192);
test_scan<unsigned int, unsigned short>(25);
test_scan<unsigned int, unsigned short>(139);
test_scan<unsigned int, unsigned short>(1025);
test_scan<unsigned int, unsigned int>(8192);
test_scan<unsigned int, unsigned int>(25);
test_scan<unsigned int, unsigned int>(139);
test_scan<unsigned int, unsigned int>(1025);
std::cout << "End GPU obsolete scan" << "\n";
// Test the cell list
}
BOOST_AUTO_TEST_CASE( test_scan_cub_wrapper )
{
std::cout << "Test scan CUB" << "\n";
openfpm::vector_gpu<aggregate<unsigned int>> input;
openfpm::vector_gpu<aggregate<unsigned int>> output;
openfpm::vector_gpu<aggregate<unsigned char>> temporal;
input.resize(10000);
output.resize(10000);
// fill input
for (size_t i = 0 ; i < 10000; i++)
{
input.template get<0>(i) = 10.0*(float)rand() / RAND_MAX;
}
input.template hostToDevice<0>();
mgpu::ofp_context_t context;
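// exclusive prefix sum through the CUB-backed openfpm::scan wrapper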
openfpm::scan((unsigned int *)input.template getDeviceBuffer<0>(),input.size(),(unsigned int *)output.template getDeviceBuffer<0>(),context);
output.template deviceToHost<0>();
size_t cnt = 0;
for (size_t i = 0 ; i < input.size() ; i++)
{
BOOST_REQUIRE_EQUAL(cnt,output.template get<0>(i));
cnt += input.template get<0>(i);
}
std::cout << "End scan CUB" << "\n";
// Test the cell list
}
BOOST_AUTO_TEST_SUITE_END()
| 2046965ad57889be01cf2f88387c6c7becaf9936.cu | #define BOOST_GPU_ENABLED __host__ __device__
#include "config.h"
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "util/cuda_util.hpp"
#include "util/boost/boost_array_openfpm.hpp"
#include "Vector/map_vector.hpp"
#include "scan_cuda.cuh"
#ifndef SCAN_WITH_CUB
#define SCAN_WITH_CUB
#endif
#include "scan_ofp.cuh"
BOOST_AUTO_TEST_SUITE( scan_tests )
template<typename cnt_type, typename ids_type>
void test_compress()
{
openfpm::vector<aggregate<cnt_type,cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type,cnt_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> compressed;
// fill some counting
cl_n.resize(12);
cl_n.template get<0>(0) = 3;
cl_n.template get<0>(1) = 5;
cl_n.template get<0>(2) = 8;
cl_n.template get<0>(3) = 1;
cl_n.template get<0>(4) = 0;
cl_n.template get<0>(5) = 0;
cl_n.template get<0>(6) = 21;
cl_n.template get<0>(7) = 4;
cl_n.template get<0>(8) = 4;
cl_n.template get<0>(9) = 6;
cl_n.template get<0>(10) = 10;
compressed.resize(cl_n.size());
auto ite = cl_n.getGPUIterator();
ite.thr.x /= 4;
cl_n.template hostToDevice<0>();
compress4<cnt_type,ids_type><<<ite.wthr,ite.thr>>>(cl_n.size(),
static_cast<cnt_type *>(cl_n.template getDeviceBuffer<0>()),
static_cast<ids_type *>(compressed.template getDeviceBuffer<0>()));
compressed.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(compressed.template get<0>(0),3);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(1),5);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(2),8);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(3),1);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(4),0);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(5),0);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(6),21);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(7),4);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(8),4);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(9),6);
BOOST_REQUIRE_EQUAL(compressed.template get<0>(10),10);
}
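// test_breduce: fill cl_n with a repeating pattern, accumulate the expected
// per-block sums on the host (block_red), then run the breduce kernel and
// check every block result against the host reference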
template<typename cnt_type, typename ids_type>
void test_breduce()
{
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> red;
cl_n.resize(8192);
constexpr int THREADS = 128;
constexpr int ratio = 4*sizeof(cnt_type)/sizeof(ids_type);
constexpr int red_tot = THREADS * ratio;
int nblocks = ((cl_n.size() / (ratio) ) + THREADS - 1 ) / THREADS;
// fill with some data
openfpm::vector<cnt_type> block_red;
block_red.resize(nblocks);
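// host-side reference: each block reduces red_tot = THREADS * ratio input
// values, so accumulate the expected sum for every chunk of red_tot elements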
for (size_t i = 0 ; i < cl_n.size() ; i++)
{
if ((i % red_tot)/256 == 0)
{
cl_n.template get<0>(i) = i%128;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 1)
{
cl_n.template get<0>(i) = i%7;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 2)
{
cl_n.template get<0>(i) = i%13;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 3)
{
cl_n.template get<0>(i) = i%17;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 4)
{
cl_n.template get<0>(i) = i%128;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 5)
{
cl_n.template get<0>(i) = i%7;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 6)
{
cl_n.template get<0>(i) = i%13;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
else if ((i % red_tot)/256 == 7)
{
cl_n.template get<0>(i) = i%17;
block_red.get(i/red_tot) += cl_n.template get<0>(i);
}
}
red.resize(nblocks);
cl_n.template hostToDevice<0>();
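// run the block reduction on the device and compare against the host reference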
breduce<THREADS/32,cnt_type,ids_type,ratio_reduction<cnt_type,ids_type>><<<nblocks,THREADS>>>(cl_n.size()/ratio*4,
static_cast<cnt_type *>(cl_n.template getDeviceBuffer<0>()),
static_cast<cnt_type *>(red.template getDeviceBuffer<0>()));
red.template deviceToHost<0>();
for (size_t i = 0 ; i < red.size() ; i++)
{
BOOST_REQUIRE_EQUAL(red.template get<0>(i),block_red.get(i));
}
}
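// test_bexscan: fill the per-block counters with 1 and check that bexscan
// turns them into the running prefix sum (base[i] == i + 1)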
template<typename cnt_type>
void test_bexscan()
{
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> base;
base.resize(500);
constexpr int THREADS = 128;
// fill with some data
for (size_t i = 0 ; i < base.size() ; i++)
{base.template get<0>(i) = 1;}
int nblocks = base.size();
base.template hostToDevice<0>();
bexscan<THREADS,cnt_type><<<1,THREADS,nblocks*sizeof(unsigned int)>>>(nblocks,
static_cast<cnt_type *>(base.template getDeviceBuffer<0>()));
base.template deviceToHost<0>();
for (size_t i = 0 ; i < base.size() ; i++)
{
BOOST_REQUIRE_EQUAL(base.template get<0>(i),i+1);
}
}
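// test_gexscan: run gexscan on the packed counters plus per-block bases and
// check the output against a host-side exclusive prefix sum of cl_n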
template<typename cnt_type, typename ids_type>
void test_gexscan()
{
size_t nb = 16;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> base;
openfpm::vector<aggregate<ids_type>,CudaMemory,typename memory_traits_inte<aggregate<ids_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n_scan;
constexpr int ratio = sizeof(cnt_type)/sizeof(ids_type);
constexpr int THREADS = 128;
constexpr int expand = THREADS * ratio;
base.resize(nb);
cl_n.resize(expand*nb);
cl_n_scan.resize(expand*nb);
// fill with some data
for (size_t i = 0 ; i < base.size() ; i++)
{base.template get<0>(i) = (i+1)*120*THREADS/4*ratio;}
for (size_t i = 0 ; i < cl_n.size() ; i++)
{cl_n.template get<0>(i) = i%16;}
int nblocks = cl_n.size() / ratio;
cl_n.template hostToDevice<0>();
base.template hostToDevice<0>();
gexscan<THREADS/32,ratio_extend<cnt_type,ids_type>> <<< cl_n.size() / ratio / THREADS, THREADS >>>(nblocks,
static_cast<typename ratio_extend<cnt_type,ids_type>::cnt_type4 *>(cl_n.template getDeviceBuffer<0>()),
static_cast<cnt_type *>(base.template getDeviceBuffer<0>()),
static_cast<typename ratio_extend<cnt_type,ids_type>::cnt_type4 *>(cl_n_scan.template getDeviceBuffer<0>()));
cl_n_scan.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(0,cl_n_scan.template get<0>(0));
size_t scan = 0;
for (size_t i = 1 ; i < cl_n_scan.size() ; i++)
{
scan += cl_n.template get<0>(i-1);
BOOST_REQUIRE_EQUAL(cl_n_scan.template get<0>(i),scan);
}
}
BOOST_AUTO_TEST_CASE (test_breduce_func )
{
test_breduce<unsigned int, unsigned char>();
test_breduce<unsigned int, unsigned short>();
test_breduce<unsigned int, unsigned int>();
test_breduce<unsigned int, char>();
test_breduce<unsigned int, short>();
test_breduce<unsigned int, int>();
test_breduce<int, unsigned char>();
test_breduce<int, unsigned short>();
test_breduce<int, unsigned int>();
test_breduce<int, char>();
test_breduce<int, short>();
test_breduce<int, int>();
}
BOOST_AUTO_TEST_CASE(test_compress_functions)
{
test_compress<unsigned int, unsigned char>();
test_compress<unsigned int, unsigned short>();
test_compress<unsigned int, unsigned int>();
test_compress<unsigned int, char>();
test_compress<unsigned int, short>();
test_compress<unsigned int, int>();
test_compress<int, unsigned char>();
test_compress<int, unsigned short>();
test_compress<int, unsigned int>();
test_compress<int, char>();
test_compress<int, short>();
test_compress<int, int>();
}
BOOST_AUTO_TEST_CASE(test_bexscan_functions)
{
test_bexscan<unsigned int>();
test_bexscan<int>();
}
BOOST_AUTO_TEST_CASE( test_gexscan_funcs )
{
std::cout << "Test cell list GPU base func" << "\n";
test_gexscan<unsigned int, unsigned char>();
test_gexscan<unsigned int, unsigned short>();
test_gexscan<unsigned int, unsigned int>();
test_gexscan<int, unsigned char>();
test_gexscan<int, unsigned short>();
test_gexscan<int, unsigned int>();
std::cout << "End cell list GPU" << "\n";
// Test the cell list
}
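// test_scan: run the (obsolete) scan helper on random counts and verify the
// result is an exclusive prefix sum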
template<typename cnt_type, typename ids_type>
void test_scan(size_t num)
{
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n;
openfpm::vector<aggregate<cnt_type>,CudaMemory,typename memory_traits_inte<aggregate<cnt_type>>::type,memory_traits_inte> cl_n_scan;
cl_n.resize(num);
// fill with some data
for (size_t i = 0 ; i < cl_n.size() ; i++)
{cl_n.template get<0>(i) = 255.0*rand()/RAND_MAX;}
cl_n.template hostToDevice<0>();
scan<cnt_type,ids_type> sc;
sc.scan_(cl_n,cl_n_scan);
cl_n_scan.template deviceToHost<0>();
BOOST_REQUIRE_EQUAL(0,cl_n_scan.template get<0>(0));
size_t scan = 0;
for (size_t i = 1 ; i < cl_n_scan.size() ; i++)
{
scan += cl_n.template get<0>(i-1);
BOOST_REQUIRE_EQUAL(cl_n_scan.template get<0>(i),scan);
}
}
BOOST_AUTO_TEST_CASE( test_scan_algo )
{
std::cout << "Test GPU obsolete scan" << "\n";
test_scan<unsigned int, unsigned char>(8192);
test_scan<unsigned int, unsigned char>(25);
test_scan<unsigned int, unsigned char>(139);
test_scan<unsigned int, unsigned char>(1025);
test_scan<unsigned int, unsigned short>(8192);
test_scan<unsigned int, unsigned short>(25);
test_scan<unsigned int, unsigned short>(139);
test_scan<unsigned int, unsigned short>(1025);
test_scan<unsigned int, unsigned int>(8192);
test_scan<unsigned int, unsigned int>(25);
test_scan<unsigned int, unsigned int>(139);
test_scan<unsigned int, unsigned int>(1025);
std::cout << "End GPU obsolete scan" << "\n";
// Test the cell list
}
BOOST_AUTO_TEST_CASE( test_scan_cub_wrapper )
{
std::cout << "Test scan CUB" << "\n";
openfpm::vector_gpu<aggregate<unsigned int>> input;
openfpm::vector_gpu<aggregate<unsigned int>> output;
openfpm::vector_gpu<aggregate<unsigned char>> temporal;
input.resize(10000);
output.resize(10000);
// fill input
for (size_t i = 0 ; i < 10000; i++)
{
input.template get<0>(i) = 10.0*(float)rand() / RAND_MAX;
}
input.template hostToDevice<0>();
mgpu::ofp_context_t context;
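// exclusive prefix sum through the CUB-backed openfpm::scan wrapper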
openfpm::scan((unsigned int *)input.template getDeviceBuffer<0>(),input.size(),(unsigned int *)output.template getDeviceBuffer<0>(),context);
output.template deviceToHost<0>();
size_t cnt = 0;
for (size_t i = 0 ; i < input.size() ; i++)
{
BOOST_REQUIRE_EQUAL(cnt,output.template get<0>(i));
cnt += input.template get<0>(i);
}
std::cout << "End scan CUB" << "\n";
// Test the cell list
}
BOOST_AUTO_TEST_SUITE_END()
|
aef776dddab516c009a3672d3ace27df45d68bbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/histogram.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename X, typename Z>
void _CUDA_G histogramKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, void *allocationPointer, void *reductionPointer, Nd4jLong numBins, X* min_val, X* max_val) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
auto dx = reinterpret_cast<X*>(xBuffer);
auto result = reinterpret_cast<Z*>(zBuffer);
__shared__ Z *bins;
__shared__ int length;
__shared__ Z *reductor;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
bins = (Z *) shmem;
reductor = ((Z *) allocationPointer) + (numBins * blockIdx.x);
length = shape::length(xShapeInfo);
}
__syncthreads();
X binSize = X((*max_val - *min_val) / numBins);
// nullify bins
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (Z) 0;
}
__syncthreads();
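// each thread bins its strided share of the input, accumulating into the
// shared-memory histogram with atomic adds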
for (int e = tid; e < length; e += blockDim.x * gridDim.x) {
int idx = int((dx[e] - *min_val) / binSize);
// clamp the bin index into the valid range [0, numBins - 1]
idx = math::nd4j_max(idx, 0);
idx = math::nd4j_min(idx, int(numBins - 1));
nd4j::math::atomics::nd4j_atomicAdd<Z>(&bins[idx], (Z)1);
}
__syncthreads();
// at this point all bins in shared memory are calculated, so we aggregate them now via threadfence trick
// transfer shared memory to reduction memory
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionPointer;
__shared__ bool amLast;
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
reductor[e] = bins[e];
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
// nullify shared memory for future accumulation
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (Z) 0;
}
// accumulate reduced bins
for (int r = 0; r < gridDim.x; r++) {
Z *ptrBuf = ((Z *)allocationPointer) + (r * numBins);
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
math::atomics::nd4j_atomicAdd(&bins[e], ptrBuf[e]);
}
}
__syncthreads();
// write them out to Z
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
} else {
// if there's only 1 block - just write away data
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
}
template <typename X, typename Z>
static void histogram_(nd4j::LaunchContext *context, void *xBuffer, Nd4jLong *xShapeInfo, Nd4jLong *dxShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong numBins, void* min_val, void* max_val) {
int numThreads = 256;
int numBlocks = nd4j::math::nd4j_max<int>(256, nd4j::math::nd4j_min<int>(1, shape::length(xShapeInfo) / numThreads));
int workspaceSize = numBlocks * numBins;
auto tmp = NDArrayFactory::create<Z>('c', {workspaceSize});
hipLaunchKernelGGL(( histogramKernel<X, Z>), dim3(numBlocks), dim3(numThreads), 32768, *context->getCudaStream(), xBuffer, dxShapeInfo, zBuffer, zShapeInfo, tmp.getSpecialBuffer(), context->getReductionPointer(), numBins, reinterpret_cast<X*>(min_val), reinterpret_cast<X*>(max_val));
hipStreamSynchronize(*context->getCudaStream());
}
void histogramHelper(nd4j::LaunchContext *context, NDArray &input, NDArray &output) {
Nd4jLong numBins = output.lengthOf();
NDArray::registerSpecialUse({&output}, {&input});
auto min_val = input.reduceNumber(reduce::SameOps::Min);
auto max_val = input.reduceNumber(reduce::SameOps::Max);
// min_val.printIndexedBuffer("MIN");
// max_val.printIndexedBuffer("MAX");
BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), histogram_, (context, input.specialBuffer(), input.shapeInfo(), input.specialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), numBins, min_val.specialBuffer(), max_val.specialBuffer()), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
}
}
} | aef776dddab516c009a3672d3ace27df45d68bbe.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/histogram.h>
#include <NDArrayFactory.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename X, typename Z>
void _CUDA_G histogramKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, void *allocationPointer, void *reductionPointer, Nd4jLong numBins, X* min_val, X* max_val) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
auto dx = reinterpret_cast<X*>(xBuffer);
auto result = reinterpret_cast<Z*>(zBuffer);
__shared__ Z *bins;
__shared__ int length;
__shared__ Z *reductor;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
bins = (Z *) shmem;
reductor = ((Z *) allocationPointer) + (numBins * blockIdx.x);
length = shape::length(xShapeInfo);
}
__syncthreads();
X binSize = X((*max_val - *min_val) / numBins);
// nullify bins
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (Z) 0;
}
__syncthreads();
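// each thread bins its strided share of the input, accumulating into the
// shared-memory histogram with atomic adds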
for (int e = tid; e < length; e += blockDim.x * gridDim.x) {
int idx = int((dx[e] - *min_val) / binSize);
// clamp the bin index into the valid range [0, numBins - 1]
idx = math::nd4j_max(idx, 0);
idx = math::nd4j_min(idx, int(numBins - 1));
nd4j::math::atomics::nd4j_atomicAdd<Z>(&bins[idx], (Z)1);
}
__syncthreads();
// at this point all bins in shared memory are calculated, so we aggregate them now via threadfence trick
// transfer shared memory to reduction memory
if (gridDim.x > 1) {
unsigned int *tc = (unsigned int *)reductionPointer;
__shared__ bool amLast;
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
reductor[e] = bins[e];
}
__threadfence();
__syncthreads();
if (threadIdx.x == 0) {
unsigned int ticket = atomicInc(&tc[16384], gridDim.x);
amLast = (ticket == gridDim.x - 1);
}
__syncthreads();
if (amLast) {
tc[16384] = 0;
// nullify shared memory for future accumulation
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
bins[e] = (Z) 0;
}
// accumulate reduced bins
for (int r = 0; r < gridDim.x; r++) {
Z *ptrBuf = ((Z *)allocationPointer) + (r * numBins);
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
math::atomics::nd4j_atomicAdd(&bins[e], ptrBuf[e]);
}
}
__syncthreads();
// write them out to Z
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
} else {
// if there's only 1 block - just write away data
for (int e = threadIdx.x; e < numBins; e += blockDim.x) {
result[e] = bins[e];
}
}
}
template <typename X, typename Z>
static void histogram_(nd4j::LaunchContext *context, void *xBuffer, Nd4jLong *xShapeInfo, Nd4jLong *dxShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong numBins, void* min_val, void* max_val) {
int numThreads = 256;
int numBlocks = nd4j::math::nd4j_max<int>(256, nd4j::math::nd4j_min<int>(1, shape::length(xShapeInfo) / numThreads));
int workspaceSize = numBlocks * numBins;
auto tmp = NDArrayFactory::create<Z>('c', {workspaceSize});
histogramKernel<X, Z><<<numBlocks, numThreads, 32768, *context->getCudaStream()>>>(xBuffer, dxShapeInfo, zBuffer, zShapeInfo, tmp.getSpecialBuffer(), context->getReductionPointer(), numBins, reinterpret_cast<X*>(min_val), reinterpret_cast<X*>(max_val));
cudaStreamSynchronize(*context->getCudaStream());
}
void histogramHelper(nd4j::LaunchContext *context, NDArray &input, NDArray &output) {
Nd4jLong numBins = output.lengthOf();
NDArray::registerSpecialUse({&output}, {&input});
auto min_val = input.reduceNumber(reduce::SameOps::Min);
auto max_val = input.reduceNumber(reduce::SameOps::Max);
// min_val.printIndexedBuffer("MIN");
// max_val.printIndexedBuffer("MAX");
BUILD_DOUBLE_SELECTOR(input.dataType(), output.dataType(), histogram_, (context, input.specialBuffer(), input.shapeInfo(), input.specialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), numBins, min_val.specialBuffer(), max_val.specialBuffer()), LIBND4J_TYPES, INTEGER_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
}
}
} |
5dd381e42fa6ff4e3006dd0aeafa26fb60d8fc1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Part of the following code in this file refs to
// https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu
//
// Copyright (c) 2017 Microsoft
// Licensed under The Apache-2.0 License [see LICENSE for details]
// \file deformable_psroi_pooling.cu
// \brief
// \author Yi Li, Guodong Zhang, Jifeng Dai
#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/deformable_conv_filter.cu.h"
#include "paddle/fluid/operators/deformable_conv_func.h"
#include "paddle/fluid/operators/deformable_conv_v1_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
static constexpr int kNumCUDAThread = 512;
static constexpr int kNumMaximumNumBlock = 4096;
static inline int NumBlock(const int N) {
return ::min((N + kNumCUDAThread - 1) / kNumCUDAThread,
kNumMaximumNumBlock);
}
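// col2im for deformable convolution: scatter gradients from the column buffer
// back to the input image, weighted by the bilinear-interpolation coefficients
// of the learned offsets (atomic adds, since sampling positions overlap)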
template <typename T>
__global__ void DeformableCol2imCUDAKernel(
const int nthreads, const T* data_col, const T* data_offset,
const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int deformable_group, const int height_col, const int width_col,
T* grad_im) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t thread = index; thread < nthreads; thread += offset) {
const int j = (thread / width_col / height_col / batch_size) % kernel_w;
const int i =
(thread / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
thread / width_col / height_col / batch_size / kernel_w / kernel_h;
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = thread % width_col;
int h_out = (thread / width_col) % height_col;
int b = (thread / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const T* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col *
width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
const T cur_inv_h_data = h_in + i * dilation_h + offset_h;
const T cur_inv_w_data = w_in + j * dilation_w + offset_w;
const T cur_top_grad = data_col[thread];
const int cur_h = static_cast<int>(cur_inv_h_data);
const int cur_w = static_cast<int>(cur_inv_w_data);
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
T weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy,
cur_w + dx, height, width);
platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos,
weight * cur_top_grad);
}
}
}
}
}
template <typename T>
inline void DeformableCol2im(const platform::CUDADeviceContext& ctx,
const T* data_col, const T* data_offset,
const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> kernel_shape,
const std::vector<int> pad,
const std::vector<int> stride,
const std::vector<int> dilation,
const int deformable_group, T* grad_im) {
int channel_per_deformable_group = im_shape[0] / deformable_group;
int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3];
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
hipLaunchKernelGGL(( DeformableCol2imCUDAKernel<T>),
dim3(blocks), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(),
num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2],
kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1],
deformable_group, col_shape[2], col_shape[3], grad_im);
}
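// gradient with respect to the offsets: differentiate the bilinear sampling
// locations used by the deformable im2col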
template <typename T>
__global__ void DeformableCol2imCoordCUDAKernel(
const int nthreads, const T* data_col, const T* data_im,
const T* data_offset, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, T* grad_offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
T val = 0, mval = 0;
const int w = i % width_col;
const int h = (i / width_col) % height_col;
const int c = (i / width_col / height_col) % offset_channels;
const int b = (i / width_col / height_col) / offset_channels;
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const T* data_col_ptr = data_col +
deformable_group_index *
channel_per_deformable_group * batch_size *
width_col * height_col;
const T* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h /
kernel_w * height * width;
const T* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col *
width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = offset_c / 2; col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
T inv_h = h_in + i * dilation_h + offset_h;
T inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos] *
DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width,
height, width, inv_h, inv_w);
}
const T weight = DmcnGetCoordinateWeight(
inv_h, inv_w, height, width, data_im_ptr + cnt * height * width,
width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[i] = val;
}
}
template <typename T>
inline void DeformableCol2imCoord(
const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im,
const T* data_offset, const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> kernel_shape, const std::vector<int> paddings,
const std::vector<int> strides, const std::vector<int> dilations,
const int deformable_groups, T* grad_offset) {
int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] *
col_shape[2] * col_shape[3] * deformable_groups;
int channel_per_deformable_group = col_shape[0] / deformable_groups;
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
hipLaunchKernelGGL(( DeformableCol2imCoordCUDAKernel<T>),
dim3(blocks), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(),
num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1],
im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1],
strides[0], strides[1], dilations[0], dilations[1],
channel_per_deformable_group, col_shape[1],
2 * kernel_shape[2] * kernel_shape[3] * deformable_groups,
deformable_groups, col_shape[2], col_shape[3], grad_offset);
}
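// deformable im2col: sample the input at offset (fractional) positions via
// bilinear interpolation and write the column buffer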
template <typename T>
__global__ void DeformableIm2colCUDAKernel(
const int nthreads, const T* data_im, const T* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int num_channels, const int deformable_group, const int height_col,
const int width_col, T* data_col) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
T* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const T* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const T* data_offset_ptr =
data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
T val = static_cast<T>(0);
const T h_im = h_in + i * dilation_h + offset_h;
const T w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val =
DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <typename T>
inline void DeformableIm2col(const platform::CUDADeviceContext& ctx,
const T* data_im, const T* data_offset,
const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> filter_shape,
const std::vector<int> paddings,
const std::vector<int> strides,
const std::vector<int> dilations,
const int deformable_groups, T* data_col) {
int channel_per_deformable_group = im_shape[0] / deformable_groups;
int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3];
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
// get outputs of im2col with offset by bilinear interpolation
hipLaunchKernelGGL(( DeformableIm2colCUDAKernel<T>),
dim3(blocks), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(),
num_kernels, data_im, data_offset, im_shape[1], im_shape[2],
filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0],
strides[1], dilations[0], dilations[1], channel_per_deformable_group,
col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3],
data_col);
}
template <typename T>
class DeformableConvV1CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const Tensor offset = *ctx.Input<Tensor>("Offset");
Tensor filter = *ctx.Input<Tensor>("Filter");
Tensor* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<CUDADeviceContext>();
const int groups = ctx.Attr<int>("groups");
const int deformable_groups = ctx.Attr<int>("deformable_groups");
const int im2col_step = ctx.Attr<int>("im2col_step");
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const int batch_size = static_cast<int>(input->dims()[0]);
std::vector<int64_t> filter_shape_vec(phi::vectorize(filter.dims()));
std::vector<int64_t> output_shape_vec(phi::vectorize(output->dims()));
// col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w}
std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size());
col_buffer_shape_vec[0] =
input->dims()[1] * filter.dims()[2] * filter.dims()[3];
col_buffer_shape_vec[1] = im2col_step;
for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) {
col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2];
}
framework::DDim col_shape(phi::make_ddim(col_buffer_shape_vec));
std::vector<int64_t> output_buffer_shape_vec(1);
output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] *
output_shape_vec[2] * output_shape_vec[3];
framework::DDim output_shape(phi::make_ddim(output_buffer_shape_vec));
Tensor col_buffer;
Tensor output_buffer;
col_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx);
output_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx);
int64_t M = output_shape_vec[1] / groups;
int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3];
int64_t K =
input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups;
Tensor weight_3d;
weight_3d.ShareDataWith(filter).Resize(phi::make_ddim({groups, M, K}));
Tensor col_buffer_3d;
col_buffer_3d.ShareDataWith(col_buffer)
.Resize(phi::make_ddim({groups, K, N}));
Tensor output_4d;
output_4d.ShareDataWith(output_buffer)
.Resize(phi::make_ddim({batch_size / im2col_step, groups, M, N}));
output_4d.mutable_data<T>(ctx.GetPlace());
framework::DDim input_shape =
phi::slice_ddim(input->dims(), 1, input->dims().size());
std::vector<int64_t> input_shape_vec = phi::vectorize(input_shape);
int input_dim = input->numel() / input->dims()[0];
int input_offset_dim = offset.numel() / offset.dims()[0];
auto blas = phi::funcs::GetBlas<CUDADeviceContext, T>(dev_ctx);
const T* input_ptr = input->data<T>();
const T* offset_ptr = offset.data<T>();
col_buffer.mutable_data<T>(ctx.GetPlace());
T* col_buffer_ptr = col_buffer.data<T>();
for (int i = 0; i < batch_size / im2col_step; ++i) {
DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec, filter_shape_vec,
paddings, strides, dilations, deformable_groups,
col_buffer_ptr);
Tensor output_3d = output_4d.Slice(i, i + 1).Resize(
phi::slice_ddim(output_4d.dims(), 1, output_4d.dims().size()));
// get the product of pixel and weight
for (int g = 0; g < groups; ++g) {
Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(weight_3d.dims(), 1, weight_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(output_3d.dims(), 1, output_3d.dims().size()));
blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0),
&output_3d_slice, T(0.0));
}
}
output->ShareDataWith(output_buffer)
.Resize(phi::make_ddim(output_shape_vec));
}
};
template <typename T>
class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset"));
const Tensor* input = ctx.Input<Tensor>("Input");
Tensor offset = *ctx.Input<Tensor>("Offset");
Tensor filter = *ctx.Input<Tensor>("Filter");
if (!input_grad && !filter_grad && !offset_grad) return;
int groups = ctx.Attr<int>("groups");
int deformable_groups = ctx.Attr<int>("deformable_groups");
int im2col_step = ctx.Attr<int>("im2col_step");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
auto& dev_ctx = ctx.template device_context<CUDADeviceContext>();
const int batch_size = static_cast<int>(input->dims()[0]);
framework::DDim input_shape =
phi::slice_ddim(input->dims(), 1, input->dims().size());
std::vector<int64_t> input_shape_vec = phi::vectorize(input_shape);
std::vector<int64_t> filter_shape_vec(phi::vectorize(filter.dims()));
std::vector<int64_t> output_shape_vec(phi::vectorize(output_grad->dims()));
std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size());
col_buffer_shape_vec[0] =
input->dims()[1] * filter.dims()[2] * filter.dims()[3];
col_buffer_shape_vec[1] = im2col_step;
for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) {
col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2];
}
framework::DDim col_shape(phi::make_ddim(col_buffer_shape_vec));
std::vector<int64_t> output_buffer_shape_vec(1);
output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] *
output_shape_vec[2] * output_shape_vec[3];
framework::DDim output_shape(phi::make_ddim(output_buffer_shape_vec));
Tensor col_buffer;
Tensor output_buffer;
col_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx);
output_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx);
output_buffer.ShareDataWith(*output_grad);
int64_t M =
input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3];
int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3];
int64_t K = output_shape_vec[1] / groups;
framework::DDim weight_3d_shape = {groups, K, M};
framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K,
N};
framework::DDim col_buffer_3d_shape = {groups, M, N};
framework::DDim filter_grad_shape = {groups, K, M};
Tensor weight_3d;
weight_3d.ShareDataWith(filter).Resize(weight_3d_shape);
Tensor out_grad_4d;
out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape);
Tensor col_buffer_3d;
col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape);
phi::funcs::SetConstant<CUDADeviceContext, T> set_zero;
auto blas = phi::funcs::GetBlas<CUDADeviceContext, T>(dev_ctx);
col_buffer.mutable_data<T>(ctx.GetPlace());
col_buffer_3d.mutable_data<T>(ctx.GetPlace());
out_grad_4d.mutable_data<T>(ctx.GetPlace());
int input_dim = input->numel() / input->dims()[0];
int input_offset_dim = offset.numel() / offset.dims()[0];
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
filter_grad->Resize(filter_grad_shape);
set_zero(dev_ctx, filter_grad, static_cast<T>(0));
}
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (offset_grad) {
offset_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, offset_grad, static_cast<T>(0));
}
for (int i = 0; i < batch_size / im2col_step; ++i) {
Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(
phi::slice_ddim(out_grad_4d.dims(), 1, out_grad_4d.dims().size()));
for (int g = 0; g < groups; ++g) {
Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(weight_3d.dims(), 1, weight_3d.dims().size()));
Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(out_grad_3d.dims(), 1, out_grad_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0),
&col_buffer_3d_slice, T(0.0));
}
col_buffer.Resize(col_shape);
T* col_buffer_ptr = col_buffer.data<T>();
const T* input_ptr = input->data<T>();
const T* offset_ptr = offset.data<T>();
if (offset_grad) {
T* offset_grad_ptr = offset_grad->data<T>();
// get grad of offset
DeformableCol2imCoord(
dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec,
col_buffer_shape_vec, filter_shape_vec, paddings, strides,
dilations, deformable_groups,
offset_grad_ptr + i * im2col_step * input_offset_dim);
}
if (input_grad) {
T* input_grad_ptr = input_grad->data<T>();
// get grad of input
DeformableCol2im(dev_ctx, col_buffer_ptr,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec,
filter_shape_vec, paddings, strides, dilations,
deformable_groups,
input_grad_ptr + i * im2col_step * input_dim);
input_grad->Resize(input->dims());
}
DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec, filter_shape_vec,
paddings, strides, dilations, deformable_groups,
col_buffer_ptr);
col_buffer_3d.Resize(col_buffer_3d_shape);
if (filter_grad) {
Tensor dweight_3d;
dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>(
filter_grad_shape, dev_ctx);
for (int g = 0; g < groups; ++g) {
Tensor out_grad_3d_slice =
out_grad_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
out_grad_3d.dims(), 1, out_grad_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(dweight_3d.dims(), 1, dweight_3d.dims().size()));
blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true,
T(1.0), &dweight_3d_slice, T(0.0));
}
hipLaunchKernelGGL(( FilterGradAddupCUDAKernel<T>), dim3(NumBlock(dweight_3d.numel())),
dim3(kNumCUDAThread), 0, dev_ctx.stream(),
dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(),
filter_grad->data<T>());
}
}
if (filter_grad) {
filter_grad->Resize(filter.dims());
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(deformable_conv_v1,
ops::DeformableConvV1CUDAKernel<float>,
ops::DeformableConvV1CUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad,
ops::DeformableConvV1GradCUDAKernel<float>,
ops::DeformableConvV1GradCUDAKernel<double>);
| 5dd381e42fa6ff4e3006dd0aeafa26fb60d8fc1c.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Part of the following code in this file refs to
// https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu
//
// Copyright (c) 2017 Microsoft
// Licensed under The Apache-2.0 License [see LICENSE for details]
// \file deformable_psroi_pooling.cu
// \brief
// \author Yi Li, Guodong Zhang, Jifeng Dai
#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/deformable_conv_filter.cu.h"
#include "paddle/fluid/operators/deformable_conv_func.h"
#include "paddle/fluid/operators/deformable_conv_v1_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using CUDADeviceContext = paddle::platform::CUDADeviceContext;
static constexpr int kNumCUDAThread = 512;
static constexpr int kNumMaximumNumBlock = 4096;
static inline int NumBlock(const int N) {
return std::min((N + kNumCUDAThread - 1) / kNumCUDAThread,
kNumMaximumNumBlock);
}
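// col2im for deformable convolution: scatter gradients from the column buffer
// back to the input image, weighted by the bilinear-interpolation coefficients
// of the learned offsets (atomic adds, since sampling positions overlap)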
template <typename T>
__global__ void DeformableCol2imCUDAKernel(
const int nthreads, const T* data_col, const T* data_offset,
const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int deformable_group, const int height_col, const int width_col,
T* grad_im) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t thread = index; thread < nthreads; thread += offset) {
const int j = (thread / width_col / height_col / batch_size) % kernel_w;
const int i =
(thread / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c =
thread / width_col / height_col / batch_size / kernel_w / kernel_h;
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = thread % width_col;
int h_out = (thread / width_col) % height_col;
int b = (thread / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const T* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col *
width_col;
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
const T cur_inv_h_data = h_in + i * dilation_h + offset_h;
const T cur_inv_w_data = w_in + j * dilation_w + offset_w;
const T cur_top_grad = data_col[thread];
const int cur_h = static_cast<int>(cur_inv_h_data);
const int cur_w = static_cast<int>(cur_inv_w_data);
for (int dy = -2; dy <= 2; dy++) {
for (int dx = -2; dx <= 2; dx++) {
if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 &&
cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1) {
int cur_bottom_grad_pos =
((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
T weight =
DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy,
cur_w + dx, height, width);
platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos,
weight * cur_top_grad);
}
}
}
}
}
template <typename T>
inline void DeformableCol2im(const platform::CUDADeviceContext& ctx,
const T* data_col, const T* data_offset,
const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> kernel_shape,
const std::vector<int> pad,
const std::vector<int> stride,
const std::vector<int> dilation,
const int deformable_group, T* grad_im) {
int channel_per_deformable_group = im_shape[0] / deformable_group;
int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3];
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
DeformableCol2imCUDAKernel<T><<<
blocks, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>(
num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2],
kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1],
dilation[0], dilation[1], channel_per_deformable_group, col_shape[1],
deformable_group, col_shape[2], col_shape[3], grad_im);
}
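// gradient with respect to the offsets: differentiate the bilinear sampling
// locations used by the deformable im2col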
template <typename T>
__global__ void DeformableCol2imCoordCUDAKernel(
const int nthreads, const T* data_col, const T* data_im,
const T* data_offset, const int channels, const int height, const int width,
const int kernel_h, const int kernel_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w, const int dilation_h,
const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, T* grad_offset) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
T val = 0, mval = 0;
const int w = i % width_col;
const int h = (i / width_col) % height_col;
const int c = (i / width_col / height_col) % offset_channels;
const int b = (i / width_col / height_col) / offset_channels;
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const T* data_col_ptr = data_col +
deformable_group_index *
channel_per_deformable_group * batch_size *
width_col * height_col;
const T* data_im_ptr = data_im +
(b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h /
kernel_w * height * width;
const T* data_offset_ptr = data_offset +
(b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col *
width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = offset_c / 2; col_c < channel_per_deformable_group;
col_c += col_step) {
const int col_pos =
(((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i =
(col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr =
(((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr =
(((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col +
w_out);
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
T inv_h = h_in + i * dilation_h + offset_h;
T inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) {
inv_h = inv_w = -2;
} else {
mval += data_col_ptr[col_pos] *
DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width,
height, width, inv_h, inv_w);
}
const T weight = DmcnGetCoordinateWeight(
inv_h, inv_w, height, width, data_im_ptr + cnt * height * width,
width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[i] = val;
}
}
template <typename T>
inline void DeformableCol2imCoord(
const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im,
const T* data_offset, const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> kernel_shape, const std::vector<int> paddings,
const std::vector<int> strides, const std::vector<int> dilations,
const int deformable_groups, T* grad_offset) {
int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] *
col_shape[2] * col_shape[3] * deformable_groups;
int channel_per_deformable_group = col_shape[0] / deformable_groups;
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
DeformableCol2imCoordCUDAKernel<T><<<
blocks, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>(
num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1],
im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1],
strides[0], strides[1], dilations[0], dilations[1],
channel_per_deformable_group, col_shape[1],
2 * kernel_shape[2] * kernel_shape[3] * deformable_groups,
deformable_groups, col_shape[2], col_shape[3], grad_offset);
}
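// The kernel below is the deformable counterpart of im2col: each column-buffer
// element samples the input at its offset-shifted position via bilinear
// interpolation, and samples falling outside the image contribute zero.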
template <typename T>
__global__ void DeformableIm2colCUDAKernel(
const int nthreads, const T* data_im, const T* data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group, const int batch_size,
const int num_channels, const int deformable_group, const int height_col,
const int width_col, T* data_col) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (size_t i = index; i < nthreads; i += offset) {
const int w_col = i % width_col;
const int h_col = (i / width_col) % height_col;
const int b_col = (i / width_col) / height_col % batch_size;
const int c_im = (i / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
T* data_col_ptr =
data_col +
((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
const T* data_im_ptr =
data_im + (b_col * num_channels + c_im) * height * width;
const T* data_offset_ptr =
data_offset +
(b_col * deformable_group + deformable_group_index) * 2 * kernel_h *
kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
const int data_offset_h_ptr =
((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr =
((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col +
w_col;
const T offset_h = data_offset_ptr[data_offset_h_ptr];
const T offset_w = data_offset_ptr[data_offset_w_ptr];
T val = static_cast<T>(0);
const T h_im = h_in + i * dilation_h + offset_h;
const T w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) {
val =
DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
template <typename T>
inline void DeformableIm2col(const platform::CUDADeviceContext& ctx,
const T* data_im, const T* data_offset,
const std::vector<int64_t> im_shape,
const std::vector<int64_t> col_shape,
const std::vector<int64_t> filter_shape,
const std::vector<int> paddings,
const std::vector<int> strides,
const std::vector<int> dilations,
const int deformable_groups, T* data_col) {
int channel_per_deformable_group = im_shape[0] / deformable_groups;
int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3];
int blocks = NumBlock(num_kernels);
int threads = kNumCUDAThread;
// get outputs of im2col with offset by bilinear interpolation
DeformableIm2colCUDAKernel<T><<<
blocks, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>(
num_kernels, data_im, data_offset, im_shape[1], im_shape[2],
filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0],
strides[1], dilations[0], dilations[1], channel_per_deformable_group,
col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3],
data_col);
}
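// Forward pass of deformable conv v1. The batch is processed in chunks of
// im2col_step images: each chunk is expanded into a column buffer by
// DeformableIm2col and then multiplied group by group,
//   weight_3d[g]  : (M, K), M = C_out / groups, K = C_in * k_h * k_w / groups
//   col_buffer[g] : (K, N), N = im2col_step * H_out * W_out
//   output_3d[g]  : (M, N)
// matching the M, N, K set up below.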
template <typename T>
class DeformableConvV1CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* input = ctx.Input<Tensor>("Input");
const Tensor offset = *ctx.Input<Tensor>("Offset");
Tensor filter = *ctx.Input<Tensor>("Filter");
Tensor* output = ctx.Output<Tensor>("Output");
output->mutable_data<T>(ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<CUDADeviceContext>();
const int groups = ctx.Attr<int>("groups");
const int deformable_groups = ctx.Attr<int>("deformable_groups");
const int im2col_step = ctx.Attr<int>("im2col_step");
const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
const int batch_size = static_cast<int>(input->dims()[0]);
std::vector<int64_t> filter_shape_vec(phi::vectorize(filter.dims()));
std::vector<int64_t> output_shape_vec(phi::vectorize(output->dims()));
// col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w}
std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size());
col_buffer_shape_vec[0] =
input->dims()[1] * filter.dims()[2] * filter.dims()[3];
col_buffer_shape_vec[1] = im2col_step;
for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) {
col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2];
}
framework::DDim col_shape(phi::make_ddim(col_buffer_shape_vec));
std::vector<int64_t> output_buffer_shape_vec(1);
output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] *
output_shape_vec[2] * output_shape_vec[3];
framework::DDim output_shape(phi::make_ddim(output_buffer_shape_vec));
Tensor col_buffer;
Tensor output_buffer;
col_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx);
output_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx);
int64_t M = output_shape_vec[1] / groups;
int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3];
int64_t K =
input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups;
Tensor weight_3d;
weight_3d.ShareDataWith(filter).Resize(phi::make_ddim({groups, M, K}));
Tensor col_buffer_3d;
col_buffer_3d.ShareDataWith(col_buffer)
.Resize(phi::make_ddim({groups, K, N}));
Tensor output_4d;
output_4d.ShareDataWith(output_buffer)
.Resize(phi::make_ddim({batch_size / im2col_step, groups, M, N}));
output_4d.mutable_data<T>(ctx.GetPlace());
framework::DDim input_shape =
phi::slice_ddim(input->dims(), 1, input->dims().size());
std::vector<int64_t> input_shape_vec = phi::vectorize(input_shape);
int input_dim = input->numel() / input->dims()[0];
int input_offset_dim = offset.numel() / offset.dims()[0];
auto blas = phi::funcs::GetBlas<CUDADeviceContext, T>(dev_ctx);
const T* input_ptr = input->data<T>();
const T* offset_ptr = offset.data<T>();
col_buffer.mutable_data<T>(ctx.GetPlace());
T* col_buffer_ptr = col_buffer.data<T>();
for (int i = 0; i < batch_size / im2col_step; ++i) {
DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec, filter_shape_vec,
paddings, strides, dilations, deformable_groups,
col_buffer_ptr);
Tensor output_3d = output_4d.Slice(i, i + 1).Resize(
phi::slice_ddim(output_4d.dims(), 1, output_4d.dims().size()));
// get the product of pixel and weight
for (int g = 0; g < groups; ++g) {
Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(weight_3d.dims(), 1, weight_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(output_3d.dims(), 1, output_3d.dims().size()));
blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0),
&output_3d_slice, T(0.0));
}
}
output->ShareDataWith(output_buffer)
.Resize(phi::make_ddim(output_shape_vec));
}
};
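// Backward pass: for every im2col_step chunk the column-buffer gradient is
// first recovered as weight^T * output_grad, then scattered into the offset
// gradient (DeformableCol2imCoord) and the input gradient (DeformableCol2im);
// the column buffer is then rebuilt with DeformableIm2col so the filter
// gradient can be accumulated as output_grad * col_buffer^T across all chunks.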
template <typename T>
class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const Tensor* output_grad =
ctx.Input<Tensor>(framework::GradVarName("Output"));
Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset"));
const Tensor* input = ctx.Input<Tensor>("Input");
Tensor offset = *ctx.Input<Tensor>("Offset");
Tensor filter = *ctx.Input<Tensor>("Filter");
if (!input_grad && !filter_grad && !offset_grad) return;
int groups = ctx.Attr<int>("groups");
int deformable_groups = ctx.Attr<int>("deformable_groups");
int im2col_step = ctx.Attr<int>("im2col_step");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
auto& dev_ctx = ctx.template device_context<CUDADeviceContext>();
const int batch_size = static_cast<int>(input->dims()[0]);
framework::DDim input_shape =
phi::slice_ddim(input->dims(), 1, input->dims().size());
std::vector<int64_t> input_shape_vec = phi::vectorize(input_shape);
std::vector<int64_t> filter_shape_vec(phi::vectorize(filter.dims()));
std::vector<int64_t> output_shape_vec(phi::vectorize(output_grad->dims()));
std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size());
col_buffer_shape_vec[0] =
input->dims()[1] * filter.dims()[2] * filter.dims()[3];
col_buffer_shape_vec[1] = im2col_step;
for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) {
col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2];
}
framework::DDim col_shape(phi::make_ddim(col_buffer_shape_vec));
std::vector<int64_t> output_buffer_shape_vec(1);
output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] *
output_shape_vec[2] * output_shape_vec[3];
framework::DDim output_shape(phi::make_ddim(output_buffer_shape_vec));
Tensor col_buffer;
Tensor output_buffer;
col_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx);
output_buffer =
ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx);
output_buffer.ShareDataWith(*output_grad);
int64_t M =
input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3];
int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3];
int64_t K = output_shape_vec[1] / groups;
framework::DDim weight_3d_shape = {groups, K, M};
framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K,
N};
framework::DDim col_buffer_3d_shape = {groups, M, N};
framework::DDim filter_grad_shape = {groups, K, M};
Tensor weight_3d;
weight_3d.ShareDataWith(filter).Resize(weight_3d_shape);
Tensor out_grad_4d;
out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape);
Tensor col_buffer_3d;
col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape);
phi::funcs::SetConstant<CUDADeviceContext, T> set_zero;
auto blas = phi::funcs::GetBlas<CUDADeviceContext, T>(dev_ctx);
col_buffer.mutable_data<T>(ctx.GetPlace());
col_buffer_3d.mutable_data<T>(ctx.GetPlace());
out_grad_4d.mutable_data<T>(ctx.GetPlace());
int input_dim = input->numel() / input->dims()[0];
int input_offset_dim = offset.numel() / offset.dims()[0];
if (filter_grad) {
filter_grad->mutable_data<T>(ctx.GetPlace());
filter_grad->Resize(filter_grad_shape);
set_zero(dev_ctx, filter_grad, static_cast<T>(0));
}
if (input_grad) {
input_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, input_grad, static_cast<T>(0));
}
if (offset_grad) {
offset_grad->mutable_data<T>(ctx.GetPlace());
set_zero(dev_ctx, offset_grad, static_cast<T>(0));
}
for (int i = 0; i < batch_size / im2col_step; ++i) {
Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(
phi::slice_ddim(out_grad_4d.dims(), 1, out_grad_4d.dims().size()));
for (int g = 0; g < groups; ++g) {
Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(weight_3d.dims(), 1, weight_3d.dims().size()));
Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(out_grad_3d.dims(), 1, out_grad_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0),
&col_buffer_3d_slice, T(0.0));
}
col_buffer.Resize(col_shape);
T* col_buffer_ptr = col_buffer.data<T>();
const T* input_ptr = input->data<T>();
const T* offset_ptr = offset.data<T>();
if (offset_grad) {
T* offset_grad_ptr = offset_grad->data<T>();
// get grad of offset
DeformableCol2imCoord(
dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec,
col_buffer_shape_vec, filter_shape_vec, paddings, strides,
dilations, deformable_groups,
offset_grad_ptr + i * im2col_step * input_offset_dim);
}
if (input_grad) {
T* input_grad_ptr = input_grad->data<T>();
// get grad of input
DeformableCol2im(dev_ctx, col_buffer_ptr,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec,
filter_shape_vec, paddings, strides, dilations,
deformable_groups,
input_grad_ptr + i * im2col_step * input_dim);
input_grad->Resize(input->dims());
}
DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim,
offset_ptr + i * im2col_step * input_offset_dim,
input_shape_vec, col_buffer_shape_vec, filter_shape_vec,
paddings, strides, dilations, deformable_groups,
col_buffer_ptr);
col_buffer_3d.Resize(col_buffer_3d_shape);
if (filter_grad) {
Tensor dweight_3d;
dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>(
filter_grad_shape, dev_ctx);
for (int g = 0; g < groups; ++g) {
Tensor out_grad_3d_slice =
out_grad_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
out_grad_3d.dims(), 1, out_grad_3d.dims().size()));
Tensor col_buffer_3d_slice =
col_buffer_3d.Slice(g, g + 1).Resize(phi::slice_ddim(
col_buffer_3d.dims(), 1, col_buffer_3d.dims().size()));
Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(
phi::slice_ddim(dweight_3d.dims(), 1, dweight_3d.dims().size()));
blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true,
T(1.0), &dweight_3d_slice, T(0.0));
}
FilterGradAddupCUDAKernel<T><<<NumBlock(dweight_3d.numel()),
kNumCUDAThread, 0, dev_ctx.stream()>>>(
dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(),
filter_grad->data<T>());
}
}
if (filter_grad) {
filter_grad->Resize(filter.dims());
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(deformable_conv_v1,
ops::DeformableConvV1CUDAKernel<float>,
ops::DeformableConvV1CUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad,
ops::DeformableConvV1GradCUDAKernel<float>,
ops::DeformableConvV1GradCUDAKernel<double>);
|
c81b83ea38ece0ca448282122e76b5b3a3a51234.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
// This example fuses gather before GEMM and scatter after GEMM into the same
// GEMM kernel. The gather and scatter operations are controlled by an index
// vector that selects rows or columns from the A, B, C or D matrices.
//
// Suppose all matrices are column major. The pseudo code of the fused kernel
// in this example is essentially
//
// for (int i = 0; i < problem_size.m(); ++i) {
//   for (int j = 0; j < options.index_size; ++j) {
//     int b_c_d_col = tensor_indices.at({j, 0});
//
//     for (int k = 0; k < problem_size.k(); ++k) {
//       tensor_d_ref.at({i, b_c_d_col}) +=
//         alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
//     }
//   }
// }
//
// Note that the index vector contains unique random integers whose maximum
// value is N - 1.
//
// The gather/scatter operation works best when we can still keep the largest
// alignment. For example, when the matrix is row major, we select rows. When
// the matrix is column major, we select columns.
//
// Not all combinations of gather and scatter are legal. For example, if A is
// row major and C/D is column major, we cannot gather A and scatter C/D at the
// same time.
//
// Also, for the sake of performance, we do not check that the index values are
// legal or that the index array pointer is valid.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
hipError_t error = hipSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int index_size;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({248, 1024, 1024}),
index_size(240),
reference_check(true),
iterations(20) { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("index_size", index_size);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "36_gather_scatter_fusion example\n\n"
<< " This example uses the CUTLASS Library to fuse gather/scatter into GEMM\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --index_size=<int> size of N dimension index\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/36_gather_scatter_fusion/36_gather_scatter_fusion --m=1024 --n=512 --k=1024 \\\n"
<< " --index_size=128\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product();
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices.
// Column Major for Matrix A, B and C.
//
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 16>;  // <- MMA Op tile M = 16, N = 8, K = 16
// 16, 8, 8 -> Turing
// 16, 8, 16 -> Ampere
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// Define the epilogue operation as LinearCombination. This is approximately equal to
//
// d_ij = alpha * sum_k(a_ik * b_kj) + c_ij
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per
// vectorized memory access. For half
// precision, it's 8 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 5;
// Ampere -> 4/5
// Turing -> 2
using Gemm = cutlass::gemm::device::GemmUniversal<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
8, /*alignmentA*/
8, /*alignmentB*/
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kNone,
false, /*GatherA*/
true, /*GatherB*/
true /*ScatterD*/
>;
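// The three trailing boolean template arguments enable gathering A, gathering B
// and scattering D, respectively; they correspond to the three index pointers
// passed in Gemm::Arguments below (nullptr for A, tensor_indices for B and D).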
int run(Options &options) {
// ================================================================================
// Initialization setup
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size_real(problem_size.m(),
options.index_size,
problem_size.k());
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_scattered(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d_scattered.host_view()); // <- fill matrix D on host with zeros
cutlass::HostTensor<int, LayoutOutput> tensor_indices(
{options.index_size, 1}); // <- Create scatter indices with dimensions val_len x 1
// <- Fill tensor_b_indices on host with unique random integers
  std::vector<int> to_fill(problem_size.n());  // candidate indices
  std::iota(std::begin(to_fill), std::end(to_fill), 0);  // fill with 0, 1, ..., problem_size.n() - 1
  std::shuffle(to_fill.begin(), to_fill.end(), std::mt19937(0));  // fixed seed keeps runs reproducible; std::random_shuffle is removed in C++17
memcpy(tensor_indices.host_data(), to_fill.data(), options.index_size * sizeof(int));
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_indices.sync_device();
tensor_c.sync_device();
tensor_d_scattered.sync_device();
// Initialize alpha/beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(1);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size_real, // <- problem size of matrix multiplication
split_k_slices, // <- k-dimension split factor
{alpha, beta}, // <- alpha, beta
tensor_a.device_data(), // <- reference to matrix A on device
tensor_b.device_data(), // <- reference to matrix B on device
tensor_c.device_data(), // <- reference to matrix C on device
tensor_d_scattered.device_data(), // <- reference to matrix D on device
tensor_a.layout().capacity(problem_size.mk()),
tensor_b.layout().capacity(cutlass::make_Coord(options.index_size, problem_size.n())),
tensor_c.layout().capacity(problem_size.mn()),
tensor_d_scattered.layout().capacity(problem_size.mn()),
tensor_a.layout().stride(),
tensor_b.layout().stride(),
tensor_c.layout().stride(),
tensor_d_scattered.layout().stride(),
nullptr, // <- pointer to index vector to gather A on device
tensor_indices.device_data(), // <- pointer to index vector to gather B on device
tensor_indices.device_data()}; // <- pointer to index vector to scatter D on device
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// CPU reference calculation
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_ref(problem_size.mn());
cutlass::reference::host::TensorFill(
tensor_d_ref.host_view()); // <- Fill matrix D on host with zeros
status = gemm_op();
hipDeviceSynchronize();
CUTLASS_CHECK(status);
if (options.reference_check) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < options.index_size; ++j) {
int b_c_d_col = tensor_indices.at({j, 0});
for (int k = 0; k < problem_size.k(); ++k) {
tensor_d_ref.at({i, b_c_d_col}) +=
alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
}
tensor_d_ref.at({i, b_c_d_col}) += (beta * tensor_c.at({i, b_c_d_col}));
}
}
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_scattered.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d_scattered.host_view(),
tensor_d_ref.host_view());
if (!passed) {
std::cout << "Failed!\n";
std::stringstream fname;
fname << "error_gather_GEMM_scatter_fusion.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "A =\n" << tensor_a.host_view()
<< "\nB =\n" << tensor_b.host_view()
<< "\nindices =\n" << tensor_indices.host_view()
<< "\nC =\n" << tensor_c.host_view()
<< "\n\nReference =\n" << tensor_d_ref.host_view()
<< "\nComputed =\n" << tensor_d_scattered.host_view();
return -1;
} else {
std::cout << "Passed!\n";
}
}
// Result structure
Result result;
//
// Construct events
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
std::cout << "Runtime: " << result.runtime_ms << " ms\n";
std::cout << " GFLOPs: " << result.gflops << "\n";
return 0;
}
int main(int argc, const char ** argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return 0;
}
if (!options.valid()) {
std::cerr << "Invalid problem." << "\n";
return -1;
}
return run(options);
}
| c81b83ea38ece0ca448282122e76b5b3a3a51234.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
// This example fuses gather before GEMM and scatter after GEMM into the same
// GEMM kernel. The gather and scatter operations are controlled by an index
// vector that selects rows or columns from the A, B, C or D matrices.
//
// Suppose all matrices are column major. The pseudo code of the fused kernel
// in this example is essentially
//
// for (int i = 0; i < problem_size.m(); ++i) {
//   for (int j = 0; j < options.index_size; ++j) {
//     int b_c_d_col = tensor_indices.at({j, 0});
//
//     for (int k = 0; k < problem_size.k(); ++k) {
//       tensor_d_ref.at({i, b_c_d_col}) +=
//         alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
//     }
//   }
// }
//
// Note that the index vector contains unique random integers whose maximum
// value is N - 1.
//
// The gather/scatter operation works best when we can still keep the largest
// alignment. For example, when the matrix is row major, we select rows. When
// the matrix is column major, we select columns.
//
// Not all combinations of gather and scatter are legal. For example, if A is
// row major and C/D is column major, we cannot gather A and scatter C/D at the
// same time.
//
// Also, for the sake of performance, we do not check that the index values are
// legal or that the index array pointer is valid.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
/////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int index_size;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({248, 1024, 1024}),
index_size(240),
reference_check(true),
iterations(20) { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("index_size", index_size);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "36_gather_scatter_fusion example\n\n"
<< " This example uses the CUTLASS Library to fuse gather/scatter into GEMM\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m=<int> GEMM M dimension\n"
<< " --n=<int> GEMM N dimension\n"
<< " --k=<int> GEMM K dimension\n"
<< " --index_size=<int> size of N dimension index\n"
<< " --iterations=<int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/36_gather_scatter_fusion/36_gather_scatter_fusion --m=1024 --n=512 --k=1024 \\\n"
<< " --index_size=128\n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product();
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices.
// Column Major for Matrix A, B and C.
//
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::ColumnMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128, N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 16>;  // <- MMA Op tile M = 16, N = 8, K = 16
// 16, 8, 8 -> Turing
// 16, 8, 16 -> Ampere
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// Define the epilogue operation as LinearCombination. This is approximately equal to
//
// d_ij = alpha * sum_k(a_ik * b_kj) + c_ij
//
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- this is the number of elements per
// vectorized memory access. For half
// precision, it's 8 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 5;
// Ampere -> 4/5
// Turing -> 2
using Gemm = cutlass::gemm::device::GemmUniversal<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages,
8, /*alignmentA*/
8, /*alignmentB*/
cutlass::arch::OpMultiplyAdd,
cutlass::ComplexTransform::kNone,
cutlass::ComplexTransform::kNone,
false, /*GatherA*/
true, /*GatherB*/
true /*ScatterD*/
>;
int run(Options &options) {
// ================================================================================
// Initialization setup
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size_real(problem_size.m(),
options.index_size,
problem_size.k());
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_scattered(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputA(7),
ElementInputA(-8),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(7),
ElementOutput(-8),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d_scattered.host_view()); // <- fill matrix D on host with zeros
cutlass::HostTensor<int, LayoutOutput> tensor_indices(
{options.index_size, 1}); // <- Create scatter indices with dimensions val_len x 1
// <- Fill tensor_b_indices on host with unique random integers
  std::vector<int> to_fill(problem_size.n());  // candidate indices
  std::iota(std::begin(to_fill), std::end(to_fill), 0);  // fill with 0, 1, ..., problem_size.n() - 1
  std::shuffle(to_fill.begin(), to_fill.end(), std::mt19937(0));  // fixed seed keeps runs reproducible; std::random_shuffle is removed in C++17
memcpy(tensor_indices.host_data(), to_fill.data(), options.index_size * sizeof(int));
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_indices.sync_device();
tensor_c.sync_device();
tensor_d_scattered.sync_device();
// Initialize alpha/beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(1);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{
cutlass::gemm::GemmUniversalMode::kGemm,
problem_size_real, // <- problem size of matrix multiplication
split_k_slices, // <- k-dimension split factor
{alpha, beta}, // <- alpha, beta
tensor_a.device_data(), // <- reference to matrix A on device
tensor_b.device_data(), // <- reference to matrix B on device
tensor_c.device_data(), // <- reference to matrix C on device
tensor_d_scattered.device_data(), // <- reference to matrix D on device
tensor_a.layout().capacity(problem_size.mk()),
tensor_b.layout().capacity(cutlass::make_Coord(options.index_size, problem_size.n())),
tensor_c.layout().capacity(problem_size.mn()),
tensor_d_scattered.layout().capacity(problem_size.mn()),
tensor_a.layout().stride(),
tensor_b.layout().stride(),
tensor_c.layout().stride(),
tensor_d_scattered.layout().stride(),
nullptr, // <- pointer to index vector to gather A on device
tensor_indices.device_data(), // <- pointer to index vector to gather B on device
tensor_indices.device_data()}; // <- pointer to index vector to scatter D on device
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// CPU reference calculation
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d_ref(problem_size.mn());
cutlass::reference::host::TensorFill(
tensor_d_ref.host_view()); // <- Fill matrix D on host with zeros
status = gemm_op();
cudaDeviceSynchronize();
CUTLASS_CHECK(status);
if (options.reference_check) {
for (int i = 0; i < problem_size.m(); ++i) {
for (int j = 0; j < options.index_size; ++j) {
int b_c_d_col = tensor_indices.at({j, 0});
for (int k = 0; k < problem_size.k(); ++k) {
tensor_d_ref.at({i, b_c_d_col}) +=
alpha * tensor_a.at({i, k}) * tensor_b.at({k, b_c_d_col});
}
tensor_d_ref.at({i, b_c_d_col}) += (beta * tensor_c.at({i, b_c_d_col}));
}
}
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d_scattered.sync_host();
bool passed = cutlass::reference::host::TensorEquals(
tensor_d_scattered.host_view(),
tensor_d_ref.host_view());
if (!passed) {
std::cout << "Failed!\n";
std::stringstream fname;
fname << "error_gather_GEMM_scatter_fusion.txt";
std::cerr << "Dumping results in " << fname.str() << "\n";
std::ofstream file(fname.str());
file
<< "A =\n" << tensor_a.host_view()
<< "\nB =\n" << tensor_b.host_view()
<< "\nindices =\n" << tensor_indices.host_view()
<< "\nC =\n" << tensor_c.host_view()
<< "\n\nReference =\n" << tensor_d_ref.host_view()
<< "\nComputed =\n" << tensor_d_scattered.host_view();
return -1;
} else {
std::cout << "Passed!\n";
}
}
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
std::cout << "Runtime: " << result.runtime_ms << " ms\n";
std::cout << " GFLOPs: " << result.gflops << "\n";
return 0;
}
int main(int argc, const char ** argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (!(props.major >= 8)) {
std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return 0;
}
if (!options.valid()) {
std::cerr << "Invalid problem." << "\n";
return -1;
}
return run(options);
}
|
419f3243eb720816f80b225395271b998c138b4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
hipMallocManaged(&a, size);
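  // hipMallocManaged allocates unified (managed) memory, so the same pointer is
  // valid both in host code (init, checkElementsAreDoubled) and inside the kernel.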
init(a, N);
size_t threads_per_block = 256;
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
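  // Round up so every element of `a` is covered; the kernel's bounds check
  // (i < N) discards the surplus threads in the last block.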
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
hipFree(a);
}
| 419f3243eb720816f80b225395271b998c138b4e.cu | #include <stdio.h>
/*
* Initialize array values on the host.
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* Double elements in parallel on the GPU.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* Check all elements have been doubled on the host.
*/
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
int N = 100;
int *a;
size_t size = N * sizeof(int);
/*
* Refactor this memory allocation to provide a pointer
* `a` that can be used on both the host and the device.
*/
cudaMallocManaged(&a, size);
init(a, N);
size_t threads_per_block = 256;
size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;
/*
* This launch will not work until the pointer `a` is also
* available to the device.
*/
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
/*
* Refactor to free memory that has been allocated to be
* accessed by both the host and the device.
*/
cudaFree(a);
}
|
9357f69b2c5459093746cbf2f9095991cd240ccf.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime_api.h>
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sys/time.h>
#include <algorithm>
// #include "printFunctions.cuh"
// #include "generateProblems.cuh"
// #include "topk.h"
using namespace std;
using namespace cub;
#define maxThreadsPerBlock 1024
/**
* Computes the histogram over the digit values of an array of keys that MUST have a length of an integer multiple of (KPT * blockDim.x).
* The padding to the integer multiple can be done by adding 0's at the end and subtracting the number of padded 0's from the final result's 0 bin.
 * The 2^NUM_BITS possible counts (0..2^NUM_BITS-1) will be placed in global_histo.
 * @param keys [IN] The keys for which to compute the histogram
 * @param digit [IN] Zero-based index of the NUM_BITS-wide digit to histogram, counted from the most significant digit
 * @param global_histo [OUT] The array of element counts, MUST be 2^NUM_BITS (256 for NUM_BITS == 8) in size.
 * @param per_block_histo [OUT] Unused here; the per-block histogram writes are commented out in the kernel body.
*/
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory.
>
__global__ void rdxsrt_histogram(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT]; // local keys in a thread
uint tloc_masked[KPT];
__shared__ uint shared_bins[0x01<<NUM_BITS]; // allocate a shared histogram in shared memory
/*** INIT SHARED HISTO ***/
if(threadIdx.x < 32){
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * blockIdx.x * KPT);
// Load keys
// KeyLoader(block_offset, threadIdx.x).template LoadStrided<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
#if true || USE_RLE_HISTO
// Mask
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); // get the bin index
}
/*** COMPUTE HISTO ***/
uint rle = 1;
#pragma unroll
for(int i=1; i<KPT; i++){
if(tloc_masked[i] == tloc_masked[i-1]) // decrease the number of atomicAdd as much as possible
rle++;
else{
atomicAdd(&shared_bins[tloc_masked[i-1]], rle);
rle=1;
}
}
atomicAdd(&shared_bins[tloc_masked[KPT-1]], rle);
#else
#pragma unroll
for(int i=0; i<KPT; i++){
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
#endif
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]); // actually bucket_idx is 0 all the time (according to the code), thus we have global_histo index equal to shared_bins index
// per_block_histo[blockIdx.x*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory.
>
__global__ void rdxsrt_histogram_with_guards(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo, const IndexT total_keys, const int block_index_offset)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT];
uint tloc_masked[KPT];
__shared__ uint shared_bins[(0x01<<NUM_BITS) + 1];
/*** INIT SHARED HISTO ***/
if (threadIdx.x < 32) {
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * (block_index_offset + blockIdx.x) * KPT);
// Maximum number of keys the block may fetch
const IndexT block_max_num_keys = total_keys - block_offset;
// KeyLoader(block_offset, threadIdx.x).template LoadStridedWithGuards<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys, block_max_num_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
}
#pragma unroll
for(int i=0; i<KPT; i++){
// if(KeyLoader(block_offset, threadIdx.x).ThreadIndexInBounds(block_max_num_keys, i)){
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
}
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]);
// per_block_histo[(block_index_offset + blockIdx.x)*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
/**
* Makes a single pass over the input array to find entries whose digit at the given position is equal to or greater
* than the selected digit value. Entries equal to the digit value are written to keys_buffer for future processing;
* entries greater than it are written to the output array.
* @param d_keys_in [IN] The keys for which to compute the histogram
* @param d_values_in [IN] The values corresponding to the keys
* @param digit [IN] Digit index (0 => highest digit, 3 => lowest digit for 32-bit)
* @param digit_val [IN] Digit value.
* @param num_items [IN] Number of entries.
* @param d_keys_buffer [OUT] Entries with x[digit] = digit_val.
* @param d_keys_out [OUT] Entries with x[digit] > digit_val.
* @param d_values_buffer [OUT] Entry values with x[digit] = digit_val.
* @param d_values_out [OUT] Entry values with x[digit] > digit_val.
* @param d_index_buffer [OUT] Index into d_keys_buffer.
* @param d_index_out [OUT] Index into d_keys_out.
*/
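// In other words: keys whose digit is strictly greater than digit_val are
// guaranteed to be among the top-k and are emitted directly to d_keys_out,
// while keys whose digit equals digit_val remain candidates and are compacted
// into d_keys_buffer for the next, less significant digit.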
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB // Number of threads per block
>
__global__ void select_kth_bucket(KeyT* d_keys_in, unsigned int* d_values_in, const uint digit, const uint digit_val, uint num_items,
KeyT* d_keys_buffer, KeyT* d_keys_out, unsigned int* d_values_buffer, unsigned int* d_values_out, uint* d_index_buffer, uint* d_index_out)
{
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
// Specialize BlockLoad for a 1D block of TPB threads owning KPT integer items each
typedef cub::BlockLoad<UnsignedBits, TPB, KPT, BLOCK_LOAD_TRANSPOSE> BlockLoadT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, TPB, BLOCK_SCAN_RAKING> BlockScanT;
// a "tile" is the chunk of items processed by one thread block (TPB * KPT items)
const int tile_size = TPB * KPT;
int tile_idx = blockIdx.x; // Current tile index
int tile_offset = tile_idx * tile_size;
// Allocate shared memory for BlockLoad
__shared__ union TempStorage
{
typename BlockLoadT::TempStorage load_items;
typename BlockScanT::TempStorage scan;
int offset[1];
UnsignedBits raw_exchange[2 * TPB * KPT];
} temp_storage;
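// The union lets BlockLoad, BlockScan and the raw_exchange staging buffer
// share one shared-memory allocation; the __syncthreads() calls between the
// different uses keep them from overlapping in time. raw_exchange holds the
// compacted keys in [0, tile_size) and their values in [tile_size, 2*tile_size).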
// Load a segment of consecutive items that are blocked across threads
UnsignedBits key_entries[KPT];
unsigned int value_entries[KPT];
/*float payload_entries[KPT];*/
int selection_flags[KPT];
int selection_indices[KPT];
int num_tiles = (num_items + tile_size - 1) / tile_size;
int num_tile_items = tile_size;
bool is_last_tile = false;
if (tile_idx == num_tiles - 1) {
num_tile_items = num_items - tile_offset;
is_last_tile = true;
}
// Load keys and values
if (is_last_tile) {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries, num_tile_items);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries, num_tile_items);
}
else {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries);
}
__syncthreads();
/*** Step 1: Find keys with digit value to selected digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key > digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
int num_selected;
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_out;
if (threadIdx.x == 0) {
// Find index into keys_out array
index_out = atomicAdd(d_index_out, num_selected);
temp_storage.offset[0] = index_out;
}
__syncthreads();
index_out = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out matched entries to output array
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_out)[index_out + item] = temp_storage.raw_exchange[item];
d_values_out[index_out + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
#if 0
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
payload_out[num_selections_prefix + item] = temp_storage.raw_exchange[tile_size + item];
}
#endif
}
/*** Step 2: Find entries that have digit equal to digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key == digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_buffer;
if (threadIdx.x == 0) {
index_buffer = atomicAdd(d_index_buffer, num_selected);
temp_storage.offset[0] = index_buffer;
}
__syncthreads();
index_buffer = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out output entries
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_buffer)[index_buffer + item] = temp_storage.raw_exchange[item];
d_values_buffer[index_buffer + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
}
}
__global__ void set_index_array(unsigned int* array, unsigned int len) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
while (i < len) {
array[i] = i;
i += gridSize;
}
}
#define KPT 16
#define TPB 384
#define DIGIT_BITS 8
hipError_t CUDARadixSelectTopK(torch::Tensor d_keys_in,
torch::Tensor d_indices_in,
unsigned int num_items,
unsigned int k,
float *d_keys_out,
unsigned int *d_values_out) {
hipError_t error = hipSuccess;
// get helper buffers
// unsigned int *d_histogram = buf->histogram;
// unsigned int *d_index_out = buf->index_out;
// unsigned int *d_index_buffer = buf->index_buffer;
// float* keys_double_buffer[2] = {buf->keys_buffer0, buf->keys_buffer1};
// unsigned int* values_double_buffer[2] = {buf->value_buffer0, buf->value_buffer1};
unsigned char current_keys_buffer = 0;
//initialize buffer with empty tensor
//unsigned int *d_histogram = (uint*)torch::zeros(256*128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_out = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_buffer = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
unsigned int *d_histogram, *d_index_out, *d_index_buffer;
hipMalloc(&d_histogram, 256*128);
hipMalloc(&d_index_out, 128);
hipMalloc(&d_index_buffer, 128);
torch::Tensor keys_double_tensor[2] = {d_keys_in.clone(), d_keys_in.clone()};
torch::Tensor indices_double_tensor[2] = {d_indices_in.clone(), d_indices_in.clone()};
float* keys_double_buffer[2] = {(float*)keys_double_tensor[0].data_ptr(), (float*)keys_double_tensor[1].data_ptr()};
unsigned int* values_double_buffer[2] = {(unsigned int*)indices_double_tensor[0].data_ptr(), (unsigned int*)indices_double_tensor[1].data_ptr()};
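// Double buffering: each pass compacts the keys/values whose digit equals the
// selected digit value into the other buffer, so the next pass only has to
// scan the remaining candidates for the kth largest element.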
//float* keys_double_buffer[2] = {(float*)d_keys_in.clone().data_ptr(),
// (float*)d_keys_in.clone().data_ptr()};
//unsigned int* values_double_buffer[2] = {(uint*)d_indices_in.clone().data_ptr(),
// (uint*)d_indices_in.clone().data_ptr()};
// Set the index into output array to 0.
hipMemset(d_index_out, 0, 4);
unsigned int KPB = KPT * TPB;
unsigned int *h_histogram = new unsigned int[256];
// set value array (index)
// int blocksPerGrid = (int) ceil(1.0 * num_items / TPB);
// set_index_array<<<blocksPerGrid, TPB, 0>>>(values_double_buffer[current_keys_buffer], num_items);
// enumerate each digit (32-bit data (float32) at 8 bits per pass, so that's 4 digits in total)
for (unsigned int digit = 0; digit < 4; digit++) {
unsigned int num_blocks = num_items / KPB;// Pass-0 rough processing blocks (floor on purpose)
unsigned int processed_elements = num_blocks * KPB;// Pass-0 number of rough processed elements
unsigned int remaining_elements = num_items - processed_elements;// Do the remaining elements with a check in the inner loop
unsigned int remainder_blocks = (KPB - 1 + remaining_elements) / KPB;// Number of blocks required for remaining elements (typically 0 or 1)
/******************************************************************************************/
/*                                 Calculate Histogram                                    */
/******************************************************************************************/
// Zero out the histogram
hipMemset(d_histogram, 0, 256 * sizeof(int));
float* d_current_keys_in = keys_double_buffer[current_keys_buffer];
unsigned int* d_current_value_in = values_double_buffer[current_keys_buffer];
if (num_blocks > 0)
hipLaunchKernelGGL(( rdxsrt_histogram<float, uint, DIGIT_BITS, KPT, TPB, 9>), dim3(num_blocks), dim3(TPB), 0, 0, d_current_keys_in, digit, d_histogram);
if (remaining_elements > 0)
hipLaunchKernelGGL(( rdxsrt_histogram_with_guards<float, uint, DIGIT_BITS, KPT, TPB, 9>), dim3(remainder_blocks), dim3(TPB), 0, 0, d_current_keys_in, digit, d_histogram, num_items, num_blocks);
/******************************************************************************************/
/* Find the bin which contains the Kth largest element */
/******************************************************************************************/
hipMemcpy(h_histogram, d_histogram, 256 * sizeof(uint), hipMemcpyDeviceToHost);
// currently we find the bin on host, hence we need to synchronize the stream
// hipStreamSynchronize(stream);
unsigned int rolling_sum = 0;
unsigned int digit_val;
for (int i = 255; i >= 0; i--) {
if ((rolling_sum + h_histogram[i]) > k) {
digit_val = i;
k -= rolling_sum;
break;
}
rolling_sum += h_histogram[i];
}
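// After the scan, digit_val is the bucket containing the kth largest key:
// all keys in higher buckets (rolling_sum of them) belong to the top-k, and
// k has been reduced to the number of keys still needed from this bucket.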
hipMemset(d_index_buffer, 0, 4);
hipLaunchKernelGGL(( select_kth_bucket<float, unsigned int, DIGIT_BITS, KPT, TPB>), dim3(num_blocks + remainder_blocks), dim3(TPB), 0, 0, d_current_keys_in,
d_current_value_in,
digit,
digit_val,
num_items,
keys_double_buffer[1-current_keys_buffer],
d_keys_out,
values_double_buffer[1-current_keys_buffer],
d_values_out,
d_index_buffer,
d_index_out);
uint h_index_out;
uint h_index_buffer;
hipMemcpy(&h_index_out, d_index_out, sizeof(uint), hipMemcpyDeviceToHost);
hipMemcpy(&h_index_buffer, d_index_buffer, sizeof(uint), hipMemcpyDeviceToHost);
// hipStreamSynchronize(stream);
// Update number of items to reflect reduced number of elements.
num_items = h_index_buffer;
if (k == 0) break;
else if (k != 0 && digit == 3) {
// We are at the last digit and k != 0 implies that the kth value has repetitions.
// Copy any of the repeated values (and keys!) to the output array to complete the top-k.
hipMemcpy(d_keys_out + h_index_out, keys_double_buffer[1-current_keys_buffer] ,k * sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(d_values_out + h_index_out, values_double_buffer[1-current_keys_buffer], k * sizeof(float), hipMemcpyDeviceToDevice);
k -= k;
}
current_keys_buffer = 1 - current_keys_buffer;
}
delete[] h_histogram;
hipFree(d_histogram);
hipFree(d_index_out);
hipFree(d_index_buffer);
return error;
}
// __global__ void _Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int n)
// {
// // set thread ID
// unsigned int tid = threadIdx.x;
// unsigned int gridSize = blockDim.x * gridDim.x;
// unsigned int i = blockIdx.x * blockDim.x + tid;
// unsigned int blockSize = blockDim.x;
// while (i < n) {
// dst_data[i] = (int)src_data[i];
// i += gridSize;
// }
// }
// void Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int num_elements)
// {
// int blocksPerGrid = (int) ceil(1.0 * num_elements / maxThreadsPerBlock);
// _Uint32ToInt32<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst_data, src_data, num_elements);
// }
std::vector<torch::Tensor> rdxtopk_cuda(
torch::Tensor input,torch::Tensor indices, unsigned int k) {
unsigned int num_items = input.numel();
auto d_keys_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kFloat32).device(input.device()));
auto d_values_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kInt).device(input.device()));
CUDARadixSelectTopK(input,indices,
num_items,
k,
(float*)d_keys_out.data_ptr(),
(uint*)d_values_out.data_ptr());
// Uint32ToInt32((int*)d_values_out.data_ptr(), (uint*)d_values_out.data_ptr(), k);
return {d_keys_out, d_values_out};
}
| 9357f69b2c5459093746cbf2f9095991cd240ccf.cu | #include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <curand.h>
#include <cuda_runtime_api.h>
#include "cub/device/device_radix_sort.cuh"
#include "cub/util_allocator.cuh"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <sys/time.h>
#include <algorithm>
// #include "printFunctions.cuh"
// #include "generateProblems.cuh"
// #include "topk.h"
using namespace std;
using namespace cub;
#define maxThreadsPerBlock 1024
/**
* Computes the histogram over the digit values of an array of keys that MUST have a length of an integer multiple of (KPT * blockDim.x).
* The padding to the integer multiple can be done by adding 0's at the end and subtracting the number of padded 0's from the final result's 0 bin.
* The 2^NUM_BITS possible counts (0..2^NUM_BITS-1) will be placed in global_histo.
* @param keys [IN] The keys for which to compute the histogram
* @param digit [IN]
* @param global_histo [OUT] The array of element counts, MUST be 256 in size.
* @param per_block_histo [OUT]
*/
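// Digit extraction used throughout this file: a "digit" is a NUM_BITS-wide
// slice of the (twiddled) key, counted from the most significant end. For
// 32-bit keys with NUM_BITS = 8, digit 0 shifts by 24 bits and selects the
// top byte, while digit 3 selects the bottom byte.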
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory.
>
__global__ void rdxsrt_histogram(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT]; // local keys in a thread
uint tloc_masked[KPT];
__shared__ uint shared_bins[0x01<<NUM_BITS]; // allocate a shared histogram in shared memory
/*** INIT SHARED HISTO ***/
if(threadIdx.x < 32){
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * blockIdx.x * KPT);
// Load keys
// KeyLoader(block_offset, threadIdx.x).template LoadStrided<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
#if true || USE_RLE_HISTO
// Mask
#pragma unroll
for (int i=0; i<KPT; i++) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1); // get the bin index
}
/*** COMPUTE HISTO ***/
uint rle = 1;
#pragma unroll
for(int i=1; i<KPT; i++){
if(tloc_masked[i] == tloc_masked[i-1]) // reduce the number of atomicAdd calls as much as possible
rle++;
else{
atomicAdd(&shared_bins[tloc_masked[i-1]], rle);
rle=1;
}
}
atomicAdd(&shared_bins[tloc_masked[KPT-1]], rle);
#else
#pragma unroll
for(int i=0; i<KPT; i++){
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
#endif
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]); // actually bucket_idx is 0 all the time (according to the code), thus we have global_histo index equal to shared_bins index
// per_block_histo[blockIdx.x*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB, // Number of threads per block
int PRE_SORT_RUNS_LENGTH // For values greater than 1, this causes to sort a thread's keys by runs of a given length to improve run-length encoded updates to shared memory.
>
__global__ void rdxsrt_histogram_with_guards(KeyT *__restrict__ keys, const uint digit, IndexT *global_histo, const IndexT total_keys, const int block_index_offset)
{
/*** TYPEDEFs***/
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
/*typedef LoadUnit<IndexT, RDXSRT_LOAD_STRIDE_WARP, KPT, TPB> KeyLoader;*/
/*** DECLARATIONS ***/
UnsignedBits tloc_keys[KPT];
uint tloc_masked[KPT];
__shared__ uint shared_bins[(0x01<<NUM_BITS) + 1];
/*** INIT SHARED HISTO ***/
if (threadIdx.x < 32) {
#pragma unroll
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
shared_bins[i+threadIdx.x] = 0;
}
}
__syncthreads();
/*** GET KEYS & PREPARE KEYS FOR HISTO ***/
// Bucket index used to determine the memory offset of the bucket's global histogram
const uint bucket_idx = 0;
// This thread block's keys memory offset, pointing to the index of its first key
const IndexT block_offset = (blockDim.x * (block_index_offset + blockIdx.x) * KPT);
// Maximum number of keys the block may fetch
const IndexT block_max_num_keys = total_keys - block_offset;
// KeyLoader(block_offset, threadIdx.x).template LoadStridedWithGuards<UnsignedBits, KeyT, 0, KPT>(keys, tloc_keys, block_max_num_keys);
#pragma unroll
for (int i=0; i<KPT; i++) {
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = reinterpret_cast<UnsignedBits*>(keys)[block_offset + threadIdx.x + blockDim.x * i];
}
}
#pragma unroll
for(int i=0; i<KPT; i++){
// if(KeyLoader(block_offset, threadIdx.x).ThreadIndexInBounds(block_max_num_keys, i)){
if ((threadIdx.x + blockDim.x * i) < block_max_num_keys) {
tloc_keys[i] = KeyTraits::TwiddleIn(tloc_keys[i]);
tloc_masked[i] = (tloc_keys[i]>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
atomicAdd(&shared_bins[tloc_masked[i]], 1);
}
}
// Make sure we've got the counts from all threads
__syncthreads();
/*** Write shared histo to global histo ***/
if(threadIdx.x < 32){
for(int i=0;i<(0x01<<NUM_BITS);i+=32){
atomicAdd(&global_histo[(0x01<<NUM_BITS)*bucket_idx+i+threadIdx.x], shared_bins[i+threadIdx.x]);
// per_block_histo[(block_index_offset + blockIdx.x)*(0x01<<NUM_BITS)+i+threadIdx.x] = shared_bins[i+threadIdx.x];
}
}
}
/**
* Makes a single pass over the input array to find entries whose digit at the given position is equal to or greater
* than the selected digit value. Entries equal to the digit value are written to keys_buffer for future processing;
* entries greater than it are written to the output array.
* @param d_keys_in [IN] The keys for which to compute the histogram
* @param d_values_in [IN] The values corresponding to the keys
* @param digit [IN] Digit index (0 => highest digit, 3 => lowest digit for 32-bit)
* @param digit_val [IN] Digit value.
* @param num_items [IN] Number of entries.
* @param d_keys_buffer [OUT] Entries with x[digit] = digit_val.
* @param d_keys_out [OUT] Entries with x[digit] > digit_val.
* @param d_values_buffer [OUT] Entry values with x[digit] = digit_val.
* @param d_values_out [OUT] Entry values with x[digit] > digit_val.
* @param d_index_buffer [OUT] Index into d_keys_buffer.
* @param d_index_out [OUT] Index into d_keys_out.
*/
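// In other words: keys whose digit is strictly greater than digit_val are
// guaranteed to be among the top-k and are emitted directly to d_keys_out,
// while keys whose digit equals digit_val remain candidates and are compacted
// into d_keys_buffer for the next, less significant digit.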
template<
typename KeyT, // Data type of the keys within device memory. Data will be twiddled (if necessary) to unsigned type
typename IndexT, // Data type used for key's offsets and counters (limits number of supported keys, uint = 2^32)
int NUM_BITS, // Number of bits being sorted at a time
int KPT, // Number of keys per thread
int TPB // Number of threads per block
>
__global__ void select_kth_bucket(KeyT* d_keys_in, unsigned int* d_values_in, const uint digit, const uint digit_val, uint num_items,
KeyT* d_keys_buffer, KeyT* d_keys_out, unsigned int* d_values_buffer, unsigned int* d_values_out, uint* d_index_buffer, uint* d_index_out)
{
typedef Traits<KeyT> KeyTraits;
typedef typename KeyTraits::UnsignedBits UnsignedBits;
// Specialize BlockLoad for a 1D block of TPB threads owning KPT integer items each
typedef cub::BlockLoad<UnsignedBits, TPB, KPT, BLOCK_LOAD_TRANSPOSE> BlockLoadT;
// Specialize BlockScan type for our thread block
typedef BlockScan<int, TPB, BLOCK_SCAN_RAKING> BlockScanT;
// a "tile" is the chunk of items processed by one thread block (TPB * KPT items)
const int tile_size = TPB * KPT;
int tile_idx = blockIdx.x; // Current tile index
int tile_offset = tile_idx * tile_size;
// Allocate shared memory for BlockLoad
__shared__ union TempStorage
{
typename BlockLoadT::TempStorage load_items;
typename BlockScanT::TempStorage scan;
int offset[1];
UnsignedBits raw_exchange[2 * TPB * KPT];
} temp_storage;
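// The union lets BlockLoad, BlockScan and the raw_exchange staging buffer
// share one shared-memory allocation; the __syncthreads() calls between the
// different uses keep them from overlapping in time. raw_exchange holds the
// compacted keys in [0, tile_size) and their values in [tile_size, 2*tile_size).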
// Load a segment of consecutive items that are blocked across threads
UnsignedBits key_entries[KPT];
unsigned int value_entries[KPT];
/*float payload_entries[KPT];*/
int selection_flags[KPT];
int selection_indices[KPT];
int num_tiles = (num_items + tile_size - 1) / tile_size;
int num_tile_items = tile_size;
bool is_last_tile = false;
if (tile_idx == num_tiles - 1) {
num_tile_items = num_items - tile_offset;
is_last_tile = true;
}
// Load keys and values
if (is_last_tile) {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries, num_tile_items);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries, num_tile_items);
}
else {
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<UnsignedBits*>(d_keys_in) + tile_offset, key_entries);
__syncthreads();
BlockLoadT(temp_storage.load_items).Load(reinterpret_cast<unsigned int*>(d_values_in) + tile_offset, value_entries);
}
__syncthreads();
/*** Step 1: Find keys with digit value to selected digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key > digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
int num_selected;
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_out;
if (threadIdx.x == 0) {
// Find index into keys_out array
index_out = atomicAdd(d_index_out, num_selected);
temp_storage.offset[0] = index_out;
}
__syncthreads();
index_out = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out matched entries to output array
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_out)[index_out + item] = temp_storage.raw_exchange[item];
d_values_out[index_out + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
#if 0
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
payload_out[num_selections_prefix + item] = temp_storage.raw_exchange[tile_size + item];
}
#endif
}
/*** Step 2: Find entries that have digit equal to digit value ***/
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
// Out-of-bounds items are not selected (selection_flags stays 0)
selection_flags[ITEM] = 0;
if (!is_last_tile || (int(threadIdx.x * KPT) + ITEM < num_tile_items)) {
UnsignedBits key = KeyTraits::TwiddleIn(key_entries[ITEM]);
uint masked_key = (key>>((sizeof(KeyT)*8)-(NUM_BITS*(digit+1))))&((0x01<<NUM_BITS)-1);
selection_flags[ITEM] = (masked_key == digit_val);
}
}
__syncthreads();
// Compute exclusive prefix sum
BlockScanT(temp_storage.scan).ExclusiveSum(selection_flags, selection_indices, num_selected);
__syncthreads();
if (num_selected > 0) {
int index_buffer;
if (threadIdx.x == 0) {
index_buffer = atomicAdd(d_index_buffer, num_selected);
temp_storage.offset[0] = index_buffer;
}
__syncthreads();
index_buffer = temp_storage.offset[0];
__syncthreads();
// Compact and scatter items
#pragma unroll
for (int ITEM = 0; ITEM < KPT; ++ITEM)
{
int local_scatter_offset = selection_indices[ITEM];
if (selection_flags[ITEM])
{
temp_storage.raw_exchange[local_scatter_offset] = key_entries[ITEM];
temp_storage.raw_exchange[tile_size + local_scatter_offset] = value_entries[ITEM];
/*temp_storage.raw_exchange[tile_size + local_scatter_offset] = payload_entries[ITEM];*/
}
}
__syncthreads();
// Write out output entries
for (int item = threadIdx.x; item < num_selected; item += TPB)
{
reinterpret_cast<UnsignedBits*>(d_keys_buffer)[index_buffer + item] = temp_storage.raw_exchange[item];
d_values_buffer[index_buffer + item] = temp_storage.raw_exchange[tile_size + item];
}
__syncthreads();
}
}
__global__ void set_index_array(unsigned int* array, unsigned int len) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int gridSize = blockDim.x * gridDim.x;
while (i < len) {
array[i] = i;
i += gridSize;
}
}
#define KPT 16
#define TPB 384
#define DIGIT_BITS 8
cudaError_t CUDARadixSelectTopK(torch::Tensor d_keys_in,
torch::Tensor d_indices_in,
unsigned int num_items,
unsigned int k,
float *d_keys_out,
unsigned int *d_values_out) {
cudaError error = cudaSuccess;
// get helper buffers
// unsigned int *d_histogram = buf->histogram;
// unsigned int *d_index_out = buf->index_out;
// unsigned int *d_index_buffer = buf->index_buffer;
// float* keys_double_buffer[2] = {buf->keys_buffer0, buf->keys_buffer1};
// unsigned int* values_double_buffer[2] = {buf->value_buffer0, buf->value_buffer1};
unsigned char current_keys_buffer = 0;
//initialize buffer with empty tensor
//unsigned int *d_histogram = (uint*)torch::zeros(256*128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_out = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
//unsigned int *d_index_buffer = (uint*)torch::zeros(128, torch::TensorOptions().dtype(torch::kInt).device(d_keys_in.device())).data_ptr();
unsigned int *d_histogram, *d_index_out, *d_index_buffer;
cudaMalloc(&d_histogram, 256*128);
cudaMalloc(&d_index_out, 128);
cudaMalloc(&d_index_buffer, 128);
torch::Tensor keys_double_tensor[2] = {d_keys_in.clone(), d_keys_in.clone()};
torch::Tensor indices_double_tensor[2] = {d_indices_in.clone(), d_indices_in.clone()};
float* keys_double_buffer[2] = {(float*)keys_double_tensor[0].data_ptr(), (float*)keys_double_tensor[1].data_ptr()};
unsigned int* values_double_buffer[2] = {(unsigned int*)indices_double_tensor[0].data_ptr(), (unsigned int*)indices_double_tensor[1].data_ptr()};
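// Double buffering: each pass compacts the keys/values whose digit equals the
// selected digit value into the other buffer, so the next pass only has to
// scan the remaining candidates for the kth largest element.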
//float* keys_double_buffer[2] = {(float*)d_keys_in.clone().data_ptr(),
// (float*)d_keys_in.clone().data_ptr()};
//unsigned int* values_double_buffer[2] = {(uint*)d_indices_in.clone().data_ptr(),
// (uint*)d_indices_in.clone().data_ptr()};
// Set the index into output array to 0.
cudaMemset(d_index_out, 0, 4);
unsigned int KPB = KPT * TPB;
unsigned int *h_histogram = new unsigned int[256];
// set value array (index)
// int blocksPerGrid = (int) ceil(1.0 * num_items / TPB);
// set_index_array<<<blocksPerGrid, TPB, 0>>>(values_double_buffer[current_keys_buffer], num_items);
// enumerate each digit (32-bit data (float32) at 8 bits per pass, so that's 4 digits in total)
for (unsigned int digit = 0; digit < 4; digit++) {
unsigned int num_blocks = num_items / KPB;// Pass-0 rough processing blocks (floor on purpose)
unsigned int processed_elements = num_blocks * KPB;// Pass-0 number of rough processed elements
unsigned int remaining_elements = num_items - processed_elements;// Do the remaining elements with a check in the inner loop
unsigned int remainder_blocks = (KPB - 1 + remaining_elements) / KPB;// Number of blocks required for remaining elements (typically 0 or 1)
/******************************************************************************************/
/*                                 Calculate Histogram                                    */
/******************************************************************************************/
// Zero out the histogram
cudaMemset(d_histogram, 0, 256 * sizeof(int));
float* d_current_keys_in = keys_double_buffer[current_keys_buffer];
unsigned int* d_current_value_in = values_double_buffer[current_keys_buffer];
if (num_blocks > 0)
rdxsrt_histogram<float, uint, DIGIT_BITS, KPT, TPB, 9><<<num_blocks, TPB, 0>>>(d_current_keys_in, digit, d_histogram);
if (remaining_elements > 0)
rdxsrt_histogram_with_guards<float, uint, DIGIT_BITS, KPT, TPB, 9><<<remainder_blocks, TPB, 0>>>(d_current_keys_in, digit, d_histogram, num_items, num_blocks);
/******************************************************************************************/
/* Find the bin which contains the Kth largest element */
/******************************************************************************************/
cudaMemcpy(h_histogram, d_histogram, 256 * sizeof(uint), cudaMemcpyDeviceToHost);
// currently we find the bin on host, hence we need to synchronize the stream
// cudaStreamSynchronize(stream);
unsigned int rolling_sum = 0;
unsigned int digit_val;
for (int i = 255; i >= 0; i--) {
if ((rolling_sum + h_histogram[i]) > k) {
digit_val = i;
k -= rolling_sum;
break;
}
rolling_sum += h_histogram[i];
}
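// After the scan, digit_val is the bucket containing the kth largest key:
// all keys in higher buckets (rolling_sum of them) belong to the top-k, and
// k has been reduced to the number of keys still needed from this bucket.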
cudaMemset(d_index_buffer, 0, 4);
select_kth_bucket<float, unsigned int, DIGIT_BITS, KPT, TPB><<<num_blocks + remainder_blocks, TPB, 0>>>(d_current_keys_in,
d_current_value_in,
digit,
digit_val,
num_items,
keys_double_buffer[1-current_keys_buffer],
d_keys_out,
values_double_buffer[1-current_keys_buffer],
d_values_out,
d_index_buffer,
d_index_out);
uint h_index_out;
uint h_index_buffer;
cudaMemcpy(&h_index_out, d_index_out, sizeof(uint), cudaMemcpyDeviceToHost);
cudaMemcpy(&h_index_buffer, d_index_buffer, sizeof(uint), cudaMemcpyDeviceToHost);
// cudaStreamSynchronize(stream);
// Update number of items to reflect reduced number of elements.
num_items = h_index_buffer;
if (k == 0) break;
else if (k != 0 && digit == 3) {
// We are at the last digit and k != 0 implies that the kth value has repetitions.
// Copy any of the repeated values (and keys!) to the output array to complete the top-k.
cudaMemcpy(d_keys_out + h_index_out, keys_double_buffer[1-current_keys_buffer] ,k * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(d_values_out + h_index_out, values_double_buffer[1-current_keys_buffer], k * sizeof(float), cudaMemcpyDeviceToDevice);
k -= k;
}
current_keys_buffer = 1 - current_keys_buffer;
}
delete[] h_histogram;
cudaFree(d_histogram);
cudaFree(d_index_out);
cudaFree(d_index_buffer);
return error;
}
// __global__ void _Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int n)
// {
// // set thread ID
// unsigned int tid = threadIdx.x;
// unsigned int gridSize = blockDim.x * gridDim.x;
// unsigned int i = blockIdx.x * blockDim.x + tid;
// unsigned int blockSize = blockDim.x;
// while (i < n) {
// dst_data[i] = (int)src_data[i];
// i += gridSize;
// }
// }
// void Uint32ToInt32(int *dst_data,
// unsigned int *src_data,
// unsigned int num_elements)
// {
// int blocksPerGrid = (int) ceil(1.0 * num_elements / maxThreadsPerBlock);
// _Uint32ToInt32<<<blocksPerGrid, maxThreadsPerBlock, 0>>>(dst_data, src_data, num_elements);
// }
std::vector<torch::Tensor> rdxtopk_cuda(
torch::Tensor input,torch::Tensor indices, unsigned int k) {
unsigned int num_items = input.numel();
auto d_keys_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kFloat32).device(input.device()));
auto d_values_out = torch::zeros(k, torch::TensorOptions().dtype(torch::kInt).device(input.device()));
CUDARadixSelectTopK(input,indices,
num_items,
k,
(float*)d_keys_out.data_ptr(),
(uint*)d_values_out.data_ptr());
// Uint32ToInt32((int*)d_values_out.data_ptr(), (uint*)d_values_out.data_ptr(), k);
return {d_keys_out, d_values_out};
}
|
f3dbd27f2cb8160a8ded6daefc71adcaec9b0cdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_maxpy.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_clobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloatComplex *X,
magmaFloatComplex *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_caxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X magmaFloatComplex*
input vector X
@param
Y magmaFloatComplex*
input/output vector Y
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloatComplex *X,
magmaFloatComplex *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
hipLaunchKernelGGL(( magma_clobpcg_maxpy_kernel), dim3(grid), dim3(block), 0, magma_stream ,
num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
| f3dbd27f2cb8160a8ded6daefc71adcaec9b0cdf.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlobpcg_maxpy.cu normal z -> c, Tue Sep 2 12:38:33 2014
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_clobpcg_maxpy_kernel( magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloatComplex *X,
magmaFloatComplex *Y){
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ ){
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes an axpy for an mxn matrix:
Y = X + Y
It replaces:
magma_caxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param
num_rows magma_int_t
number of rows
@param
num_vecs magma_int_t
number of vectors
@param
X magmaFloatComplex*
input vector X
@param
Y magmaFloatComplex*
input/output vector Y
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_clobpcg_maxpy( magma_int_t num_rows,
magma_int_t num_vecs,
magmaFloatComplex *X,
magmaFloatComplex *Y){
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( (num_rows+block_size-1)/block_size );
magma_clobpcg_maxpy_kernel<<< grid, block, 0, magma_stream >>>
( num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
|
f550b44a152270fec4c01db572550181c6651e4d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <stdlib.h>
#include <float.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.h"
#define CSV 0
#if(CSV)
#define PS(X, S) std::cout << X << ", " << S << ", "; fflush(stdout);
#define PV(X) std::cout << X << ", "; fflush(stdout);
#else
#define PS(X, S) std::cout << X << " " << S <<" :\n"; fflush(stdout);
#define PV(X) std::cout << "\t" << #X << " \t: " << X << "\n"; fflush(stdout);
#endif
/*
* Produce 64-bits of pseudo-randomness
* Note: not very "good" or "random"
*/
template<typename vec_t>
vec_t rand64() {
vec_t rtn;
do {
uint32_t * rtn32 = (uint32_t *)&rtn;
rtn32[0] = rand();
if(sizeof(vec_t) > 4) rtn32[1] = rand();
} while(!(rtn < getPositiveInfinity<vec_t>() &&
rtn > getNegativeInfinity<vec_t>()));
return rtn;
}
/*
* Perform <runs> merges of two sorted pseudorandom <vec_t> arrays of length <size>
* Checks the output of each merge for correctness
*/
#define PADDING 1024
template<typename vec_t, uint32_t blocks, uint32_t threads, bool timing>
void mergeType(const uint64_t size, const uint32_t runs) {
// Prepare host and device vectors
std::vector<vec_t> hA (size + PADDING);
std::vector<vec_t> hB (size + PADDING);
std::vector<vec_t> hC (2*size + PADDING);
vec_t *dA;
vec_t *dB;
vec_t *dC;
hipMalloc((void**)&dA, (size + PADDING) * sizeof(vec_t));
hipMalloc((void**)&dB, (size + PADDING) * sizeof(vec_t));
hipMalloc((void**)&dC, (2*size + PADDING) * sizeof(vec_t));
uint32_t *dpi; // diagonal_path_intersections;
hipMalloc((void**)&dpi, (2 * (blocks + 1)) * sizeof(uint32_t));
uint32_t errors = 0;
double total_time = 0.0;
for(uint32_t r = 0; r < runs; r++) {
// Generate two sorted pseudorandom arrays
for (uint64_t n = 0; n < size; n++) {
hA[n] = rand64<vec_t>();
hB[n] = rand64<vec_t>();
}
for (uint64_t n = size; n < size + PADDING; n++) {
hA[n] = getPositiveInfinity<vec_t>();
hB[n] = getPositiveInfinity<vec_t>();
}
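// The PADDING region of both inputs is filled with +infinity sentinels,
// presumably so the merge kernels can read slightly past the logical end of
// each sorted array without picking up values that would disturb the result.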
std::sort(hA.begin(), hA.end());
std::sort(hB.begin(), hB.end());
hipMemcpy(dA, hA.data(), (size + PADDING) * sizeof(vec_t), hipMemcpyHostToDevice);
hipMemcpy(dB, hB.data(), (size + PADDING) * sizeof(vec_t), hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Perform the global diagonal intersection search to divide work among SMs
hipLaunchKernelGGL(( workloadDiagonals<vec_t>), dim3(blocks), dim3(32), 0, 0, dA, size, dB, size, dpi);
// Merge between global diagonals independently on each block
hipLaunchKernelGGL(( mergeSinglePath<vec_t,false,false>) , dim3(blocks), dim3(threads), 0, 0,
dA, size, dB, size, dpi, dC, size * 2);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// Test for errors
hipMemcpy(hC.data(), dC, size * sizeof(vec_t), hipMemcpyDeviceToHost);
for(uint32_t i = 1; i < size; i++) {
errors += hC[i] < hC[i-1];
}
}
hipFree(dA);
hipFree(dB);
hipFree(dC);
hipFree(dpi);
PV(errors); // Print error info
printf("%s. ", errors ? "FAIL" : "PASS");
if (timing)
printf("Average kernel execution time: %f (us).\n", (total_time * 1e-3f) / runs);
else
printf("Warmup run\n");
}
/*
* Performs <runs> merge tests for each type at a given size
*/
template<uint32_t blocks, uint32_t threads>
void mergeAllTypes(const uint64_t size, const uint32_t runs) {
PS("uint32_t", size) mergeType<uint32_t, blocks, threads, false>(size, runs); printf("\n");
PS("uint32_t", size) mergeType<uint32_t, blocks, threads, true>(size, runs); printf("\n");
PS("float", size) mergeType<float, blocks, threads, false>(size, runs); printf("\n");
PS("float", size) mergeType<float, blocks, threads, true>(size, runs); printf("\n");
PS("uint64_t", size) mergeType<uint64_t, blocks, threads, false>(size, runs); printf("\n");
PS("uint64_t", size) mergeType<uint64_t, blocks, threads, true>(size, runs); printf("\n");
PS("double", size) mergeType<double, blocks, threads, false>(size, runs); printf("\n");
PS("double", size) mergeType<double, blocks, threads, true>(size, runs); printf("\n");
}
int main(int argc, char *argv[]) {
if (argc != 3) {
printf("Usage: %s <length of the arrays> <runs>\n", argv[0]);
return 1;
}
// length is sufficiently large;
// otherwise there are invalid global reads in the kernel mergeSinglePath
const uint64_t length = atol(argv[1]);
const uint32_t runs = atoi(argv[2]);
const int blocks = 112;
const int threads = 128; // do not change
mergeAllTypes<blocks, threads>(length, runs);
return 0;
}
| f550b44a152270fec4c01db572550181c6651e4d.cu | #include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <stdlib.h>
#include <float.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <chrono>
#include <cuda.h>
#include "kernels.h"
#define CSV 0
#if(CSV)
#define PS(X, S) std::cout << X << ", " << S << ", "; fflush(stdout);
#define PV(X) std::cout << X << ", "; fflush(stdout);
#else
#define PS(X, S) std::cout << X << " " << S <<" :\n"; fflush(stdout);
#define PV(X) std::cout << "\t" << #X << " \t: " << X << "\n"; fflush(stdout);
#endif
/*
* Produce 64-bits of pseudo-randomness
* Note: not very "good" or "random"
*/
template<typename vec_t>
vec_t rand64() {
vec_t rtn;
do {
uint32_t * rtn32 = (uint32_t *)&rtn;
rtn32[0] = rand();
if(sizeof(vec_t) > 4) rtn32[1] = rand();
} while(!(rtn < getPositiveInfinity<vec_t>() &&
rtn > getNegativeInfinity<vec_t>()));
return rtn;
}
/*
* Perform <runs> merges of two sorted pseudorandom <vec_t> arrays of length <size>
* Checks the output of each merge for correctness
*/
#define PADDING 1024
template<typename vec_t, uint32_t blocks, uint32_t threads, bool timing>
void mergeType(const uint64_t size, const uint32_t runs) {
// Prepare host and device vectors
std::vector<vec_t> hA (size + PADDING);
std::vector<vec_t> hB (size + PADDING);
std::vector<vec_t> hC (2*size + PADDING);
vec_t *dA;
vec_t *dB;
vec_t *dC;
cudaMalloc((void**)&dA, (size + PADDING) * sizeof(vec_t));
cudaMalloc((void**)&dB, (size + PADDING) * sizeof(vec_t));
cudaMalloc((void**)&dC, (2*size + PADDING) * sizeof(vec_t));
uint32_t *dpi; // diagonal_path_intersections;
cudaMalloc((void**)&dpi, (2 * (blocks + 1)) * sizeof(uint32_t));
uint32_t errors = 0;
double total_time = 0.0;
for(uint32_t r = 0; r < runs; r++) {
// Generate two sorted pseudorandom arrays
for (uint64_t n = 0; n < size; n++) {
hA[n] = rand64<vec_t>();
hB[n] = rand64<vec_t>();
}
for (uint64_t n = size; n < size + PADDING; n++) {
hA[n] = getPositiveInfinity<vec_t>();
hB[n] = getPositiveInfinity<vec_t>();
}
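// The PADDING region of both inputs is filled with +infinity sentinels,
// presumably so the merge kernels can read slightly past the logical end of
// each sorted array without picking up values that would disturb the result.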
std::sort(hA.begin(), hA.end());
std::sort(hB.begin(), hB.end());
cudaMemcpy(dA, hA.data(), (size + PADDING) * sizeof(vec_t), cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB.data(), (size + PADDING) * sizeof(vec_t), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
// Perform the global diagonal intersection search to divide work among SMs
workloadDiagonals<vec_t><<<blocks, 32>>> (dA, size, dB, size, dpi);
// Merge between global diagonals independently on each block
mergeSinglePath<vec_t,false,false> <<<blocks, threads>>>
(dA, size, dB, size, dpi, dC, size * 2);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
total_time += time;
// Test for errors
cudaMemcpy(hC.data(), dC, size * sizeof(vec_t), cudaMemcpyDeviceToHost);
for(uint32_t i = 1; i < size; i++) {
errors += hC[i] < hC[i-1];
}
}
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
cudaFree(dpi);
PV(errors); // Print error info
printf("%s. ", errors ? "FAIL" : "PASS");
if (timing)
printf("Average kernel execution time: %f (us).\n", (total_time * 1e-3f) / runs);
else
printf("Warmup run\n");
}
/*
* Performs <runs> merge tests for each type at a given size
*/
template<uint32_t blocks, uint32_t threads>
void mergeAllTypes(const uint64_t size, const uint32_t runs) {
PS("uint32_t", size) mergeType<uint32_t, blocks, threads, false>(size, runs); printf("\n");
PS("uint32_t", size) mergeType<uint32_t, blocks, threads, true>(size, runs); printf("\n");
PS("float", size) mergeType<float, blocks, threads, false>(size, runs); printf("\n");
PS("float", size) mergeType<float, blocks, threads, true>(size, runs); printf("\n");
PS("uint64_t", size) mergeType<uint64_t, blocks, threads, false>(size, runs); printf("\n");
PS("uint64_t", size) mergeType<uint64_t, blocks, threads, true>(size, runs); printf("\n");
PS("double", size) mergeType<double, blocks, threads, false>(size, runs); printf("\n");
PS("double", size) mergeType<double, blocks, threads, true>(size, runs); printf("\n");
}
int main(int argc, char *argv[]) {
if (argc != 3) {
printf("Usage: %s <length of the arrays> <runs>\n", argv[0]);
return 1;
}
// length is sufficiently large;
// otherwise there are invalid global reads in the kernel mergeSinglePath
const uint64_t length = atol(argv[1]);
const uint32_t runs = atoi(argv[2]);
const int blocks = 112;
const int threads = 128; // do not change
mergeAllTypes<blocks, threads>(length, runs);
return 0;
}
|
2a19f4c97aca3415b60ed4513b06932d89a1d5ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <chrono>
#include <hiprand/hiprand_kernel.h>
#include <limits>
#include <hipcub/hipcub.hpp>
#include "../configuration.h"
#include "util/util.h"
static const int kNullptr = std::numeric_limits<int>::max();
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__device__ DeviceArray<IndexT, kMaxDegree>* d_Cell_incoming;
__device__ DeviceArray<IndexT, kMaxDegree>* d_Cell_outgoing;
__device__ int* d_Cell_max_velocity;
__device__ int* d_Cell_current_max_velocity;
__device__ int* d_Cell_num_incoming;
__device__ int* d_Cell_num_outgoing;
__device__ float* d_Cell_x;
__device__ float* d_Cell_y;
__device__ bool* d_Cell_is_target;
__device__ hiprandState_t* d_Cell_random_state;
__device__ char* d_Cell_type;
__device__ hiprandState_t* d_Car_random_state;
__device__ DeviceArray<IndexT, kMaxVelocity>* d_Car_path;
__device__ int* d_Car_path_length;
__device__ int* d_Car_velocity;
__device__ int* d_Car_max_velocity;
__device__ bool* d_Cell_has_car;
__device__ bool* d_Cell_should_occupy;
__device__ int d_num_cells;
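// Cars are not separate objects: a car is identified by the index of the cell
// it currently occupies, so the d_Car_* arrays are indexed by cell index and
// d_Cell_has_car marks which cells hold a car.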
int host_num_cells;
class TrafficLight {
private:
DeviceArray<IndexT, kMaxDegree> cells_;
int num_cells_;
int timer_;
int phase_time_;
int phase_;
public:
__device__ TrafficLight(int num_cells, int phase_time)
: num_cells_(num_cells), timer_(0), phase_time_(phase_time), phase_(0) {}
__device__ void set_cell(int idx, IndexT cell) {
assert(cell != kNullptr);
cells_[idx] = cell;
}
__device__ void step();
};
// TODO: Consider migrating to SoaAlloc.
TrafficLight* h_traffic_lights;
__device__ TrafficLight* d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float* dev_Cell_pos_x;
__device__ float* dev_Cell_pos_y;
__device__ bool* dev_Cell_occupied;
float* host_Cell_pos_x;
float* host_Cell_pos_y;
bool* host_Cell_occupied;
float* host_data_Cell_pos_x;
float* host_data_Cell_pos_y;
bool* host_data_Cell_occupied;
__device__ int Cell_current_max_velocity(IndexT self) {
return d_Cell_current_max_velocity[self];
}
__device__ int Cell_max_velocity(IndexT self) {
return d_Cell_max_velocity[self];
}
__device__ void Cell_set_current_max_velocity(IndexT self, int v) {
d_Cell_current_max_velocity[self] = v;
}
__device__ void Cell_remove_speed_limit(IndexT self) {
d_Cell_current_max_velocity[self] = d_Cell_max_velocity[self];
}
__device__ int Cell_num_incoming(IndexT self) {
return d_Cell_num_incoming[self];
}
__device__ void Cell_set_num_incoming(IndexT self, int num) {
d_Cell_num_incoming[self] = num;
}
__device__ int Cell_num_outgoing(IndexT self) {
return d_Cell_num_outgoing[self];
}
__device__ void Cell_set_num_outgoing(IndexT self, int num) {
d_Cell_num_outgoing[self] = num;
}
__device__ IndexT get_incoming(IndexT self, int idx) {
return d_Cell_incoming[self][idx];
}
__device__ void Cell_set_incoming(IndexT self, int idx, IndexT cell) {
assert(cell != kNullptr);
d_Cell_incoming[self][idx] = cell;
}
__device__ IndexT Cell_get_outgoing(IndexT self, int idx) {
return d_Cell_outgoing[self][idx];
}
__device__ void Cell_set_outgoing(IndexT self, int idx, IndexT cell) {
assert(cell != kNullptr);
d_Cell_outgoing[self][idx] = cell;
}
__device__ float Cell_x(IndexT self) { return d_Cell_x[self]; }
__device__ float Cell_y(IndexT self) { return d_Cell_y[self]; }
__device__ bool Cell_is_free(IndexT self) { return !d_Cell_has_car[self]; }
__device__ bool Cell_is_sink(IndexT self) { return d_Cell_num_outgoing[self] == 0; }
__device__ bool Cell_is_target(IndexT self) { return d_Cell_is_target[self]; }
__device__ void Cell_set_target(IndexT self) { d_Cell_is_target[self] = true; }
__device__ int Car_random_int(IndexT self, int a, int b) {
return hiprand(&d_Car_random_state[self]) % (b - a) + a;
}
__device__ int Car_velocity(IndexT self) { return d_Car_velocity[self]; }
__device__ int Car_max_velocity(IndexT self) { return d_Car_max_velocity[self]; }
__device__ void Cell_occupy(IndexT self, IndexT car) {
assert(d_Cell_has_car[car]);
assert(Cell_is_free(self));
d_Cell_should_occupy[self] = true;
d_Car_velocity[self] = d_Car_velocity[car];
d_Car_max_velocity[self] = d_Car_max_velocity[car];
d_Car_random_state[self] = d_Car_random_state[car];
for (int i = 0; i < kMaxVelocity; ++i) {
d_Car_path[self][i] = d_Car_path[car][i];
}
d_Car_path_length[self] = d_Car_path_length[car];
}
__device__ void Cell_release(IndexT self) {
assert(!Cell_is_free(self));
d_Cell_has_car[self] = false;
}
__device__ IndexT Car_next_step(IndexT self, IndexT position) {
// Almost random walk.
const uint32_t num_outgoing = d_Cell_num_outgoing[position];
assert(num_outgoing > 0);
// Need some kind of return statement here.
return d_Cell_outgoing[position][Car_random_int(self, 0, num_outgoing)];
}
__device__ void Car_step_initialize_iteration(IndexT self) {
// Reset calculated path. This forces cars with a random moving behavior to
// select a new path in every iteration. Otherwise, cars might get "stuck"
// on a full network if many cars are waiting for the one in front of them in
// a cycle.
d_Car_path_length[self] = 0;
}
__device__ void Car_step_accelerate(IndexT self) {
// Speed up the car by 1 or 2 units.
int speedup = Car_random_int(self, 0, 2) + 1;
d_Car_velocity[self] = d_Car_max_velocity[self] < d_Car_velocity[self] + speedup
? d_Car_max_velocity[self] : d_Car_velocity[self] + speedup;
}
__device__ void Car_step_extend_path(IndexT self) {
IndexT cell = self;
IndexT next_cell;
for (int i = 0; i < d_Car_velocity[self]; ++i) {
if (Cell_is_sink(cell) || Cell_is_target(cell)) {
break;
}
next_cell = Car_next_step(self, cell);
assert(next_cell != cell);
if (!Cell_is_free(next_cell)) break;
cell = next_cell;
d_Car_path[self][i] = cell;
d_Car_path_length[self] = d_Car_path_length[self] + 1;
}
d_Car_velocity[self] = d_Car_path_length[self];
}
__device__ void Car_step_constraint_velocity(IndexT self) {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
if (d_Car_velocity[self] > Cell_current_max_velocity(self)) {
d_Car_velocity[self] = Cell_current_max_velocity(self);
}
int path_index = 0;
int distance = 1;
while (distance <= d_Car_velocity[self]) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
IndexT next_cell = d_Car_path[self][path_index];
// Avoid collision.
if (!Cell_is_free(next_cell)) {
// Cannot enter cell.
--distance;
d_Car_velocity[self] = distance;
break;
} // else: Can enter next cell.
if (d_Car_velocity[self] > Cell_current_max_velocity(next_cell)) {
// Car is too fast for this cell.
if (Cell_current_max_velocity(next_cell) > distance - 1) {
// Even if we slow down, we would still make progress.
d_Car_velocity[self] = Cell_current_max_velocity(next_cell);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
d_Car_velocity[self] = distance;
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < d_Car_velocity[self]; ++i) {
assert(Cell_is_free(d_Car_path[self][i]));
assert(i == 0 || d_Car_path[self][i - 1] != d_Car_path[self][i]);
}
// TODO: Check why the cast is necessary.
assert(distance <= d_Car_velocity[self]);
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
IndexT cell = self;
for (int i = 0; i < d_Car_velocity[self]; ++i) {
assert(d_Car_path[self][i] != cell);
cell = d_Car_path[self][i];
assert(cell != self);
assert(Cell_is_free(cell));
}
if (d_Car_velocity[self] > 0) {
Cell_occupy(cell, self);
Cell_release(self);
}
}
__device__ void Car_step_slow_down(IndexT self) {
// 20% chance of slowdown.
if (hiprand_uniform(&d_Car_random_state[self]) < 0.2 && d_Car_velocity[self] > 0) {
d_Car_velocity[self] = d_Car_velocity[self] - 1;
}
}
__device__ void TrafficLight::step() {
if (num_cells_ > 0) {
timer_ = (timer_ + 1) % phase_time_;
if (timer_ == 0) {
assert(cells_[phase_] != kNullptr);
Cell_set_current_max_velocity(cells_[phase_], 0);
phase_ = (phase_ + 1) % num_cells_;
Cell_remove_speed_limit(cells_[phase_]);
}
}
}
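// Second phase of a car move: after kernel_Car_step_move has marked destination cells,
// pending occupations are materialized here and cars standing on sink/target cells are
// removed from the simulation.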
__device__ void Car_commit_occupy(IndexT self) {
if (d_Cell_should_occupy[self]) {
d_Cell_should_occupy[self] = false;
d_Cell_has_car[self] = true;
}
if (Cell_is_sink(self) || Cell_is_target(self)) {
// Remove car from the simulation. Will be added again in the next
// iteration.
Cell_release(self);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
assert(Cell_is_free(cell));
d_Cell_has_car[cell] = true;
d_Car_path_length[cell] = 0;
d_Car_velocity[cell] = 0;
d_Car_max_velocity[cell] = max_velocity;
hiprand_init(seed, 0, 0, &d_Car_random_state[cell]);
return cell;
}
__device__ void ProducerCell_create_car(IndexT self) {
assert(d_Cell_type[self] == kCellTypeProducer);
if (Cell_is_free(self)) {
float r = hiprand_uniform(&d_Cell_random_state[self]);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/ hiprand(&d_Cell_random_state[self]), /*cell=*/ self,
/*max_velocity=*/ hiprand(&d_Cell_random_state[self]) % (kMaxVelocity/2)
+ kMaxVelocity/2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
d_Cell_max_velocity[idx] = max_velocity;
d_Cell_current_max_velocity[idx] = max_velocity;
d_Cell_num_incoming[idx] = 0;
d_Cell_num_outgoing[idx] = 0;
d_Cell_x[idx] = x;
d_Cell_y[idx] = y;
d_Cell_is_target[idx] = false;
d_Cell_type[idx] = kCellTypeNormal;
d_Cell_should_occupy[idx] = false;
d_Cell_has_car[idx] = false;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y, int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
d_Cell_type[idx] = kCellTypeProducer;
hiprand_init(seed, 0, 0, &d_Cell_random_state[idx]);
return idx;
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
d_traffic_lights[i].step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/ hiprand(&state) % (kMaxVelocity/2)
+ kMaxVelocity/2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node* target,
int incoming_idx, hiprandState_t& state) {
// Create edge.
float dx = target->x - d_Cell_x[from];
float dy = target->y - d_Cell_y[from];
float dist = sqrt(dx*dx + dy*dy);
int steps = dist/kCellLength;
float step_x = dx/steps;
float step_y = dy/steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = d_Cell_x[from] + j*step_x;
float new_y = d_Cell_y[from] + j*step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (hiprand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(
d_Cell_max_velocity[prev], new_x, new_y,
hiprand(&state));
} else {
next = new_Cell(
d_Cell_max_velocity[prev], new_x, new_y);
}
if (hiprand_uniform(&state) < kTargetRatio) {
Cell_set_target(next);
}
Cell_set_num_outgoing(prev, 1);
Cell_set_outgoing(prev, 0, next);
Cell_set_num_incoming(next, 1);
Cell_set_incoming(next, 0, prev);
prev = next;
}
// Connect to all outgoing nodes of target.
Cell_set_num_outgoing(prev, target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
Cell_set_outgoing(prev, i, next);
Cell_set_incoming(next, incoming_idx, prev);
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
hiprandState_t state;
hiprand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
Cell_set_current_max_velocity(last, 0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
new(d_traffic_lights + i) TrafficLight(
/*num_cells=*/ d_nodes[i].num_incoming,
/*phase_time=*/ 5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
Cell_set_num_incoming(d_nodes[i].cell_out[j], d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i].set_cell(j, d_nodes[i].cell_in[j]);
Cell_set_current_max_velocity(d_nodes[i].cell_in[j], 0); // Set to "red".
}
}
}
void create_street_network() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_nodes, sizeof(Node)*kNumIntersections);
hipMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node*), 0,
hipMemcpyHostToDevice);
hipMalloc(&h_traffic_lights, sizeof(TrafficLight)*kNumIntersections);
hipMemcpyToSymbol(d_traffic_lights, &h_traffic_lights,
sizeof(TrafficLight*), 0, hipMemcpyHostToDevice);
gpuErrchk(hipDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
hipLaunchKernelGGL(( kernel_create_nodes),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_edges),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_create_traffic_lights),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
// Allocate helper data structures for rendering.
hipMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
hipMemcpyDeviceToHost);
hipMalloc(&host_Cell_pos_x, sizeof(float)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float*), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_pos_y, sizeof(float)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float*), 0,
hipMemcpyHostToDevice);
hipMalloc(&host_Cell_occupied, sizeof(bool)*host_num_cells);
hipMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool*), 0,
hipMemcpyHostToDevice);
host_data_Cell_pos_x = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_pos_y = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_occupied = (bool*) malloc(sizeof(bool)*host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
hipLaunchKernelGGL(( kernel_traffic_light_step),
dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = d_Cell_x[self];
dev_Cell_pos_y[idx] = d_Cell_y[self];
dev_Cell_occupied[idx] = !Cell_is_free(self);
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_Cell_add_to_rendering_array),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float)*host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float)*host_num_cells, hipMemcpyDeviceToHost);
hipMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool)*host_num_cells, hipMemcpyDeviceToHost);
gpuErrchk(hipDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_type[i] == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
Car_step_initialize_iteration(self);
Car_step_accelerate(self);
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_Car_commit_occupy() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i] || d_Cell_should_occupy[i]) {
Car_commit_occupy(i);
}
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
hipMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_compute_checksum), dim3(128), dim3(128), 0, 0, );
int result;
hipMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, hipMemcpyDeviceToHost);
return result;
}
void step() {
hipLaunchKernelGGL(( kernel_ProducerCell_create_car),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
step_traffic_lights();
hipLaunchKernelGGL(( kernel_Car_step_prepare_path),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Car_step_move),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
hipLaunchKernelGGL(( kernel_Car_commit_occupy),
dim3((host_num_cells + kNumBlockSize - 1) / kNumBlockSize),
dim3(kNumBlockSize), 0, 0, );
gpuErrchk(hipDeviceSynchronize());
}
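// Allocates one flat device array per Cell/Car field (capacity kMaxNumCells) and
// publishes each pointer through its __device__ symbol; i.e., a hand-written SOA layout.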
void allocate_memory() {
DeviceArray<IndexT, kMaxDegree>* h_Cell_incoming;
hipMalloc(&h_Cell_incoming, sizeof(DeviceArray<IndexT, kMaxDegree>)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_incoming, &h_Cell_incoming,
sizeof(DeviceArray<IndexT, kMaxDegree>*), 0, hipMemcpyHostToDevice);
DeviceArray<IndexT, kMaxDegree>* h_Cell_outgoing;
hipMalloc(&h_Cell_outgoing, sizeof(DeviceArray<IndexT, kMaxDegree>)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_outgoing, &h_Cell_outgoing,
sizeof(DeviceArray<IndexT, kMaxDegree>*), 0, hipMemcpyHostToDevice);
int* h_Cell_max_velocity;
hipMalloc(&h_Cell_max_velocity, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_max_velocity, &h_Cell_max_velocity, sizeof(int*),
0, hipMemcpyHostToDevice);
int* h_Cell_current_max_velocity;
hipMalloc(&h_Cell_current_max_velocity, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_current_max_velocity, &h_Cell_current_max_velocity, sizeof(int*),
0, hipMemcpyHostToDevice);
int* h_Cell_num_incoming;
hipMalloc(&h_Cell_num_incoming, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_num_incoming, &h_Cell_num_incoming, sizeof(int*),
0, hipMemcpyHostToDevice);
int* h_Cell_num_outgoing;
hipMalloc(&h_Cell_num_outgoing, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_num_outgoing, &h_Cell_num_outgoing, sizeof(int*),
0, hipMemcpyHostToDevice);
float* h_Cell_x;
hipMalloc(&h_Cell_x, sizeof(float)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_x, &h_Cell_x, sizeof(float*),
0, hipMemcpyHostToDevice);
float* h_Cell_y;
hipMalloc(&h_Cell_y, sizeof(float)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_y, &h_Cell_y, sizeof(float*),
0, hipMemcpyHostToDevice);
bool* h_Cell_is_target;
hipMalloc(&h_Cell_is_target, sizeof(bool)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_is_target, &h_Cell_is_target, sizeof(bool*),
0, hipMemcpyHostToDevice);
hiprandState_t* h_Cell_random_state;
hipMalloc(&h_Cell_random_state, sizeof(hiprandState_t)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_random_state, &h_Cell_random_state, sizeof(hiprandState_t*),
0, hipMemcpyHostToDevice);
char* h_Cell_type;
hipMalloc(&h_Cell_type, sizeof(char)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_type, &h_Cell_type, sizeof(char*),
0, hipMemcpyHostToDevice);
hiprandState_t* h_Car_random_state;
hipMalloc(&h_Car_random_state, sizeof(hiprandState_t)*kMaxNumCells);
hipMemcpyToSymbol(d_Car_random_state, &h_Car_random_state, sizeof(hiprandState_t*),
0, hipMemcpyHostToDevice);
DeviceArray<IndexT, kMaxVelocity>* h_Car_path;
hipMalloc(&h_Car_path, sizeof(DeviceArray<IndexT, kMaxVelocity>)*kMaxNumCells);
hipMemcpyToSymbol(d_Car_path, &h_Car_path,
sizeof(DeviceArray<IndexT, kMaxVelocity>*),
0, hipMemcpyHostToDevice);
int* h_Car_path_length;
hipMalloc(&h_Car_path_length, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Car_path_length, &h_Car_path_length, sizeof(int*),
0, hipMemcpyHostToDevice);
int* h_Car_velocity;
hipMalloc(&h_Car_velocity, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Car_velocity, &h_Car_velocity, sizeof(int*),
0, hipMemcpyHostToDevice);
int* h_Car_max_velocity;
hipMalloc(&h_Car_max_velocity, sizeof(int)*kMaxNumCells);
hipMemcpyToSymbol(d_Car_max_velocity, &h_Car_max_velocity, sizeof(int*),
0, hipMemcpyHostToDevice);
bool* h_Cell_has_car;
hipMalloc(&h_Cell_has_car, sizeof(bool)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_has_car, &h_Cell_has_car, sizeof(bool*),
0, hipMemcpyHostToDevice);
bool* h_Cell_should_occupy;
hipMalloc(&h_Cell_should_occupy, sizeof(bool)*kMaxNumCells);
hipMemcpyToSymbol(d_Cell_should_occupy, &h_Cell_should_occupy, sizeof(bool*),
0, hipMemcpyHostToDevice);
int zero = 0;
hipMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0, hipMemcpyHostToDevice);
}
int main(int /*argc*/, char** /*argv*/) {
allocate_memory();
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
step();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
}
| 2a19f4c97aca3415b60ed4513b06932d89a1d5ad.cu | #include <assert.h>
#include <chrono>
#include <curand_kernel.h>
#include <limits>
#include <cub/cub.cuh>
#include "../configuration.h"
#include "util/util.h"
static const int kNullptr = std::numeric_limits<int>::max();
static const int kNumBlockSize = 256;
static const char kCellTypeNormal = 1;
static const char kCellTypeProducer = 2;
using IndexT = int;
using CellPointerT = IndexT;
#include "../dataset.h"
__device__ DeviceArray<IndexT, kMaxDegree>* d_Cell_incoming;
__device__ DeviceArray<IndexT, kMaxDegree>* d_Cell_outgoing;
__device__ int* d_Cell_max_velocity;
__device__ int* d_Cell_current_max_velocity;
__device__ int* d_Cell_num_incoming;
__device__ int* d_Cell_num_outgoing;
__device__ float* d_Cell_x;
__device__ float* d_Cell_y;
__device__ bool* d_Cell_is_target;
__device__ curandState_t* d_Cell_random_state;
__device__ char* d_Cell_type;
__device__ curandState_t* d_Car_random_state;
__device__ DeviceArray<IndexT, kMaxVelocity>* d_Car_path;
__device__ int* d_Car_path_length;
__device__ int* d_Car_velocity;
__device__ int* d_Car_max_velocity;
__device__ bool* d_Cell_has_car;
__device__ bool* d_Cell_should_occupy;
__device__ int d_num_cells;
int host_num_cells;
class TrafficLight {
private:
DeviceArray<IndexT, kMaxDegree> cells_;
int num_cells_;
int timer_;
int phase_time_;
int phase_;
public:
__device__ TrafficLight(int num_cells, int phase_time)
: num_cells_(num_cells), timer_(0), phase_time_(phase_time), phase_(0) {}
__device__ void set_cell(int idx, IndexT cell) {
assert(cell != kNullptr);
cells_[idx] = cell;
}
__device__ void step();
};
// TODO: Consider migrating to SoaAlloc.
TrafficLight* h_traffic_lights;
__device__ TrafficLight* d_traffic_lights;
// Only for rendering.
__device__ int dev_num_cells;
__device__ float* dev_Cell_pos_x;
__device__ float* dev_Cell_pos_y;
__device__ bool* dev_Cell_occupied;
float* host_Cell_pos_x;
float* host_Cell_pos_y;
bool* host_Cell_occupied;
float* host_data_Cell_pos_x;
float* host_data_Cell_pos_y;
bool* host_data_Cell_occupied;
__device__ int Cell_current_max_velocity(IndexT self) {
return d_Cell_current_max_velocity[self];
}
__device__ int Cell_max_velocity(IndexT self) {
return d_Cell_max_velocity[self];
}
__device__ void Cell_set_current_max_velocity(IndexT self, int v) {
d_Cell_current_max_velocity[self] = v;
}
__device__ void Cell_remove_speed_limit(IndexT self) {
d_Cell_current_max_velocity[self] = d_Cell_max_velocity[self];
}
__device__ int Cell_num_incoming(IndexT self) {
return d_Cell_num_incoming[self];
}
__device__ void Cell_set_num_incoming(IndexT self, int num) {
d_Cell_num_incoming[self] = num;
}
__device__ int Cell_num_outgoing(IndexT self) {
return d_Cell_num_outgoing[self];
}
__device__ void Cell_set_num_outgoing(IndexT self, int num) {
d_Cell_num_outgoing[self] = num;
}
__device__ IndexT get_incoming(IndexT self, int idx) {
return d_Cell_incoming[self][idx];
}
__device__ void Cell_set_incoming(IndexT self, int idx, IndexT cell) {
assert(cell != kNullptr);
d_Cell_incoming[self][idx] = cell;
}
__device__ IndexT Cell_get_outgoing(IndexT self, int idx) {
return d_Cell_outgoing[self][idx];
}
__device__ void Cell_set_outgoing(IndexT self, int idx, IndexT cell) {
assert(cell != kNullptr);
d_Cell_outgoing[self][idx] = cell;
}
__device__ float Cell_x(IndexT self) { return d_Cell_x[self]; }
__device__ float Cell_y(IndexT self) { return d_Cell_y[self]; }
__device__ bool Cell_is_free(IndexT self) { return !d_Cell_has_car[self]; }
__device__ bool Cell_is_sink(IndexT self) { return d_Cell_num_outgoing[self] == 0; }
__device__ bool Cell_is_target(IndexT self) { return d_Cell_is_target[self]; }
__device__ void Cell_set_target(IndexT self) { d_Cell_is_target[self] = true; }
__device__ int Car_random_int(IndexT self, int a, int b) {
return curand(&d_Car_random_state[self]) % (b - a) + a;
}
__device__ int Car_velocity(IndexT self) { return d_Car_velocity[self]; }
__device__ int Car_max_velocity(IndexT self) { return d_Car_max_velocity[self]; }
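// Car state lives in cell-indexed SOA arrays, so "occupying" a destination cell copies
// the moving car's fields into that cell's slot. The has_car flag itself is only flipped
// later in Car_commit_occupy(), giving a two-phase (move, then commit) update.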
__device__ void Cell_occupy(IndexT self, IndexT car) {
assert(d_Cell_has_car[car]);
assert(Cell_is_free(self));
d_Cell_should_occupy[self] = true;
d_Car_velocity[self] = d_Car_velocity[car];
d_Car_max_velocity[self] = d_Car_max_velocity[car];
d_Car_random_state[self] = d_Car_random_state[car];
for (int i = 0; i < kMaxVelocity; ++i) {
d_Car_path[self][i] = d_Car_path[car][i];
}
d_Car_path_length[self] = d_Car_path_length[car];
}
__device__ void Cell_release(IndexT self) {
assert(!Cell_is_free(self));
d_Cell_has_car[self] = false;
}
__device__ IndexT Car_next_step(IndexT self, IndexT position) {
// Almost random walk.
const uint32_t num_outgoing = d_Cell_num_outgoing[position];
assert(num_outgoing > 0);
// Pick one of the outgoing cells uniformly at random.
return d_Cell_outgoing[position][Car_random_int(self, 0, num_outgoing)];
}
__device__ void Car_step_initialize_iteration(IndexT self) {
// Reset calculated path. This forces cars with a random moving behavior to
// select a new path in every iteration. Otherwise, cars might get "stuck"
// on a full network if many cars are waiting for the one in front of them in
// a cycle.
d_Car_path_length[self] = 0;
}
__device__ void Car_step_accelerate(IndexT self) {
// Speed up the car by 1 or 2 units.
int speedup = Car_random_int(self, 0, 2) + 1;
d_Car_velocity[self] = d_Car_max_velocity[self] < d_Car_velocity[self] + speedup
? d_Car_max_velocity[self] : d_Car_velocity[self] + speedup;
}
__device__ void Car_step_extend_path(IndexT self) {
IndexT cell = self;
IndexT next_cell;
for (int i = 0; i < d_Car_velocity[self]; ++i) {
if (Cell_is_sink(cell) || Cell_is_target(cell)) {
break;
}
next_cell = Car_next_step(self, cell);
assert(next_cell != cell);
if (!Cell_is_free(next_cell)) break;
cell = next_cell;
d_Car_path[self][i] = cell;
d_Car_path_length[self] = d_Car_path_length[self] + 1;
}
d_Car_velocity[self] = d_Car_path_length[self];
}
__device__ void Car_step_constraint_velocity(IndexT self) {
// This is actually only needed for the very first iteration, because a car
// may be positioned on a traffic light cell.
if (d_Car_velocity[self] > Cell_current_max_velocity(self)) {
d_Car_velocity[self] = Cell_current_max_velocity(self);
}
int path_index = 0;
int distance = 1;
while (distance <= d_Car_velocity[self]) {
// Invariant: Movement of up to `distance - 1` many cells at `velocity_`
// is allowed.
// Now check if next cell can be entered.
IndexT next_cell = d_Car_path[self][path_index];
// Avoid collision.
if (!Cell_is_free(next_cell)) {
// Cannot enter cell.
--distance;
d_Car_velocity[self] = distance;
break;
} // else: Can enter next cell.
if (d_Car_velocity[self] > Cell_current_max_velocity(next_cell)) {
// Car is too fast for this cell.
if (Cell_current_max_velocity(next_cell) > distance - 1) {
// Even if we slow down, we would still make progress.
d_Car_velocity[self] = Cell_current_max_velocity(next_cell);
} else {
// Do not enter the next cell.
--distance;
assert(distance >= 0);
d_Car_velocity[self] = distance;
break;
}
}
++distance;
++path_index;
}
--distance;
#ifndef NDEBUG
for (int i = 0; i < d_Car_velocity[self]; ++i) {
assert(Cell_is_free(d_Car_path[self][i]));
assert(i == 0 || d_Car_path[self][i - 1] != d_Car_path[self][i]);
}
// TODO: Check why the cast is necessary.
assert(distance <= d_Car_velocity[self]);
#endif // NDEBUG
}
__device__ void Car_step_move(IndexT self) {
IndexT cell = self;
for (int i = 0; i < d_Car_velocity[self]; ++i) {
assert(d_Car_path[self][i] != cell);
cell = d_Car_path[self][i];
assert(cell != self);
assert(Cell_is_free(cell));
}
if (d_Car_velocity[self] > 0) {
Cell_occupy(cell, self);
Cell_release(self);
}
}
__device__ void Car_step_slow_down(IndexT self) {
// 20% chance of slowdown.
if (curand_uniform(&d_Car_random_state[self]) < 0.2 && d_Car_velocity[self] > 0) {
d_Car_velocity[self] = d_Car_velocity[self] - 1;
}
}
__device__ void TrafficLight::step() {
if (num_cells_ > 0) {
timer_ = (timer_ + 1) % phase_time_;
if (timer_ == 0) {
assert(cells_[phase_] != kNullptr);
Cell_set_current_max_velocity(cells_[phase_], 0);
phase_ = (phase_ + 1) % num_cells_;
Cell_remove_speed_limit(cells_[phase_]);
}
}
}
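// Second phase of a car move: after kernel_Car_step_move has marked destination cells,
// pending occupations are materialized here and cars standing on sink/target cells are
// removed from the simulation.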
__device__ void Car_commit_occupy(IndexT self) {
if (d_Cell_should_occupy[self]) {
d_Cell_should_occupy[self] = false;
d_Cell_has_car[self] = true;
}
if (Cell_is_sink(self) || Cell_is_target(self)) {
// Remove car from the simulation. Will be added again in the next
// iteration.
Cell_release(self);
}
}
__device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) {
assert(Cell_is_free(cell));
d_Cell_has_car[cell] = true;
d_Car_path_length[cell] = 0;
d_Car_velocity[cell] = 0;
d_Car_max_velocity[cell] = max_velocity;
curand_init(seed, 0, 0, &d_Car_random_state[cell]);
return cell;
}
__device__ void ProducerCell_create_car(IndexT self) {
assert(d_Cell_type[self] == kCellTypeProducer);
if (Cell_is_free(self)) {
float r = curand_uniform(&d_Cell_random_state[self]);
if (r < kCarAllocationRatio) {
IndexT new_car = new_Car(
/*seed=*/ curand(&d_Cell_random_state[self]), /*cell=*/ self,
/*max_velocity=*/ curand(&d_Cell_random_state[self]) % (kMaxVelocity/2)
+ kMaxVelocity/2);
}
}
}
__device__ IndexT new_Cell(int max_velocity, float x, float y) {
IndexT idx = atomicAdd(&d_num_cells, 1);
d_Cell_max_velocity[idx] = max_velocity;
d_Cell_current_max_velocity[idx] = max_velocity;
d_Cell_num_incoming[idx] = 0;
d_Cell_num_outgoing[idx] = 0;
d_Cell_x[idx] = x;
d_Cell_y[idx] = y;
d_Cell_is_target[idx] = false;
d_Cell_type[idx] = kCellTypeNormal;
d_Cell_should_occupy[idx] = false;
d_Cell_has_car[idx] = false;
return idx;
}
__device__ IndexT new_ProducerCell(int max_velocity, float x, float y, int seed) {
IndexT idx = new_Cell(max_velocity, x, y);
d_Cell_type[idx] = kCellTypeProducer;
curand_init(seed, 0, 0, &d_Cell_random_state[idx]);
return idx;
}
__global__ void kernel_traffic_light_step() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
d_traffic_lights[i].step();
}
}
__global__ void kernel_create_nodes() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1);
assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
d_nodes[i].cell_out[j] = new_Cell(
/*max_velocity=*/ curand(&state) % (kMaxVelocity/2)
+ kMaxVelocity/2,
d_nodes[i].x, d_nodes[i].y);
}
}
}
__device__ IndexT connect_intersections(IndexT from, Node* target,
int incoming_idx, curandState_t& state) {
// Create edge.
float dx = target->x - d_Cell_x[from];
float dy = target->y - d_Cell_y[from];
float dist = sqrt(dx*dx + dy*dy);
int steps = dist/kCellLength;
float step_x = dx/steps;
float step_y = dy/steps;
IndexT prev = from;
for (int j = 0; j < steps; ++j) {
float new_x = d_Cell_x[from] + j*step_x;
float new_y = d_Cell_y[from] + j*step_y;
assert(new_x >= 0 && new_x <= 1);
assert(new_y >= 0 && new_y <= 1);
IndexT next;
if (curand_uniform(&state) < kProducerRatio) {
next = new_ProducerCell(
d_Cell_max_velocity[prev], new_x, new_y,
curand(&state));
} else {
next = new_Cell(
d_Cell_max_velocity[prev], new_x, new_y);
}
if (curand_uniform(&state) < kTargetRatio) {
Cell_set_target(next);
}
Cell_set_num_outgoing(prev, 1);
Cell_set_outgoing(prev, 0, next);
Cell_set_num_incoming(next, 1);
Cell_set_incoming(next, 0, prev);
prev = next;
}
// Connect to all outgoing nodes of target.
Cell_set_num_outgoing(prev, target->num_outgoing);
for (int i = 0; i < target->num_outgoing; ++i) {
IndexT next = target->cell_out[i];
// num_incoming set later.
Cell_set_outgoing(prev, i, next);
Cell_set_incoming(next, incoming_idx, prev);
}
return prev;
}
__global__ void kernel_create_edges() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
curandState_t state;
curand_init(i, 0, 0, &state);
for (int k = 0; k < d_nodes[i].num_outgoing; ++k) {
int target = d_nodes[i].node_out[k];
int target_pos = d_nodes[i].node_out_pos[k];
IndexT last = connect_intersections(
d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state);
Cell_set_current_max_velocity(last, 0);
d_nodes[target].cell_in[target_pos] = last;
}
}
}
__global__ void kernel_create_traffic_lights() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < kNumIntersections; i += blockDim.x * gridDim.x) {
new(d_traffic_lights + i) TrafficLight(
/*num_cells=*/ d_nodes[i].num_incoming,
/*phase_time=*/ 5);
for (int j = 0; j < d_nodes[i].num_outgoing; ++j) {
Cell_set_num_incoming(d_nodes[i].cell_out[j], d_nodes[i].num_incoming);
}
for (int j = 0; j < d_nodes[i].num_incoming; ++j) {
d_traffic_lights[i].set_cell(j, d_nodes[i].cell_in[j]);
Cell_set_current_max_velocity(d_nodes[i].cell_in[j], 0); // Set to "red".
}
}
}
void create_street_network() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_nodes, sizeof(Node)*kNumIntersections);
cudaMemcpyToSymbol(d_nodes, &h_nodes, sizeof(Node*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&h_traffic_lights, sizeof(TrafficLight)*kNumIntersections);
cudaMemcpyToSymbol(d_traffic_lights, &h_traffic_lights,
sizeof(TrafficLight*), 0, cudaMemcpyHostToDevice);
gpuErrchk(cudaDeviceSynchronize());
// Create basic structure on host.
create_network_structure();
kernel_create_nodes<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_edges<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_create_traffic_lights<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
// Allocate helper data structures for rendering.
cudaMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0,
cudaMemcpyDeviceToHost);
cudaMalloc(&host_Cell_pos_x, sizeof(float)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_pos_y, sizeof(float)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float*), 0,
cudaMemcpyHostToDevice);
cudaMalloc(&host_Cell_occupied, sizeof(bool)*host_num_cells);
cudaMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool*), 0,
cudaMemcpyHostToDevice);
host_data_Cell_pos_x = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_pos_y = (float*) malloc(sizeof(float)*host_num_cells);
host_data_Cell_occupied = (bool*) malloc(sizeof(bool)*host_num_cells);
#ifndef NDEBUG
printf("Number of cells: %i\n", host_num_cells);
#endif // NDEBUG
}
void step_traffic_lights() {
// TODO: Consider migrating this to SoaAlloc.
kernel_traffic_light_step<<<
(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
__device__ void Cell_add_to_rendering_array(IndexT self) {
int idx = atomicAdd(&dev_num_cells, 1);
dev_Cell_pos_x[idx] = d_Cell_x[self];
dev_Cell_pos_y[idx] = d_Cell_y[self];
dev_Cell_occupied[idx] = !Cell_is_free(self);
}
__global__ void kernel_Cell_add_to_rendering_array() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
Cell_add_to_rendering_array(i);
}
}
void transfer_data() {
int zero = 0;
cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0,
cudaMemcpyHostToDevice);
kernel_Cell_add_to_rendering_array<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpy(host_data_Cell_pos_x, host_Cell_pos_x,
sizeof(float)*host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_pos_y, host_Cell_pos_y,
sizeof(float)*host_num_cells, cudaMemcpyDeviceToHost);
cudaMemcpy(host_data_Cell_occupied, host_Cell_occupied,
sizeof(bool)*host_num_cells, cudaMemcpyDeviceToHost);
gpuErrchk(cudaDeviceSynchronize());
}
__global__ void kernel_ProducerCell_create_car() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_type[i] == kCellTypeProducer) {
ProducerCell_create_car(i);
}
}
}
__device__ void Car_step_prepare_path(IndexT self) {
Car_step_initialize_iteration(self);
Car_step_accelerate(self);
Car_step_extend_path(self);
Car_step_constraint_velocity(self);
Car_step_slow_down(self);
}
__global__ void kernel_Car_step_prepare_path() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
Car_step_prepare_path(i);
}
}
}
__global__ void kernel_Car_commit_occupy() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i] || d_Cell_should_occupy[i]) {
Car_commit_occupy(i);
}
}
}
__global__ void kernel_Car_step_move() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
Car_step_move(i);
}
}
}
__device__ int d_checksum;
__global__ void kernel_compute_checksum() {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < d_num_cells; i += blockDim.x * gridDim.x) {
if (d_Cell_has_car[i]) {
atomicAdd(&d_checksum, 1);
}
}
}
int checksum() {
int zero = 0;
cudaMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);
kernel_compute_checksum<<<128, 128>>>();
int result;
cudaMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, cudaMemcpyDeviceToHost);
return result;
}
void step() {
kernel_ProducerCell_create_car<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
step_traffic_lights();
kernel_Car_step_prepare_path<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Car_step_move<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
kernel_Car_commit_occupy<<<
(host_num_cells + kNumBlockSize - 1) / kNumBlockSize,
kNumBlockSize>>>();
gpuErrchk(cudaDeviceSynchronize());
}
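// Allocates one flat device array per Cell/Car field (capacity kMaxNumCells) and
// publishes each pointer through its __device__ symbol; i.e., a hand-written SOA layout.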
void allocate_memory() {
DeviceArray<IndexT, kMaxDegree>* h_Cell_incoming;
cudaMalloc(&h_Cell_incoming, sizeof(DeviceArray<IndexT, kMaxDegree>)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_incoming, &h_Cell_incoming,
sizeof(DeviceArray<IndexT, kMaxDegree>*), 0, cudaMemcpyHostToDevice);
DeviceArray<IndexT, kMaxDegree>* h_Cell_outgoing;
cudaMalloc(&h_Cell_outgoing, sizeof(DeviceArray<IndexT, kMaxDegree>)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_outgoing, &h_Cell_outgoing,
sizeof(DeviceArray<IndexT, kMaxDegree>*), 0, cudaMemcpyHostToDevice);
int* h_Cell_max_velocity;
cudaMalloc(&h_Cell_max_velocity, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_max_velocity, &h_Cell_max_velocity, sizeof(int*),
0, cudaMemcpyHostToDevice);
int* h_Cell_current_max_velocity;
cudaMalloc(&h_Cell_current_max_velocity, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_current_max_velocity, &h_Cell_current_max_velocity, sizeof(int*),
0, cudaMemcpyHostToDevice);
int* h_Cell_num_incoming;
cudaMalloc(&h_Cell_num_incoming, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_num_incoming, &h_Cell_num_incoming, sizeof(int*),
0, cudaMemcpyHostToDevice);
int* h_Cell_num_outgoing;
cudaMalloc(&h_Cell_num_outgoing, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_num_outgoing, &h_Cell_num_outgoing, sizeof(int*),
0, cudaMemcpyHostToDevice);
float* h_Cell_x;
cudaMalloc(&h_Cell_x, sizeof(float)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_x, &h_Cell_x, sizeof(float*),
0, cudaMemcpyHostToDevice);
float* h_Cell_y;
cudaMalloc(&h_Cell_y, sizeof(float)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_y, &h_Cell_y, sizeof(float*),
0, cudaMemcpyHostToDevice);
bool* h_Cell_is_target;
cudaMalloc(&h_Cell_is_target, sizeof(bool)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_is_target, &h_Cell_is_target, sizeof(bool*),
0, cudaMemcpyHostToDevice);
curandState_t* h_Cell_random_state;
cudaMalloc(&h_Cell_random_state, sizeof(curandState_t)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_random_state, &h_Cell_random_state, sizeof(curandState_t*),
0, cudaMemcpyHostToDevice);
char* h_Cell_type;
cudaMalloc(&h_Cell_type, sizeof(char)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_type, &h_Cell_type, sizeof(char*),
0, cudaMemcpyHostToDevice);
curandState_t* h_Car_random_state;
cudaMalloc(&h_Car_random_state, sizeof(curandState_t)*kMaxNumCells);
cudaMemcpyToSymbol(d_Car_random_state, &h_Car_random_state, sizeof(curandState_t*),
0, cudaMemcpyHostToDevice);
DeviceArray<IndexT, kMaxVelocity>* h_Car_path;
cudaMalloc(&h_Car_path, sizeof(DeviceArray<IndexT, kMaxVelocity>)*kMaxNumCells);
cudaMemcpyToSymbol(d_Car_path, &h_Car_path,
sizeof(DeviceArray<IndexT, kMaxVelocity>*),
0, cudaMemcpyHostToDevice);
int* h_Car_path_length;
cudaMalloc(&h_Car_path_length, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Car_path_length, &h_Car_path_length, sizeof(int*),
0, cudaMemcpyHostToDevice);
int* h_Car_velocity;
cudaMalloc(&h_Car_velocity, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Car_velocity, &h_Car_velocity, sizeof(int*),
0, cudaMemcpyHostToDevice);
int* h_Car_max_velocity;
cudaMalloc(&h_Car_max_velocity, sizeof(int)*kMaxNumCells);
cudaMemcpyToSymbol(d_Car_max_velocity, &h_Car_max_velocity, sizeof(int*),
0, cudaMemcpyHostToDevice);
bool* h_Cell_has_car;
cudaMalloc(&h_Cell_has_car, sizeof(bool)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_has_car, &h_Cell_has_car, sizeof(bool*),
0, cudaMemcpyHostToDevice);
bool* h_Cell_should_occupy;
cudaMalloc(&h_Cell_should_occupy, sizeof(bool)*kMaxNumCells);
cudaMemcpyToSymbol(d_Cell_should_occupy, &h_Cell_should_occupy, sizeof(bool*),
0, cudaMemcpyHostToDevice);
int zero = 0;
cudaMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);
}
int main(int /*argc*/, char** /*argv*/) {
allocate_memory();
create_street_network();
auto time_start = std::chrono::system_clock::now();
for (int i = 0; i < kNumIterations; ++i) {
step();
}
auto time_end = std::chrono::system_clock::now();
auto elapsed = time_end - time_start;
auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed)
.count();
#ifndef NDEBUG
printf("Checksum: %i\n", checksum());
#endif // NDEBUG
printf("%lu\n", millis);
}
|
ccf0584aa3fee1cceea2e58018e48804fafe7899.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| ccf0584aa3fee1cceea2e58018e48804fafe7899.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 16, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 16, 16, false,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
567af869c41298bcdd11f223c9d6436b8154b058.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef HOST
/* ======================================================= */
/* Simple implementation of dgemm */
/* ======================================================= */
void vectorAdd(int n, double *a, double *b, double *c) {
int i;
for(i=0; i<n; i++) {
c[i] = a[i] + b[i];
}
}
void print_rst(int n, double *h_c_cuda, double *h_c)
{
int i;
for (i = 0; i < n; i++){
if(i%10 == 0) printf("--->i:%d\n", i);
printf("%.1f(%.1f) ", h_c_cuda[i], h_c[i]);
}
}
int main(int argc, char **argv)
{
double *h_A, *h_B, *h_C, *h_C_simple;
int n2, N;
int i;
int size=0;
struct timeval tv1, tv2;
/* get the size of the matrix from the command line */
if (argc <2 ) N= 1024*10;
else N = atoi(argv[1]);
n2 = N;
size = n2 * sizeof(double);
printf("\nRunning dgemm test for %d by %d matricies.\n", N, N);
/* Allocate host memory for the matrices */
h_A = (double *)malloc(size);
h_B = (double *)malloc(size);
h_C = (double *)malloc(size);
h_C_simple = (double *)malloc(size);
/* Fill the matrices with test data */
for (i = 0; i < n2; i++){
h_A[i] = rand() / (double)RAND_MAX;
h_B[i] = rand() / (double)RAND_MAX;
h_C[i] = rand() / (double)RAND_MAX;
h_C_simple[i] = h_C[i];
}
//print_rst(N, h_C);
printf("\tTesting simple C implementation of dgemm function.\n");
gettimeofday(&tv1, NULL);
/* Performs operation using plain C code */
vectorAdd(N, h_A, h_B, h_C_simple);
gettimeofday(&tv2, NULL);
printf("\t\tdone...\n");
printf("\t\tExecution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
print_rst(N, h_C_simple, h_C);
/* Memory clean up */
free(h_A); free(h_B); free(h_C); free(h_C_simple);
return(0);
}
#else
#define THREADS_PER_BLOCK 128
__constant__ double alpha = 1.0f;
__constant__ double beta = 0.0f;
/* ======================================================= */
/* Cuda implementation of dgemm */
/* ======================================================= */
__global__ void vectorAdd(double *a, double *b, double *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
void print_rst(int n, double *h_c_cuda, double *h_c)
{
int i;
for (i = 0; i < n; i++){
if(i%10 == 0) printf("--->i:%d\n", i);
printf("%.1f(%.1f) ", h_c_cuda[i], h_c[i]);
}
}
int main(int argc, char **argv)
{
double *h_A, *h_B, *h_C, *h_C_cuda;
double *d_A, *d_B, *d_C;
int n2, N;
int i;
int size=0;
struct timeval tv1, tv2;
/* get the size of the matrix from the command line */
//if (argc <2 ) N= 275;
if (argc <2 ) N= 1024*10;
else N = atoi(argv[1]);
n2 = N;
size = n2 * sizeof(double);
printf("\nRunning dgemm test for %d by %d matricies.\n", N, N);
/* Allocate host memory for the matrices */
h_A = (double *)malloc(size);
h_B = (double *)malloc(size);
h_C = (double *)malloc(size);
h_C_cuda = (double *)malloc(size);
/* Fill the matrices with test data */
for (i = 0; i < n2; i++){
h_A[i] = rand() / (double)RAND_MAX;
h_B[i] = rand() / (double)RAND_MAX;
h_C[i] = rand() / (double)RAND_MAX;
h_C_cuda[i] = h_C[i];
}
/* Allocate device memory for the matrices */
hipMalloc( (void**)&d_A, size );
hipMalloc( (void**)&d_B, size );
hipMalloc( (void**)&d_C, size );
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
hipMemcpy(d_C, h_C, size, hipMemcpyHostToDevice);
printf("\tTesting CUDA implementation of dgemm function.\n");
gettimeofday(&tv1, NULL);
/* Performs operation using cuda code */
//cuda_dgemm <<< 1, N>>>(N, alpha, d_A, d_B, beta, d_C);
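// Note: the grid size N/THREADS_PER_BLOCK truncates and the kernel has no bounds check,
// so tail elements beyond the last full block are skipped; the default N = 1024*10 is an
// exact multiple of 128.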
hipLaunchKernelGGL(( vectorAdd) , dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_A, d_B, d_C);
hipMemcpy(h_C_cuda, d_C, size, hipMemcpyDeviceToHost);
gettimeofday(&tv2, NULL);
printf("\t\tdone...\n");
printf("\t\tExecution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
print_rst(N, h_C_cuda, h_C);
/* Memory clean up */
hipFree(d_A); hipFree(d_B); hipFree(d_C);
free(h_A); free(h_B); free(h_C); free(h_C_cuda);
return(0);
}
#endif
| 567af869c41298bcdd11f223c9d6436b8154b058.cu | /* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef HOST
/* ======================================================= */
/* Simple implementation of dgemm */
/* ======================================================= */
void vectorAdd(int n, double *a, double *b, double *c) {
int i;
for(i=0; i<n; i++) {
c[i] = a[i] + b[i];
}
}
void print_rst(int n, double *h_c_cuda, double *h_c)
{
int i;
for (i = 0; i < n; i++){
if(i%10 == 0) printf("--->i:%d\n", i);
printf("%.1f(%.1f) ", h_c_cuda[i], h_c[i]);
}
}
int main(int argc, char **argv)
{
double *h_A, *h_B, *h_C, *h_C_simple;
int n2, N;
int i;
int size=0;
struct timeval tv1, tv2;
/* get the size of the matrix from the command line */
if (argc <2 ) N= 1024*10;
else N = atoi(argv[1]);
n2 = N;
size = n2 * sizeof(double);
printf("\nRunning dgemm test for %d by %d matricies.\n", N, N);
/* Allocate host memory for the matrices */
h_A = (double *)malloc(size);
h_B = (double *)malloc(size);
h_C = (double *)malloc(size);
h_C_simple = (double *)malloc(size);
/* Fill the matrices with test data */
for (i = 0; i < n2; i++){
h_A[i] = rand() / (double)RAND_MAX;
h_B[i] = rand() / (double)RAND_MAX;
h_C[i] = rand() / (double)RAND_MAX;
h_C_simple[i] = h_C[i];
}
//print_rst(N, h_C);
printf("\tTesting simple C implementation of dgemm function.\n");
gettimeofday(&tv1, NULL);
/* Performs operation using plain C code */
vectorAdd(N, h_A, h_B, h_C_simple);
gettimeofday(&tv2, NULL);
printf("\t\tdone...\n");
printf("\t\tExecution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
print_rst(N, h_C_simple, h_C);
/* Memory clean up */
free(h_A); free(h_B); free(h_C); free(h_C_simple);
return(0);
}
#else
#define THREADS_PER_BLOCK 128
__constant__ double alpha = 1.0f;
__constant__ double beta = 0.0f;
/* ======================================================= */
/* Cuda implementation of dgemm */
/* ======================================================= */
__global__ void vectorAdd(double *a, double *b, double *c) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
c[i] = a[i] + b[i];
}
void print_rst(int n, double *h_c_cuda, double *h_c)
{
int i;
for (i = 0; i < n; i++){
if(i%10 == 0) printf("--->i:%d\n", i);
printf("%.1f(%.1f) ", h_c_cuda[i], h_c[i]);
}
}
int main(int argc, char **argv)
{
double *h_A, *h_B, *h_C, *h_C_cuda;
double *d_A, *d_B, *d_C;
int n2, N;
int i;
int size=0;
struct timeval tv1, tv2;
/* get the size of the matrix from the command line */
//if (argc <2 ) N= 275;
if (argc <2 ) N= 1024*10;
else N = atoi(argv[1]);
n2 = N;
size = n2 * sizeof(double);
printf("\nRunning dgemm test for %d by %d matricies.\n", N, N);
/* Allocate host memory for the matrices */
h_A = (double *)malloc(size);
h_B = (double *)malloc(size);
h_C = (double *)malloc(size);
h_C_cuda = (double *)malloc(size);
/* Fill the matrices with test data */
for (i = 0; i < n2; i++){
h_A[i] = rand() / (double)RAND_MAX;
h_B[i] = rand() / (double)RAND_MAX;
h_C[i] = rand() / (double)RAND_MAX;
h_C_cuda[i] = h_C[i];
}
/* Allocate device memory for the matrices */
cudaMalloc( (void**)&d_A, size );
cudaMalloc( (void**)&d_B, size );
cudaMalloc( (void**)&d_C, size );
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
printf("\tTesting CUDA implementation of dgemm function.\n");
gettimeofday(&tv1, NULL);
/* Performs operation using cuda code */
//cuda_dgemm <<< 1, N>>>(N, alpha, d_A, d_B, beta, d_C);
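// Note: the grid size N/THREADS_PER_BLOCK truncates and the kernel has no bounds check,
// so tail elements beyond the last full block are skipped; the default N = 1024*10 is an
// exact multiple of 128.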
vectorAdd <<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_A, d_B, d_C);
cudaMemcpy(h_C_cuda, d_C, size, cudaMemcpyDeviceToHost);
gettimeofday(&tv2, NULL);
printf("\t\tdone...\n");
printf("\t\tExecution time (in millisec): %.2f\n",
(double)(tv2.tv_usec-tv1.tv_usec)/1000 +
(double)(tv2.tv_sec -tv1.tv_sec )*1000);
print_rst(N, h_C_cuda, h_C);
/* Memory clean up */
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
free(h_A); free(h_B); free(h_C); free(h_C_cuda);
return(0);
}
#endif
|
2b24f1b137bc93c631d2b59ebadc69726306e1a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void advNextStep(double *d_prevPoint, double *d_umat, double d_stepDist, int nRxns, double *points, int pointsPerFile, int pointCount, int index){
int newindex= blockIdx.x * blockDim.x + threadIdx.x;
int stride= blockDim.x * gridDim.x;
for(int i=newindex;i<nRxns;i+=stride){
points[pointCount+pointsPerFile*i]=d_prevPoint[nRxns*index+i]+d_stepDist*d_umat[nRxns*index+i];
}
} | 2b24f1b137bc93c631d2b59ebadc69726306e1a4.cu | #include "includes.h"
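// Grid-stride kernel: advances point `index` by stepDist along its direction vector in
// umat, writing one coordinate per reaction into points (stride pointsPerFile per reaction).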
__global__ void advNextStep(double *d_prevPoint, double *d_umat, double d_stepDist, int nRxns, double *points, int pointsPerFile, int pointCount, int index){
int newindex= blockIdx.x * blockDim.x + threadIdx.x;
int stride= blockDim.x * gridDim.x;
for(int i=newindex;i<nRxns;i+=stride){
points[pointCount+pointsPerFile*i]=d_prevPoint[nRxns*index+i]+d_stepDist*d_umat[nRxns*index+i];
}
} |
4ec89b5dc64f5a36e61226ac75ad31641cb9d56e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cudnn_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <hipcub/hipcub.hpp>
namespace oneflow {
namespace {
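// Maps a layer-norm instance onto cuDNN batch-norm descriptors: the affine parameter
// count becomes the channel dimension C and the per-channel extent becomes W.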
class LayerNormCudnnBnCtx final {
public:
LayerNormCudnnBnCtx(const ShapeView& data_shape, const ShapeView& param_shape,
DataType data_type) {
const int64_t cudnn_c = param_shape.elem_cnt();
CHECK_EQ(data_shape.elem_cnt() % cudnn_c, 0);
const int64_t cudnn_w = data_shape.elem_cnt() / cudnn_c;
CHECK_LT(cudnn_c, GetMaxVal<int32_t>());
CHECK_LT(cudnn_w, GetMaxVal<int32_t>());
data_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, data_type, 1,
static_cast<int32_t>(cudnn_c), 1,
static_cast<int32_t>(cudnn_w)));
DataType param_dtype = data_type == DataType::kFloat16 ? DataType::kFloat : data_type;
param_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, param_dtype, 1,
static_cast<int32_t>(cudnn_c), 1, 1));
#if (CUDNN_VERSION >= 7000)
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
}
~LayerNormCudnnBnCtx() = default;
const cudnnTensorDescriptor_t& data_tensor_desc() const { return data_tensor_desc_->Get(); }
const cudnnTensorDescriptor_t& param_tensor_desc() const { return param_tensor_desc_->Get(); }
cudnnBatchNormMode_t mode() const { return mode_; };
private:
std::unique_ptr<CudnnTensorDesc> data_tensor_desc_;
std::unique_ptr<CudnnTensorDesc> param_tensor_desc_;
cudnnBatchNormMode_t mode_;
};
template<typename T, bool do_scale, bool do_center>
__global__ void InstanceScaleCenterGpu(const int64_t elem_cnt, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
CUDA_1D_KERNEL_LOOP_T(int64_t, i, elem_cnt) {
const int64_t elem_id = i % instance_size;
T v = in[i];
if (do_scale) { v *= gamma[elem_id]; }
if (do_center) { v += beta[elem_id]; }
out[i] = v;
}
}
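// half2-vectorized variant: processes two fp16 elements per iteration and therefore
// requires an even instance_size (checked by the caller).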
template<bool do_scale, bool do_center>
__global__ void InstanceScaleCenterH2Gpu(const int64_t h2_elem_cnt, const int64_t h2_instance_size,
const half* in, const half* gamma, const half* beta,
half* out) {
const auto* in_h2 = reinterpret_cast<const half2*>(in);
const auto* gamma_h2 = reinterpret_cast<const half2*>(gamma);
const auto* beta_h2 = reinterpret_cast<const half2*>(beta);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP_T(int64_t, i, h2_elem_cnt) {
const int64_t elem_id = i % h2_instance_size;
half2 v2 = in_h2[i];
if (do_scale) { v2 = __hmul2(v2, gamma_h2[elem_id]); }
if (do_center) { v2 = __hadd2(v2, beta_h2[elem_id]); }
out_h2[i] = v2;
}
}
template<typename T>
void InstanceScaleCenter(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
const int64_t elem_cnt = batch_size * instance_size;
if (beta != nullptr && gamma != nullptr) { // scale and center
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, true, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, true, false>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
hipLaunchKernelGGL(( InstanceScaleCenterGpu<T, false, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt, instance_size, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
void InstanceScaleCenterH2(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const half* in, const half* gamma, const half* beta, half* out) {
CHECK_EQ(instance_size % 2, 0);
const int64_t elem_cnt_h2 = batch_size * instance_size / 2;
const int64_t instance_size_h2 = instance_size / 2;
if (beta != nullptr && gamma != nullptr) { // scale and center
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<true, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<true, false>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
hipLaunchKernelGGL(( InstanceScaleCenterH2Gpu<false, true>)
, dim3(BlocksNum4ThreadsNum(elem_cnt_h2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(),
elem_cnt_h2, instance_size_h2, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
template<>
void InstanceScaleCenter<float16>(DeviceCtx* ctx, const int64_t batch_size,
const int64_t instance_size, const float16* in,
const float16* gamma, const float16* beta, float16* out) {
if (instance_size % 2 == 0) {
InstanceScaleCenterH2(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta),
reinterpret_cast<half*>(out));
} else {
InstanceScaleCenter<half>(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out));
}
}
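// Fused single-pass forward path. LayerNormUtil selects the accumulation type: half values are
// promoted to float for the mean/variance computation, other types accumulate natively.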
constexpr int64_t kLayerNormForwardGpuBlockSize = 256;
template<typename T>
struct LayerNormUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct LayerNormUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
template<typename T>
int GetForwardDynamicSharedMemorySize(const int norm_size) {
return norm_size * sizeof(typename LayerNormUtil<T>::ComputeType);
}
int GetLayerNormForwardBlockSize() { return kLayerNormForwardGpuBlockSize; }
int GetLayerNormForwardNumBlocks(const int num_instances) {
return std::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
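// One block handles one instance at a time (grid-strided over rows). The row is cached in dynamic
// shared memory while sum and sum-of-squares are block-reduced; thread 0 derives the row mean and
// inv_variance = rsqrt(var + epsilon), then all threads normalize from the cached values and apply
// the optional gamma/beta affine transform.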
template<typename T, typename ComputeType>
__global__ void LayerNormForwardImpl(const int num_instances, const int norm_size,
const double epsilon, const T* x, const T* gamma,
const T* beta, ComputeType* mean, ComputeType* inv_variance,
T* normalized, T* y) {
using LU = LayerNormUtil<T>;
extern __shared__ __align__(sizeof(double)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_mean_shared;
__shared__ ComputeType row_inv_var_shared;
typedef hipcub::BlockReduce<ComputeType, kLayerNormForwardGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_mean_reduce_tmp_storage;
__shared__ typename BlockReduce::TempStorage cub_variance_reduce_tmp_storage;
ComputeType inv_norm_size = static_cast<ComputeType>(1.0) / static_cast<ComputeType>(norm_size);
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * norm_size;
const T* x_row = x + row_offset;
ComputeType thread_sum = 0;
ComputeType thread_square_sum = 0;
const int tid = threadIdx.x;
for (int col = tid; col < norm_size; col += blockDim.x) {
const ComputeType val = LU::ToComputeType(x_row[col]);
compute_buf[col] = val;
thread_sum += val;
thread_square_sum += val * val;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_mean_reduce_tmp_storage).Reduce(thread_sum, hipcub::Sum());
ComputeType block_square_sum =
BlockReduce(cub_variance_reduce_tmp_storage).Reduce(thread_square_sum, hipcub::Sum());
if (tid == 0) {
ComputeType row_mean = block_sum * inv_norm_size;
row_mean_shared = row_mean;
mean[row] = row_mean;
ComputeType row_variance =
max(block_square_sum * inv_norm_size - row_mean * row_mean, static_cast<ComputeType>(0));
ComputeType row_inv_var = rsqrt(row_variance + static_cast<ComputeType>(epsilon));
row_inv_var_shared = row_inv_var;
inv_variance[row] = row_inv_var;
}
__syncthreads();
ComputeType mean = row_mean_shared;
ComputeType inv_var = row_inv_var_shared;
for (int col = threadIdx.x; col < norm_size; col += blockDim.x) {
int offset = row_offset + col;
ComputeType val = compute_buf[col];
val = (val - mean) * inv_var;
if (gamma != nullptr || beta != nullptr) {
int elem_id = col;
if (gamma != nullptr) {
normalized[offset] = LU::FromComputeType(val);
val *= LU::ToComputeType(gamma[elem_id]);
}
if (beta != nullptr) { val += LU::ToComputeType(beta[elem_id]); }
}
y[offset] = LU::FromComputeType(val);
}
}
}
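// Launcher: one block per instance (capped at kCudaMaxBlocksNum) with dynamic shared memory sized
// to hold one full row in the compute type.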
template<typename T>
void LayerNormForwardGpu(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const T* x_ptr, const T* gamma_ptr,
const T* beta_ptr, T* normalized_ptr, T* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
hipLaunchKernelGGL(( LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>)
, dim3(GetLayerNormForwardNumBlocks(num_instances)), dim3(GetLayerNormForwardBlockSize()),
GetForwardDynamicSharedMemorySize<T>(norm_size), ctx->cuda_stream(),
num_instances, norm_size, epsilon, x_ptr, gamma_ptr, beta_ptr,
mean->mut_dptr<typename LayerNormUtil<T>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), normalized_ptr, y_ptr);
}
template<>
void LayerNormForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const float16* x_ptr,
const float16* gamma_ptr, const float16* beta_ptr,
float16* normalized_ptr, float16* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
hipLaunchKernelGGL(( LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>)
, dim3(GetLayerNormForwardNumBlocks(num_instances)), dim3(GetLayerNormForwardBlockSize()),
GetForwardDynamicSharedMemorySize<half>(norm_size), ctx->cuda_stream(),
num_instances, norm_size, epsilon, reinterpret_cast<const half*>(x_ptr),
reinterpret_cast<const half*>(gamma_ptr), reinterpret_cast<const half*>(beta_ptr),
mean->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
reinterpret_cast<half*>(normalized_ptr), reinterpret_cast<half*>(y_ptr));
}
int GetForwardFusedKernelMinNormSize() { return 64; }
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int32_t norm_size, const int32_t instance_size) {
if (norm_size >= GetForwardFusedKernelMinNormSize() && norm_size % 32 == 0
&& GetForwardFusedKernelMaxActiveBlocks<T>(norm_size) > 0
&& (instance_size == 0 || norm_size == instance_size)) {
return true;
} else {
return false;
}
}
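// Fused gamma/beta gradient path: each block accumulates partial sums for gamma_diff and beta_diff
// in dynamic shared memory (2 * instance_size accumulators in the compute type).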
constexpr int64_t kLayerNormParamGradGpuBlockSize = 512;
int64_t GetLayerNormParamGradBlockSize() { return kLayerNormParamGradGpuBlockSize; }
int64_t GetLayerNormParamGradNumBlocks(const int64_t elem_cnt) {
return std::min(static_cast<int>((elem_cnt + kLayerNormParamGradGpuBlockSize - 1)
                                 / kLayerNormParamGradGpuBlockSize),
                256);
}
template<typename T>
int64_t GetParamGradDynamicSharedMemorySize(const int64_t instance_size) {
return 2 * instance_size * sizeof(T);
}
template<>
int64_t GetParamGradDynamicSharedMemorySize<float16>(const int64_t instance_size) {
return 2 * instance_size * sizeof(float);
}
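// The shared-memory accumulators are zeroed, filled via shared-memory atomics during the
// grid-stride loop (gamma: dy * normalized, beta: dy), and finally added atomically into the
// global buffers, which the caller zero-initializes. normalized_diff = gamma * dy is written in
// the same pass.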
template<typename T, typename I>
__global__ void LayerNormParamGradImpl(const I n, const I instance_size, const T* dy,
const T* normalized, const T* gamma, T* gamma_diff,
T* beta_diff, T* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<T*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
T dy_val = dy[i];
T normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], dy_val * normalized_val);
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], dy_val);
T gamma_val = gamma[elem_id];
normalized_diff[i] = gamma_val * dy_val;
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
cuda::atomic::Add(gamma_diff + elem_id, gamma_diff_sum_buf[elem_id]);
cuda::atomic::Add(beta_diff + elem_id, beta_diff_sum_buf[elem_id]);
}
}
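// Half-precision variant: accumulates in float and writes each block's partial sums to
// tmp_gamma_diff/tmp_beta_diff at offset blockIdx.x * instance_size; the caller reduces these
// per-block partials afterwards instead of using global atomics on half.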
template<typename I>
__global__ void LayerNormParamGradHalfImpl(const I n, const I instance_size, const half* dy,
const half* normalized, const half* gamma,
half* tmp_gamma_diff, half* tmp_beta_diff,
half* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<float*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
half dy_val = dy[i];
half normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id],
__half2float(dy_val) * __half2float(normalized_val));
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], __half2float(dy_val));
half gamma_val = gamma[elem_id];
normalized_diff[i] = __hmul(gamma_val, dy_val);
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
const I offset = blockIdx.x * instance_size + elem_id;
tmp_gamma_diff[offset] = __float2half(gamma_diff_sum_buf[elem_id]);
tmp_beta_diff[offset] = __float2half(beta_diff_sum_buf[elem_id]);
}
}
} // namespace
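// Forward kernel. Uses the fused single-pass kernel when norm_size is at least 64, a multiple of
// 32, equal to the gamma/beta size (if any), and the kernel reaches non-zero occupancy; otherwise
// it falls back to cudnnBatchNormalizationForwardTraining with constant scale=1/bias=0 parameters
// (filled into tmp_buffer), followed by InstanceScaleCenter for the affine part.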
template<typename T, typename BNParamT>
class LayerNormGpuKernel final : public user_op::OpKernel {
public:
LayerNormGpuKernel() = default;
~LayerNormGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
const bool scale = ctx->Attr<bool>("scale");
const bool center = ctx->Attr<bool>("center");
user_op::Tensor* normalized = scale ? ctx->Tensor4ArgNameAndIndex("normalized", 0) : y;
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
const int32_t num_instances = mean->shape().elem_cnt();
const int32_t norm_size = x->shape().elem_cnt() / num_instances;
int32_t instance_size = 0;
const T* gamma_ptr = nullptr;
const T* beta_ptr = nullptr;
if (scale || center) {
if (scale) {
const user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
instance_size = gamma->shape().elem_cnt();
gamma_ptr = gamma->dptr<T>();
}
if (center) {
const user_op::Tensor* beta = ctx->Tensor4ArgNameAndIndex("beta", 0);
if (gamma_ptr) {
CHECK_EQ(beta->shape().elem_cnt(), instance_size);
} else {
instance_size = beta->shape().elem_cnt();
}
beta_ptr = beta->dptr<T>();
}
CHECK_EQ(y->shape().elem_cnt() % instance_size, 0);
}
if (IsForwardFusedKernelSupported<T>(norm_size, instance_size)) {
LayerNormForwardGpu<T>(ctx->device_ctx(), num_instances, norm_size, epsilon, x->dptr<T>(),
gamma_ptr, beta_ptr, normalized->mut_dptr<T>(), y->mut_dptr<T>(), mean,
inv_variance);
} else {
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_bias_zeros_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(0),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr));
OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(),
bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
normalized->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr), 1.0, nullptr, nullptr, epsilon,
mean->mut_dptr(), inv_variance->mut_dptr()));
if (scale || center) {
const int64_t batch_size = y->shape().elem_cnt() / instance_size;
InstanceScaleCenter<T>(ctx->device_ctx(), batch_size, instance_size, normalized->dptr<T>(),
gamma_ptr, beta_ptr, y->mut_dptr<T>());
}
}
};
};
#define REGISTER_LAYER_NORM_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm") \
.SetCreateFn<LayerNormGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->OutputTensorDesc("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 2; \
});
REGISTER_LAYER_NORM_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GPU_KERNEL(float16, float)
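// Backward kernel for dx: runs cudnnBatchNormalizationBackward with a ones vector as the BN scale;
// the scale/bias gradients it produces go into scratch buffers and are discarded. When
// _add_to_output is present, its contents are copied into dx and sp_beta = 1 makes cudnn
// accumulate into it.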
template<typename T, typename BNParamT>
class LayerNormGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormGradGpuKernel() = default;
~LayerNormGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
const user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_scale_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
// Point the bias-diff scratch at the third aligned region of tmp_buffer (the InferTmpSizeFn
// reserves three), so it does not alias the scale-diff scratch.
char* cudnn_bn_bias_diff_buf_dptr = cudnn_bn_scale_diff_buf_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
const void* sp_alpha = CudnnSPOnePtr<T>();
const void* sp_beta;
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
CHECK_EQ(add_to_output->data_type(), dx->data_type());
CHECK_EQ(add_to_output->shape(), dx->shape());
Memcpy<DeviceType::kGPU>(
ctx->device_ctx(), dx->mut_dptr<void>(), add_to_output->dptr<void>(),
add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type()));
sp_beta = CudnnSPOnePtr<T>();
} else {
sp_beta = CudnnSPZeroPtr<T>();
}
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
OF_CUDNN_CHECK(cudnnBatchNormalizationBackward(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), sp_alpha, sp_beta, CudnnSPOnePtr<T>(),
CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
dy->dptr<T>(), bn_ctx.data_tensor_desc(), dx->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<const BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_diff_buf_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_diff_buf_dptr), epsilon, mean->dptr(),
inv_variance->dptr()));
};
};
#define REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm_grad") \
.SetCreateFn<LayerNormGradGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
const user_op::TensorDesc& mean = ctx->InputTensorDesc("mean", 0); \
const DataType& data_type = mean.data_type(); \
const int64_t elem_cnt = mean.shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 3; \
}) \
.SetInplaceProposalFn([](const user_op::InferContext& ctx, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
if (ctx.has_input("_add_to_output", 0)) { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "_add_to_output", 0, true)); \
} \
return Maybe<void>::Ok(); \
});
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float16, float)
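// Parameter-gradient kernel: uses the fused shared-memory kernel when gamma_diff, beta_diff and
// normalized_diff are all requested and the kernel reaches non-zero occupancy; otherwise each
// requested output is computed with NdarrayUtil broadcast/reduce primitives (or a plain copy of dy
// for normalized_diff when gamma is absent).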
template<typename T>
class LayerNormParamGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuKernel() = default;
~LayerNormParamGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, T>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradImpl<T, int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), gamma_diff->mut_dptr<T>(), 0,
gamma_diff->shape().elem_cnt() * sizeof(T));
Memset<DeviceType::kGPU>(ctx->device_ctx(), beta_diff->mut_dptr<T>(), 0,
beta_diff->shape().elem_cnt() * sizeof(T));
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
hipLaunchKernelGGL(( LayerNormParamGradImpl<T, int64_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream(),
elem_cnt, m, dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(),
gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(),
normalized_diff->mut_dptr<T>());
} else {
hipLaunchKernelGGL(( LayerNormParamGradImpl<T, int32_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream(),
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<T>(),
normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(),
beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>());
}
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<T>()),
Val({n, m}, normalized->dptr<T>()), Val({n, m}, dy->dptr<T>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<T>()),
Val({n, m}, reduce_buf->dptr<T>()),
Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Val({1, m}, gamma->dptr<T>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
};
};
#define REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("layer_norm_param_grad") \
.SetCreateFn<LayerNormParamGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value));
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(float)
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(double)
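// float16 specialization: the fused path writes per-block partial sums into tmp_buffer (three
// equally sized aligned regions: gamma partials, beta partials, reduce scratch) and combines them
// with NdUtil::ReduceSum instead of global atomics.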
class LayerNormParamGradGpuHalfKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuHalfKernel() = default;
~LayerNormParamGradGpuHalfKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, float16>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradHalfImpl<int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int64_t num_blocks = GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt());
const size_t tmp_diff_size = GetCudaAlignedSize(num_blocks * m * sizeof(float16));
float16* tmp_gamma_diff = tmp_buffer->mut_dptr<float16>();
float16* tmp_beta_diff =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + tmp_diff_size);
float16* tmp_reduce_buf =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + 2 * tmp_diff_size);
CHECK_GE(tmp_buffer->shape().elem_cnt(), 3 * tmp_diff_size);
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
hipLaunchKernelGGL(( LayerNormParamGradHalfImpl<int64_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream(),
elem_cnt, m, dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
} else {
hipLaunchKernelGGL(( LayerNormParamGradHalfImpl<int32_t>)
, dim3(GetLayerNormParamGradNumBlocks(elem_cnt)), dim3(GetLayerNormParamGradBlockSize()),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream(),
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<half>(),
normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
}
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_gamma_diff), Var({num_blocks, m}, tmp_reduce_buf));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_beta_diff), Var({num_blocks, m}, tmp_reduce_buf));
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<float16>()),
Val({n, m}, normalized->dptr<float16>()),
Val({n, m}, dy->dptr<float16>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({n, m}, reduce_buf->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Val({1, m}, gamma->dptr<float16>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
}
};
REGISTER_USER_KERNEL("layer_norm_param_grad")
.SetCreateFn<LayerNormParamGradGpuHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dy", 0) == DataType::kFloat16))
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const bool has_gamma_diff = ctx->has_output("gamma_diff", 0);
const bool has_beta_diff = ctx->has_output("beta_diff", 0);
const bool has_normalized_diff = ctx->has_output("normalized_diff", 0);
const auto& dy = ctx->InputTensorDesc("dy", 0);
const int64_t instance_size = dy.shape().Count(begin_params_axis);
size_t tmp_buffer_size = 0;
if (has_gamma_diff && has_beta_diff && has_normalized_diff) {
const size_t tmp_gamma_diff =
GetCudaAlignedSize(GetLayerNormParamGradNumBlocks(dy.shape().elem_cnt()) * instance_size
* sizeof(float16));
const size_t tmp_beta_diff = tmp_gamma_diff;
const size_t tmp_reduce_buf = tmp_gamma_diff;
tmp_buffer_size = tmp_gamma_diff + tmp_beta_diff + tmp_reduce_buf;
} else {
tmp_buffer_size = 0;
}
return tmp_buffer_size;
});
} // namespace oneflow
| 4ec89b5dc64f5a36e61226ac75ad31641cb9d56e.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cudnn_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/ndarray/ndarray_util.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <cub/cub.cuh>
namespace oneflow {
namespace {
class LayerNormCudnnBnCtx final {
public:
LayerNormCudnnBnCtx(const ShapeView& data_shape, const ShapeView& param_shape,
DataType data_type) {
const int64_t cudnn_c = param_shape.elem_cnt();
CHECK_EQ(data_shape.elem_cnt() % cudnn_c, 0);
const int64_t cudnn_w = data_shape.elem_cnt() / cudnn_c;
CHECK_LT(cudnn_c, GetMaxVal<int32_t>());
CHECK_LT(cudnn_w, GetMaxVal<int32_t>());
data_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, data_type, 1,
static_cast<int32_t>(cudnn_c), 1,
static_cast<int32_t>(cudnn_w)));
DataType param_dtype = data_type == DataType::kFloat16 ? DataType::kFloat : data_type;
param_tensor_desc_.reset(new CudnnTensorDesc(CUDNN_TENSOR_NCHW, param_dtype, 1,
static_cast<int32_t>(cudnn_c), 1, 1));
#if (CUDNN_VERSION >= 7000)
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
#else
mode_ = CUDNN_BATCHNORM_SPATIAL;
#endif
}
~LayerNormCudnnBnCtx() = default;
const cudnnTensorDescriptor_t& data_tensor_desc() const { return data_tensor_desc_->Get(); }
const cudnnTensorDescriptor_t& param_tensor_desc() const { return param_tensor_desc_->Get(); }
cudnnBatchNormMode_t mode() const { return mode_; };
private:
std::unique_ptr<CudnnTensorDesc> data_tensor_desc_;
std::unique_ptr<CudnnTensorDesc> param_tensor_desc_;
cudnnBatchNormMode_t mode_;
};
template<typename T, bool do_scale, bool do_center>
__global__ void InstanceScaleCenterGpu(const int64_t elem_cnt, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
CUDA_1D_KERNEL_LOOP_T(int64_t, i, elem_cnt) {
const int64_t elem_id = i % instance_size;
T v = in[i];
if (do_scale) { v *= gamma[elem_id]; }
if (do_center) { v += beta[elem_id]; }
out[i] = v;
}
}
template<bool do_scale, bool do_center>
__global__ void InstanceScaleCenterH2Gpu(const int64_t h2_elem_cnt, const int64_t h2_instance_size,
const half* in, const half* gamma, const half* beta,
half* out) {
const auto* in_h2 = reinterpret_cast<const half2*>(in);
const auto* gamma_h2 = reinterpret_cast<const half2*>(gamma);
const auto* beta_h2 = reinterpret_cast<const half2*>(beta);
auto* out_h2 = reinterpret_cast<half2*>(out);
CUDA_1D_KERNEL_LOOP_T(int64_t, i, h2_elem_cnt) {
const int64_t elem_id = i % h2_instance_size;
half2 v2 = in_h2[i];
if (do_scale) { v2 = __hmul2(v2, gamma_h2[elem_id]); }
if (do_center) { v2 = __hadd2(v2, beta_h2[elem_id]); }
out_h2[i] = v2;
}
}
template<typename T>
void InstanceScaleCenter(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const T* in, const T* gamma, const T* beta, T* out) {
const int64_t elem_cnt = batch_size * instance_size;
if (beta != nullptr && gamma != nullptr) { // scale and center
InstanceScaleCenterGpu<T, true, true>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
InstanceScaleCenterGpu<T, true, false>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
InstanceScaleCenterGpu<T, false, true>
<<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt, instance_size, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
void InstanceScaleCenterH2(DeviceCtx* ctx, const int64_t batch_size, const int64_t instance_size,
const half* in, const half* gamma, const half* beta, half* out) {
CHECK_EQ(instance_size % 2, 0);
const int64_t elem_cnt_h2 = batch_size * instance_size / 2;
const int64_t instance_size_h2 = instance_size / 2;
if (beta != nullptr && gamma != nullptr) { // scale and center
InstanceScaleCenterH2Gpu<true, true>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, gamma, beta, out);
} else if (gamma != nullptr) { // scale only
InstanceScaleCenterH2Gpu<true, false>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, gamma, nullptr, out);
} else if (beta != nullptr) { // center only
InstanceScaleCenterH2Gpu<false, true>
<<<BlocksNum4ThreadsNum(elem_cnt_h2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>(
elem_cnt_h2, instance_size_h2, in, nullptr, beta, out);
} else {
UNIMPLEMENTED();
}
}
template<>
void InstanceScaleCenter<float16>(DeviceCtx* ctx, const int64_t batch_size,
const int64_t instance_size, const float16* in,
const float16* gamma, const float16* beta, float16* out) {
if (instance_size % 2 == 0) {
InstanceScaleCenterH2(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma), reinterpret_cast<const half*>(beta),
reinterpret_cast<half*>(out));
} else {
InstanceScaleCenter<half>(ctx, batch_size, instance_size, reinterpret_cast<const half*>(in),
reinterpret_cast<const half*>(gamma),
reinterpret_cast<const half*>(beta), reinterpret_cast<half*>(out));
}
}
constexpr int64_t kLayerNormForwardGpuBlockSize = 256;
template<typename T>
struct LayerNormUtil {
using ComputeType = T;
__device__ static ComputeType ToComputeType(T v) { return v; }
__device__ static T FromComputeType(ComputeType v) { return v; }
};
template<>
struct LayerNormUtil<half> {
using ComputeType = float;
__device__ static ComputeType ToComputeType(half v) { return __half2float(v); }
__device__ static half FromComputeType(ComputeType v) { return __float2half(v); }
};
template<typename T>
int GetForwardDynamicSharedMemorySize(const int norm_size) {
return norm_size * sizeof(typename LayerNormUtil<T>::ComputeType);
}
int GetLayerNormForwardBlockSize() { return kLayerNormForwardGpuBlockSize; }
int GetLayerNormForwardNumBlocks(const int num_instances) {
return std::min(static_cast<int>(num_instances), kCudaMaxBlocksNum);
}
template<typename T, typename ComputeType>
__global__ void LayerNormForwardImpl(const int num_instances, const int norm_size,
const double epsilon, const T* x, const T* gamma,
const T* beta, ComputeType* mean, ComputeType* inv_variance,
T* normalized, T* y) {
using LU = LayerNormUtil<T>;
extern __shared__ __align__(sizeof(double)) unsigned char fw_shared_buf[];
auto* compute_buf = reinterpret_cast<ComputeType*>(fw_shared_buf);
__shared__ ComputeType row_mean_shared;
__shared__ ComputeType row_inv_var_shared;
typedef cub::BlockReduce<ComputeType, kLayerNormForwardGpuBlockSize> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_mean_reduce_tmp_storage;
__shared__ typename BlockReduce::TempStorage cub_variance_reduce_tmp_storage;
ComputeType inv_norm_size = static_cast<ComputeType>(1.0) / static_cast<ComputeType>(norm_size);
for (int row = blockIdx.x; row < num_instances; row += gridDim.x) {
const int row_offset = row * norm_size;
const T* x_row = x + row_offset;
ComputeType thread_sum = 0;
ComputeType thread_square_sum = 0;
const int tid = threadIdx.x;
for (int col = tid; col < norm_size; col += blockDim.x) {
const ComputeType val = LU::ToComputeType(x_row[col]);
compute_buf[col] = val;
thread_sum += val;
thread_square_sum += val * val;
}
__syncthreads();
ComputeType block_sum = BlockReduce(cub_mean_reduce_tmp_storage).Reduce(thread_sum, cub::Sum());
ComputeType block_square_sum =
BlockReduce(cub_variance_reduce_tmp_storage).Reduce(thread_square_sum, cub::Sum());
if (tid == 0) {
ComputeType row_mean = block_sum * inv_norm_size;
row_mean_shared = row_mean;
mean[row] = row_mean;
ComputeType row_variance =
max(block_square_sum * inv_norm_size - row_mean * row_mean, static_cast<ComputeType>(0));
ComputeType row_inv_var = rsqrt(row_variance + static_cast<ComputeType>(epsilon));
row_inv_var_shared = row_inv_var;
inv_variance[row] = row_inv_var;
}
__syncthreads();
ComputeType mean = row_mean_shared;
ComputeType inv_var = row_inv_var_shared;
for (int col = threadIdx.x; col < norm_size; col += blockDim.x) {
int offset = row_offset + col;
ComputeType val = compute_buf[col];
val = (val - mean) * inv_var;
if (gamma != nullptr || beta != nullptr) {
int elem_id = col;
if (gamma != nullptr) {
normalized[offset] = LU::FromComputeType(val);
val *= LU::ToComputeType(gamma[elem_id]);
}
if (beta != nullptr) { val += LU::ToComputeType(beta[elem_id]); }
}
y[offset] = LU::FromComputeType(val);
}
}
}
template<typename T>
void LayerNormForwardGpu(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const T* x_ptr, const T* gamma_ptr,
const T* beta_ptr, T* normalized_ptr, T* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>
<<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(),
GetForwardDynamicSharedMemorySize<T>(norm_size), ctx->cuda_stream()>>>(
num_instances, norm_size, epsilon, x_ptr, gamma_ptr, beta_ptr,
mean->mut_dptr<typename LayerNormUtil<T>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<T>::ComputeType>(), normalized_ptr, y_ptr);
}
template<>
void LayerNormForwardGpu<float16>(DeviceCtx* ctx, const int num_instances, const int norm_size,
const double epsilon, const float16* x_ptr,
const float16* gamma_ptr, const float16* beta_ptr,
float16* normalized_ptr, float16* y_ptr, user_op::Tensor* mean,
user_op::Tensor* inv_variance) {
LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>
<<<GetLayerNormForwardNumBlocks(num_instances), GetLayerNormForwardBlockSize(),
GetForwardDynamicSharedMemorySize<half>(norm_size), ctx->cuda_stream()>>>(
num_instances, norm_size, epsilon, reinterpret_cast<const half*>(x_ptr),
reinterpret_cast<const half*>(gamma_ptr), reinterpret_cast<const half*>(beta_ptr),
mean->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
inv_variance->mut_dptr<typename LayerNormUtil<half>::ComputeType>(),
reinterpret_cast<half*>(normalized_ptr), reinterpret_cast<half*>(y_ptr));
}
int GetForwardFusedKernelMinNormSize() { return 64; }
template<typename T>
int GetForwardFusedKernelMaxActiveBlocks(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<T, typename LayerNormUtil<T>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<T>(norm_size)));
return max_active_blocks;
}
template<>
int GetForwardFusedKernelMaxActiveBlocks<float16>(const int32_t norm_size) {
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormForwardImpl<half, typename LayerNormUtil<half>::ComputeType>,
GetLayerNormForwardBlockSize(), GetForwardDynamicSharedMemorySize<half>(norm_size)));
return max_active_blocks;
}
template<typename T>
bool IsForwardFusedKernelSupported(const int32_t norm_size, const int32_t instance_size) {
if (norm_size >= GetForwardFusedKernelMinNormSize() && norm_size % 32 == 0
&& GetForwardFusedKernelMaxActiveBlocks<T>(norm_size) > 0
&& (instance_size == 0 || norm_size == instance_size)) {
return true;
} else {
return false;
}
}
constexpr int64_t kLayerNormParamGradGpuBlockSize = 512;
int64_t GetLayerNormParamGradBlockSize() { return kLayerNormParamGradGpuBlockSize; }
int64_t GetLayerNormParamGradNumBlocks(const int64_t elem_cnt) {
return std::min(static_cast<int>((elem_cnt + kLayerNormParamGradGpuBlockSize - 1)
/ kLayerNormParamGradGpuBlockSize),
256);
}
template<typename T>
int64_t GetParamGradDynamicSharedMemorySize(const int64_t instance_size) {
return 2 * instance_size * sizeof(T);
}
template<>
int64_t GetParamGradDynamicSharedMemorySize<float16>(const int64_t instance_size) {
return 2 * instance_size * sizeof(float);
}
template<typename T, typename I>
__global__ void LayerNormParamGradImpl(const I n, const I instance_size, const T* dy,
const T* normalized, const T* gamma, T* gamma_diff,
T* beta_diff, T* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<T*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
T dy_val = dy[i];
T normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id], dy_val * normalized_val);
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], dy_val);
T gamma_val = gamma[elem_id];
normalized_diff[i] = gamma_val * dy_val;
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
cuda::atomic::Add(gamma_diff + elem_id, gamma_diff_sum_buf[elem_id]);
cuda::atomic::Add(beta_diff + elem_id, beta_diff_sum_buf[elem_id]);
}
}
template<typename I>
__global__ void LayerNormParamGradHalfImpl(const I n, const I instance_size, const half* dy,
const half* normalized, const half* gamma,
half* tmp_gamma_diff, half* tmp_beta_diff,
half* normalized_diff) {
extern __shared__ __align__(sizeof(double)) unsigned char bw_shared_buf[];
auto* gamma_diff_sum_buf = reinterpret_cast<float*>(bw_shared_buf);
auto* beta_diff_sum_buf = gamma_diff_sum_buf + instance_size;
const I tid = threadIdx.x;
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
gamma_diff_sum_buf[elem_id] = 0;
beta_diff_sum_buf[elem_id] = 0;
}
__syncthreads();
CUDA_1D_KERNEL_LOOP_T(I, i, n) {
const I elem_id = i % instance_size;
half dy_val = dy[i];
half normalized_val = normalized[i];
cuda::atomic::Add(&gamma_diff_sum_buf[elem_id],
__half2float(dy_val) * __half2float(normalized_val));
cuda::atomic::Add(&beta_diff_sum_buf[elem_id], __half2float(dy_val));
half gamma_val = gamma[elem_id];
normalized_diff[i] = __hmul(gamma_val, dy_val);
}
__syncthreads();
for (I elem_id = tid; elem_id < instance_size; elem_id += blockDim.x) {
const I offset = blockIdx.x * instance_size + elem_id;
tmp_gamma_diff[offset] = __float2half(gamma_diff_sum_buf[elem_id]);
tmp_beta_diff[offset] = __float2half(beta_diff_sum_buf[elem_id]);
}
}
} // namespace
template<typename T, typename BNParamT>
class LayerNormGpuKernel final : public user_op::OpKernel {
public:
LayerNormGpuKernel() = default;
~LayerNormGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
const bool scale = ctx->Attr<bool>("scale");
const bool center = ctx->Attr<bool>("center");
user_op::Tensor* normalized = scale ? ctx->Tensor4ArgNameAndIndex("normalized", 0) : y;
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
const int32_t num_instances = mean->shape().elem_cnt();
const int32_t norm_size = x->shape().elem_cnt() / num_instances;
int32_t instance_size = 0;
const T* gamma_ptr = nullptr;
const T* beta_ptr = nullptr;
if (scale || center) {
if (scale) {
const user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
instance_size = gamma->shape().elem_cnt();
gamma_ptr = gamma->dptr<T>();
}
if (center) {
const user_op::Tensor* beta = ctx->Tensor4ArgNameAndIndex("beta", 0);
if (gamma_ptr) {
CHECK_EQ(beta->shape().elem_cnt(), instance_size);
} else {
instance_size = beta->shape().elem_cnt();
}
beta_ptr = beta->dptr<T>();
}
CHECK_EQ(y->shape().elem_cnt() % instance_size, 0);
}
if (IsForwardFusedKernelSupported<T>(norm_size, instance_size)) {
LayerNormForwardGpu<T>(ctx->device_ctx(), num_instances, norm_size, epsilon, x->dptr<T>(),
gamma_ptr, beta_ptr, normalized->mut_dptr<T>(), y->mut_dptr<T>(), mean,
inv_variance);
} else {
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_bias_zeros_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(0),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr));
OF_CUDNN_CHECK(cudnnBatchNormalizationForwardTraining(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), CudnnSPOnePtr<T>(), CudnnSPZeroPtr<T>(),
bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
normalized->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_zeros_dptr), 1.0, nullptr, nullptr, epsilon,
mean->mut_dptr(), inv_variance->mut_dptr()));
if (scale || center) {
const int64_t batch_size = y->shape().elem_cnt() / instance_size;
InstanceScaleCenter<T>(ctx->device_ctx(), batch_size, instance_size, normalized->dptr<T>(),
gamma_ptr, beta_ptr, y->mut_dptr<T>());
}
}
};
};
#define REGISTER_LAYER_NORM_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm") \
.SetCreateFn<LayerNormGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
user_op::TensorDesc* mean = ctx->OutputTensorDesc("mean", 0); \
const DataType& data_type = mean->data_type(); \
const int64_t elem_cnt = mean->shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 2; \
});
REGISTER_LAYER_NORM_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GPU_KERNEL(float16, float)
template<typename T, typename BNParamT>
class LayerNormGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormGradGpuKernel() = default;
~LayerNormGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
const user_op::Tensor* mean = ctx->Tensor4ArgNameAndIndex("mean", 0);
const user_op::Tensor* inv_variance = ctx->Tensor4ArgNameAndIndex("inv_variance", 0);
user_op::Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const size_t aligned_buffer_size =
GetCudaAlignedSize(mean->shape().elem_cnt() * GetSizeOfDataType(mean->data_type()));
char* cudnn_bn_scale_ones_dptr = tmp_buffer->mut_dptr<char>();
char* cudnn_bn_scale_diff_buf_dptr = cudnn_bn_scale_ones_dptr + aligned_buffer_size;
char* cudnn_bn_bias_diff_buf_dptr = cudnn_bn_scale_diff_buf_dptr + aligned_buffer_size;
NewKernelUtil<DeviceType::kGPU>::Fill(ctx->device_ctx(), mean->shape().elem_cnt(),
static_cast<BNParamT>(1),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_ones_dptr));
const void* sp_alpha = CudnnSPOnePtr<T>();
const void* sp_beta;
if (ctx->has_input("_add_to_output", 0)) {
const user_op::Tensor* add_to_output = ctx->Tensor4ArgNameAndIndex("_add_to_output", 0);
CHECK_EQ(add_to_output->data_type(), dx->data_type());
CHECK_EQ(add_to_output->shape(), dx->shape());
Memcpy<DeviceType::kGPU>(
ctx->device_ctx(), dx->mut_dptr<void>(), add_to_output->dptr<void>(),
add_to_output->shape().elem_cnt() * GetSizeOfDataType(add_to_output->data_type()));
sp_beta = CudnnSPOnePtr<T>();
} else {
sp_beta = CudnnSPZeroPtr<T>();
}
const double epsilon = ctx->Attr<double>("epsilon");
CHECK_GE(epsilon, CUDNN_BN_MIN_EPSILON);
LayerNormCudnnBnCtx bn_ctx(x->shape(), mean->shape(), x->data_type());
OF_CUDNN_CHECK(cudnnBatchNormalizationBackward(
ctx->device_ctx()->cudnn_handle(), bn_ctx.mode(), sp_alpha, sp_beta, CudnnSPOnePtr<T>(),
CudnnSPZeroPtr<T>(), bn_ctx.data_tensor_desc(), x->dptr<T>(), bn_ctx.data_tensor_desc(),
dy->dptr<T>(), bn_ctx.data_tensor_desc(), dx->mut_dptr<T>(), bn_ctx.param_tensor_desc(),
reinterpret_cast<const BNParamT*>(cudnn_bn_scale_ones_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_scale_diff_buf_dptr),
reinterpret_cast<BNParamT*>(cudnn_bn_bias_diff_buf_dptr), epsilon, mean->dptr(),
inv_variance->dptr()));
};
};
#define REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(dtype, bn_param_dtype) \
REGISTER_USER_KERNEL("layer_norm_grad") \
.SetCreateFn<LayerNormGradGpuKernel<dtype, bn_param_dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](oneflow::user_op::InferContext* ctx) { \
const user_op::TensorDesc& mean = ctx->InputTensorDesc("mean", 0); \
const DataType& data_type = mean.data_type(); \
const int64_t elem_cnt = mean.shape().elem_cnt(); \
return GetCudaAlignedSize(elem_cnt * GetSizeOfDataType(data_type)) * 3; \
}) \
.SetInplaceProposalFn([](const user_op::InferContext& ctx, \
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> { \
if (ctx.has_input("_add_to_output", 0)) { \
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "_add_to_output", 0, true)); \
} \
return Maybe<void>::Ok(); \
});
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float, float)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(double, double)
REGISTER_LAYER_NORM_GRAD_GPU_KERNEL(float16, float)
template<typename T>
class LayerNormParamGradGpuKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuKernel() = default;
~LayerNormParamGradGpuKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, T>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradImpl<T, int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
Memset<DeviceType::kGPU>(ctx->device_ctx(), gamma_diff->mut_dptr<T>(), 0,
gamma_diff->shape().elem_cnt() * sizeof(T));
Memset<DeviceType::kGPU>(ctx->device_ctx(), beta_diff->mut_dptr<T>(), 0,
beta_diff->shape().elem_cnt() * sizeof(T));
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
LayerNormParamGradImpl<T, int64_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, m, dy->dptr<T>(), normalized->dptr<T>(), gamma->dptr<T>(),
gamma_diff->mut_dptr<T>(), beta_diff->mut_dptr<T>(),
normalized_diff->mut_dptr<T>());
} else {
LayerNormParamGradImpl<T, int32_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<T>(m), ctx->device_ctx()->cuda_stream()>>>(
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<T>(),
normalized->dptr<T>(), gamma->dptr<T>(), gamma_diff->mut_dptr<T>(),
beta_diff->mut_dptr<T>(), normalized_diff->mut_dptr<T>());
}
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<T>()),
Val({n, m}, normalized->dptr<T>()), Val({n, m}, dy->dptr<T>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<T>()),
Val({n, m}, reduce_buf->dptr<T>()),
Var({n, m}, reduce_buf->mut_dptr<T>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<T>()),
Val({n, m}, dy->dptr<T>()), Val({1, m}, gamma->dptr<T>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
};
};
#define REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("layer_norm_param_grad") \
.SetCreateFn<LayerNormParamGradGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \
& (user_op::HobDataType("dy", 0) == GetDataType<dtype>::value));
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(float)
REGISTER_LAYER_NORM_PARAM_GRAD_GPU_KERNEL(double)
class LayerNormParamGradGpuHalfKernel final : public user_op::OpKernel {
public:
LayerNormParamGradGpuHalfKernel() = default;
~LayerNormParamGradGpuHalfKernel() = default;
private:
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
void Compute(user_op::KernelComputeContext* ctx) const override {
using NdUtil = NdarrayUtil<DeviceType::kGPU, float16>;
auto Val = NdUtil::GetValNdarrayBuilder();
auto Var = NdUtil::GetVarNdarrayBuilder();
const user_op::Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
user_op::Tensor* beta_diff = ctx->Tensor4ArgNameAndIndex("beta_diff", 0);
user_op::Tensor* gamma_diff = ctx->Tensor4ArgNameAndIndex("gamma_diff", 0);
user_op::Tensor* normalized_diff = ctx->Tensor4ArgNameAndIndex("normalized_diff", 0);
user_op::Tensor* gamma = ctx->Tensor4ArgNameAndIndex("gamma", 0);
const bool has_beta_diff = beta_diff != nullptr;
const bool has_gamma_diff = gamma_diff != nullptr;
const bool has_normalized_diff = normalized_diff != nullptr;
const bool has_gamma = gamma != nullptr;
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const int64_t elem_cnt = dy->shape().elem_cnt();
const int64_t m = dy->shape().Count(begin_params_axis);
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, LayerNormParamGradHalfImpl<int64_t>, GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m)));
if (has_gamma_diff && has_beta_diff && has_normalized_diff && max_active_blocks > 0) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const int64_t num_blocks = GetLayerNormParamGradNumBlocks(dy->shape().elem_cnt());
const size_t tmp_diff_size = GetCudaAlignedSize(num_blocks * m * sizeof(float16));
float16* tmp_gamma_diff = tmp_buffer->mut_dptr<float16>();
float16* tmp_beta_diff =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + tmp_diff_size);
float16* tmp_reduce_buf =
reinterpret_cast<float16*>(tmp_buffer->mut_dptr<char>() + 2 * tmp_diff_size);
CHECK_GE(tmp_buffer->shape().elem_cnt(), 3 * tmp_diff_size);
if (elem_cnt > static_cast<int64_t>(GetMaxVal<int32_t>() / 2)) {
LayerNormParamGradHalfImpl<int64_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>(
elem_cnt, m, dy->dptr<half>(), normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
} else {
LayerNormParamGradHalfImpl<int32_t>
<<<GetLayerNormParamGradNumBlocks(elem_cnt), GetLayerNormParamGradBlockSize(),
GetParamGradDynamicSharedMemorySize<float16>(m), ctx->device_ctx()->cuda_stream()>>>(
static_cast<int32_t>(elem_cnt), static_cast<int32_t>(m), dy->dptr<half>(),
normalized->dptr<half>(), gamma->dptr<half>(),
reinterpret_cast<half*>(tmp_gamma_diff), reinterpret_cast<half*>(tmp_beta_diff),
normalized_diff->mut_dptr<half>());
}
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_gamma_diff), Var({num_blocks, m}, tmp_reduce_buf));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({num_blocks, m}, tmp_beta_diff), Var({num_blocks, m}, tmp_reduce_buf));
} else {
if (has_beta_diff) {
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, beta_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, beta_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_gamma_diff) {
const user_op::Tensor* normalized = ctx->Tensor4ArgNameAndIndex("normalized", 0);
user_op::Tensor* reduce_buf = ctx->Tensor4ArgNameAndIndex("reduce_buf", 0);
CHECK_EQ(m, gamma_diff->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, reduce_buf->mut_dptr<float16>()),
Val({n, m}, normalized->dptr<float16>()),
Val({n, m}, dy->dptr<float16>()));
NdUtil::ReduceSum(ctx->device_ctx(), Var({1, m}, gamma_diff->mut_dptr<float16>()),
Val({n, m}, reduce_buf->dptr<float16>()),
Var({n, m}, reduce_buf->mut_dptr<float16>()));
}
if (has_normalized_diff) {
if (has_gamma) {
CHECK_EQ(m, gamma->shape().elem_cnt());
CHECK_EQ(dy->shape().elem_cnt() % m, 0);
const int64_t n = dy->shape().elem_cnt() / m;
NdUtil::BroadcastMul(ctx->device_ctx(), Var({n, m}, normalized_diff->mut_dptr<float16>()),
Val({n, m}, dy->dptr<float16>()),
Val({1, m}, gamma->dptr<float16>()));
} else {
Memcpy<DeviceType::kGPU>(ctx->device_ctx(), normalized_diff->mut_dptr<void>(),
dy->dptr<void>(),
dy->shape().elem_cnt() * GetSizeOfDataType(dy->data_type()));
}
}
}
}
};
REGISTER_USER_KERNEL("layer_norm_param_grad")
.SetCreateFn<LayerNormParamGradGpuHalfKernel>()
.SetIsMatchedHob((user_op::HobDeviceTag() == "gpu")
& (user_op::HobDataType("dy", 0) == DataType::kFloat16))
.SetInferTmpSizeFn([](user_op::InferContext* ctx) {
const int64_t begin_params_axis = ctx->Attr<int64_t>("begin_params_axis");
const bool has_gamma_diff = ctx->has_output("gamma_diff", 0);
const bool has_beta_diff = ctx->has_output("beta_diff", 0);
const bool has_normalized_diff = ctx->has_output("normalized_diff", 0);
const auto& dy = ctx->InputTensorDesc("dy", 0);
const int64_t instance_size = dy.shape().Count(begin_params_axis);
size_t tmp_buffer_size = 0;
if (has_gamma_diff && has_beta_diff && has_normalized_diff) {
const size_t tmp_gamma_diff =
GetCudaAlignedSize(GetLayerNormParamGradNumBlocks(dy.shape().elem_cnt()) * instance_size
* sizeof(float16));
const size_t tmp_beta_diff = tmp_gamma_diff;
const size_t tmp_reduce_buf = tmp_gamma_diff;
tmp_buffer_size = tmp_gamma_diff + tmp_beta_diff + tmp_reduce_buf;
} else {
tmp_buffer_size = 0;
}
return tmp_buffer_size;
});
} // namespace oneflow
|
e07aa9fbd72b830f8b91d13f2b2c95518738d922.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "mdCuda.h"
int main(int argc, char* argv[])
{
if(CheckParameters(argc, argv) == false)
return 0;
if(OpenFiles() == false)
return -1;
if(Input() == false)
return -1;
Solve();
CloseFiles();
return 0;
}
bool OpenFiles()
{
if(FileExists("mdse.out"))
{
cout << "mdse.out already exists. Enter 'y' to overwrite, 'n' to exit: ";
string answer;
cin >> answer;
if(answer != "y"){
cout << "Stopping." << endl;
return false;
}
}
fileInp.open("mdse.inp");
if(fileInp.good() == false)
{
cout << "mdse.inp couldn't be opened for reading. Stopping." << endl;
return false;
}
fileOut.open("mdse.out");
if(fileOut.good() == false)
{
cout << "mdse.out couldn't be opened for writing. Stopping." << endl;
return false;
}
fileOut << fixed << setprecision(5);
fileEne.open("mdse.ene");
if(fileEne.good() == false)
{
cout << "mdse.ene couldn't be opened for writing. Stopping." << endl;
return false;
}
fileEne << fixed << setprecision(5);
filePic.open("mdse.pic");
if(filePic.good() == false)
{
cout << "mdse.pic couldn't be opened for writing. Stopping." << endl;
return false;
}
filePic << fixed << setprecision(5);
fileBs.open("mdse.bs");
if(fileBs.good() == false)
{
cout << "mdse.bs couldn't be opened for writing. Stopping." << endl;
return false;
}
fileBs << fixed << setprecision(5);
return true;
}
bool FileExists(const string& filename)
{
struct stat buf;
if (stat(filename.c_str(), &buf) != -1)
{
return true;
}
return false;
}
bool Input()
{
// Potential parameters for Cu
/*RM = 63.546;
DT = 0.9E-15;
A1 = 110.766008;
A2 = -46.1649783;
RL1 = 2.09045946;
RL2 = 1.49853083;
AL1 = 0.394142248;
AL2 = 0.207225507;
D21 = 0.436092895;
D22 = 0.245082238;
*/
/*
// Potential parameters for Au
RM=196.9665;
DT=1.6E-15;
A1=345.923364;
A2=-38.9245908;
RL1=1.0428923;
RL2=1.05974062;
AL1=0.750775965;
AL2=0.229377368;
D21=0.888911352;
D22=0.254280292;
*/
// Potential parameters for Ag
RM = 107.868;
DT = 1.4E-15;
A1 = 220.262366;
A2 = -26.0811795;
RL1 = 1.72376253;
RL2 = 1.81484791;
AL1 = 0.673011507;
AL2 = 0.120620395;
D21 = 1.00610152;
D22 = 0.221234242;
double FACM = 0.103655772E-27;
BK = 8.617385E-05;
RM = RM * FACM;
try
{
// Read the title
Title = ReadLine();
// Skip the second line
ReadLine();
// Read MDSL, IAVL, IPPL, ISCAL, IPD, TE, NA, LAYER, IPBC, PP(1), PP(2), PP(3)
MDSL = GetValueInt();
IAVL = GetValueInt();
IPPL = GetValueInt();
ISCAL = GetValueInt();
IPD = GetValueInt();
TE = GetValueDouble();
NA = GetValueInt();
LAYER = GetValueInt();
IPBC = GetValueInt();
PP[0] = GetValueDouble();
PP[1] = GetValueDouble();
PP[2] = GetValueDouble();
// Generate atom coordinates
GenerateLatis();
// Sort atoms by the z axis
SortAtoms('Z');
// Find the periodic boundary limits if PBC is applied
FindBoundaries();
}
catch(exception& e)
{
cout << "Error in Input(): " << e.what() << endl;
return false;
}
return true;
}
bool Solve()
{
// Initialize some variables and define some factors
MDS = 0; // Current md simulation step;
int IPP=0; // Print counter
double EPAV = 0; // Average potential energy
double EKAV = 0; // Average kinetic energy
double ETAV = 0; // Average total energy
double SCFAV = 0; // Average scaling factor
TCALAV = 0; // System temperature
int IAV = 0; // Average counter
int ISCA = 0; // Scaling counter
double FFPR[MAX_ATOMS][3]; // Array to store forces from previous step
// Calculate the initial potential energy of each atom and the initial force that each atom experiences
Force();
// SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. DISTRIBUTION
MaxWell();
// Printing initially distributed velocities, potential energies, forces, total energy and temperature
PrintInitial();
fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl;
fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl;
fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl;
fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl;
fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl;
fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl;
// Start Md Steps
while(MDS < MDSL){
MDS++;
IPP++;
ISCA++;
// Show status at each 100 steps
if((MDS % 100) == 0)
ShowStatus();
// Reposition the particles if PBC is applied
if(IPBC != 0)
Reposition();
// Calculate velocity and position of the particles using the velocity summed form of verlet algorithm (NVE MD velocity form)
Force();
// Compute the positions at time step n+1 as:
// r_i(n+1) = r_i(n) + h*v_i(n) + (h^2/(2m))*F_i(n)
for(int i=0; i<NA; i++){
X[i] = X[i] + DT*VV[i][0] + (pow(DT,2)*FF[i][0]) / (2*RM);
Y[i] = Y[i] + DT*VV[i][1] + (pow(DT,2)*FF[i][1]) / (2*RM);
Z[i] = Z[i] + DT*VV[i][2] + (pow(DT,2)*FF[i][2]) / (2*RM);
}
// Store the forces at time step Fi(n)
memcpy(FFPR, FF, NA*3*sizeof(double));
//for(int i=0; i<NA; i++){
// for(int j=0; j<3; j++){
// FFPR[i][j] = FF[i][j];
// }
//}
Force();
// Compute the velocities at time step n+1 as
// v_i(n+1) = v_i(n) + (h/(2m))*(F_i(n+1) + F_i(n))
for(int i=0; i<NA; i++){
VV[i][0] = VV[i][0] + DT * (FF[i][0]+FFPR[i][0]) / (2*RM);
VV[i][1] = VV[i][1] + DT * (FF[i][1]+FFPR[i][1]) / (2*RM);
VV[i][2] = VV[i][2] + DT * (FF[i][2]+FFPR[i][2]) / (2*RM);
VT[i] = pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2);
}
// Calculate the temperature that system reached by calculating the kinetic energy of each atom
EKINA = 0;
for(int i=0; i<NA; i++)
EKINA += VT[i];
EKINA *= RM;
TCALC = EKINA / (3*NA*BK);
// Calculate the scaling factor and scale the velocities
SCFAC = sqrt(TE/TCALC);
if(ISCA == ISCAL)
{
EKIN = 0;
for(int i=0; i<NA; i++){
for(int j=0; j<3; j++){
VV[i][j] *= SCFAC;
}
VT[i] = pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2);
EKIN += VT[i];
}
ISCA = 0;
EKIN *= RM;
TCALC = EKIN / (3 * NA * BK);
}
// Calculate total energy
ETOT = EPOT + EKINA;
// Calculate the averages of EPOT, EKINA, ETOT, SCFAC AND TCALC
EPAV += EPOT;
EKAV += EKINA;
ETAV += ETOT;
SCFAV += SCFAC;
TCALAV += TCALC;
IAV++;
if(IAV < IAVL)
continue;
EPAV /= IAVL;
EKAV /= IAVL;
ETAV /= IAVL;
SCFAV /= IAVL;
TCALAV /= IAVL;
// Print the averages
fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed;
fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed;
// Periodic printing of coordinates
if(IPP == IPPL){
PrintPeriodic();
IPP = 0;
}
IAV = 0;
EPAV = 0;
EKAV = 0;
ETAV = 0;
SCFAV = 0;
TCALAV = 0;
} // Md Steps Loop
PrintFinal();
return true;
}
void WriteBsFile()
{
for(int i=0; i<NA; i++){
fileBs << "atom Au " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
fileBs << "" << endl;
fileBs << "spec Au 0.50 1 0.75 0" << endl; // "atom size" "color codes rgb"
fileBs << "" << endl;
fileBs << "bonds Au Au 1.0 4.05 0.03 0.5 0.7 0.9" << endl; // Bond "min length" "max length" "line width" "color codes rgb"
fileBs << "" << endl;
fileBs << "tmat 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" << endl;
fileBs << "dist 100.0" << endl;
fileBs << "inc 1.0" << endl;
fileBs << "scale 40.0" << endl;
fileBs << "rfac 1.0" << endl;
fileBs << "bfac 1.0" << endl;
fileBs << "switches 1 0 1 0 0 1 1 0 0" << endl;
fileBs << "" << endl;
}
bool CloseFiles()
{
fileInp.close();
fileOut.close();
fileEne.close();
filePic.close();
fileBs.close();
CuErr( hipHostFree(h_FFX));
CuErr( hipHostFree(h_FFY));
CuErr( hipHostFree(h_FFZ));
CuErr( hipHostFree(h_Params));
CuErr( hipFree(d_FFX));
CuErr( hipFree(d_FFY));
CuErr( hipFree(d_FFZ));
CuErr( hipFree(d_EE));
CuErr( hipFree(d_X));
CuErr( hipFree(d_Y));
CuErr( hipFree(d_Z));
CuErr( hipFree(d_Params));
return true;
}
void ShowStatus()
{
cout << "\rMDS Steps: " << MDS << " of " << MDSL;
}
string GetTime()
{
time_t rawtime;
struct tm * timeinfo;
char chars[100];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (chars, 100, "%Y.%m.%d %H:%M:%S", timeinfo);
string final = " DATE AND TIME: ";
final += chars;
return final;
}
void Force()
{
if(OnlyCpu)
ForceCpu();
else
ForceCuda();
}
void ForceCpu()
{
double E2 = 0; // Total energy
double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2;
double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ;
double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ;
int i, j;
#pragma omp parallel for private(i,j,EPP,FX2,FY2,FZ2,RIJ,RIJ2,XIJ,YIJ,ZIJ,ARG1,ARG2,EXP1,EXP2,UIJ1,UIJ2,UIJ,FAC1,FAC2,FAC12,XRIJ,YRIJ,ZRIJ) reduction(+:E2)
for(i=0; i<NA; i++)
{
EE[i] = 0;
EPP = 0;
// Forces that affect the atom indexed with i, in all three axes
FX2 = 0;
FY2 = 0;
FZ2 = 0;
for(j=0; j<NA; j++)
{
if(i == j)
continue;
// Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ
Period(i, j, XIJ, YIJ, ZIJ, RIJ2, RIJ);
// Calculate potential energy U(r)
ARG1 = AL1*RIJ2;
ARG2 = AL2*RIJ2;
EXP1 = exp(-ARG1);
EXP2 = exp(-ARG2);
UIJ1 = A1*EXP1/(pow(RIJ,RL1));
UIJ2 = A2*EXP2/(pow(RIJ,RL2));
UIJ = D21*UIJ1 + D22*UIJ2;
EPP += UIJ;
// Calculate forces
FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ);
FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ);
FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2;
XRIJ = XIJ/RIJ;
YRIJ = YIJ/RIJ;
ZRIJ = ZIJ/RIJ;
FX2 += FAC12*XRIJ;
FY2 += FAC12*YRIJ;
FZ2 += FAC12*ZRIJ;
}
FF[i][0] = -FX2;
FF[i][1] = -FY2;
FF[i][2] = -FZ2;
EE[i] = EPP;
E2 += EPP;
//FFF[i] = sqrt(FF[i][0]*FF[i][0] + FF[i][1]*FF[i][1] + FF[i][2]*FF[i][2]);
}
EPOT = E2;
}
void ForceCuda()
{
int sizeNA = NA * sizeof(double);
int sizeParams = 11 * sizeof(double);
// Pointers are global, allocating once is enough
if(h_FFX == NULL){
CuErr( hipHostMalloc(&h_FFX, sizeNA));
CuErr( hipHostMalloc(&h_FFY, sizeNA));
CuErr( hipHostMalloc(&h_FFZ, sizeNA));
CuErr( hipHostMalloc(&h_Params, sizeParams));
CuErr( hipMalloc(&d_FFX, sizeNA));
CuErr( hipMalloc(&d_FFY, sizeNA));
CuErr( hipMalloc(&d_FFZ, sizeNA));
CuErr( hipMalloc(&d_EE, sizeNA));
CuErr( hipMalloc(&d_X, sizeNA));
CuErr( hipMalloc(&d_Y, sizeNA));
CuErr( hipMalloc(&d_Z, sizeNA));
CuErr( hipMalloc(&d_Params, sizeParams));
h_Params[0] = PP[0];
h_Params[1] = PP[1];
h_Params[2] = PP[2];
h_Params[3] = AL1;
h_Params[4] = AL2;
h_Params[5] = A1 ;
h_Params[6] = A2 ;
h_Params[7] = RL1;
h_Params[8] = RL2;
h_Params[9] = D21;
h_Params[10] = D22;
CuErr( hipMemcpy(d_Params, h_Params, sizeParams, hipMemcpyHostToDevice));
//hipChannelFormatDesc chanDouble = hipCreateChannelDesc<double>();
//CuErr( hipBindTexture(0, &texX, d_X, &chanDouble, sizeNA));
//CuErr( hipBindTexture(0, &texY, d_Y, &chanDouble, sizeNA));
//CuErr( hipBindTexture(0, &texZ, d_Z, &chanDouble, sizeNA));
}
CuErr( hipMemcpy(d_X, X, sizeNA, hipMemcpyHostToDevice));
CuErr( hipMemcpy(d_Y, Y, sizeNA, hipMemcpyHostToDevice));
CuErr( hipMemcpy(d_Z, Z, sizeNA, hipMemcpyHostToDevice));
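// One thread per atom, in blocks of 32 threads. kernelForce is assumed to be declared in
// mdCuda.h; from its arguments it presumably accumulates each atom's force components
// (d_FFX/d_FFY/d_FFZ) and pair potential energy (d_EE) against all other atoms.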
int blockSize = 32;
int numBlocks = NA / blockSize + (NA % blockSize == 0 ? 0:1);
hipLaunchKernelGGL(( kernelForce) , dim3(numBlocks), dim3(blockSize) , 0, 0, NA, d_FFX, d_FFY, d_FFZ, d_EE, d_X, d_Y, d_Z, IPBC, d_Params);
CuErrC("kernelForce kernel execution failed");
CuErr( hipMemcpy(X, d_X, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(Y, d_Y, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(Z, d_Z, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(h_FFX, d_FFX, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(h_FFY, d_FFY, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(h_FFZ, d_FFZ, sizeNA, hipMemcpyDeviceToHost));
CuErr( hipMemcpy(EE, d_EE, sizeNA, hipMemcpyDeviceToHost));
EPOT = 0;
for(int i=0; i<NA; i++){
FF[i][0] = h_FFX[i];
FF[i][1] = h_FFY[i];
FF[i][2] = h_FFZ[i];
EPOT += EE[i];
}
}
void FindBoundaries()
{
if(IPBC == 0)
return;
for(int i=0; i<3; i++)
PL[i] = PP[i] / 2.0;
// Find smallest coordinates for X, Y and Z coordinates
PA[0] = X[0];
PA[1] = Y[0];
PA[2] = Z[0];
for(int i=1; i<NN; i++)
{
if(PA[0] > X[i])
PA[0] = X[i];
if(PA[1] > Y[i])
PA[1] = Y[i];
if(PA[2] > Z[i])
PA[2] = Z[i];
}
// Find ending coordinates of working system
PB[0] = PA[0] + PP[0];
PB[1] = PA[1] + PP[1];
PB[2] = PA[2] + PP[2];
}
// PRINTING OF POSITIONS, FORCES, AND ENERGIES
void PrintCoordinatesForcesEnergy(){
fileOut << " I X Y Z FX FY FZ EE" << endl;
fileOut << " ------ --------- ------------ ------------ ------------ ------------ ------------ ------------" << endl << endl;
for(int i=0; i<NA; i++){
fileOut << setw(6) << i+1;
fileOut << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << " ";
fileOut << scientific << setw(13) << FF[i][0] << " " << setw(13) << FF[i][1] << " " << setw(13) << FF[i][2] << " " << setw(13) << EE[i];
fileOut << fixed << endl;
}
}
void PrintInitial()
{
string str;
fileInp.clear();
fileInp.seekg(0, ios::beg);
if(PSilent == false)
cout << "Simulation started" << endl;
fileOut << "******************************************************************************************" << endl;
fileOut << Title << endl;
fileOut << "******************************************************************************************" << endl << endl;
fileOut << GetTime() << endl << endl;
tStart = clock();
getline(fileInp, str);
getline(fileInp, str);
fileOut << str << endl;
getline(fileInp, str);
fileOut << str << endl << endl;
getline(fileInp, str);
fileOut << " INITIAL COORDINATES:" << endl;
for(int i=0; i<LAYER; i++){
getline(fileInp, str);
fileOut << str << endl;
}
fileOut << "******************************************************************************************" << endl << endl;
fileOut << " NUMBER OF MOVING ATOMS: NA= " << NA << endl;
fileOut << " NUMBER OF TOTAL ATOMS: NN= " << NN << endl << endl;
fileOut << " INITIAL COORDINATES OF ALL ATOMS: (X,Y,Z)" << endl << endl;
for(int i=0; i<NN; i++){
fileOut << setw(5) << i+1 << " " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
fileOut << "******************************************************************************************" << endl << endl;
fileOut << endl << " INITIAL COORDINATES, FORCES AND ENERGIES:" << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC << " SCFAC=" << SCFAC << endl << endl << fixed;
}
void PrintPeriodic()
{
fileOut << endl << endl << " PERIODIC PRINTING OF COORDINATES, FORCES AND ENERGIES AT MDS: " << MDS << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC;
fileOut << " SCFAC=" << SCFAC << endl << endl << fixed;
}
void PrintFinal()
{
if(IPBC != 0)
Reposition();
fileOut << endl << endl << " FINAL COORDINATES, FORCES AND ENERGIES:" << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKINA=" << EKINA << " ETOT=" << ETOT << " TCALC=" << TCALC << endl << endl << fixed;
PrintElapsedTime();
fileOut << " *************** END OF THE CALCULATION ***************";
WritePicFile();
WriteBsFile();
if(PSilent == false)
cout << endl << "Simulation complete" << endl;
}
void PrintElapsedTime()
{
// Write current time
fileOut << endl << GetTime() << endl << endl;
// Calculate and write elapsed time
tStop = clock();
float seconds = float(tStop - tStart)/CLOCKS_PER_SEC;
int minutes = seconds/60;
seconds -= minutes*60;
int hours = minutes/60;
minutes -= hours*60;
fileOut << " ELAPSED TIME: " << hours << " HOURS " << minutes << " MINUTES " << seconds << " SECONDS" << endl << endl;
}
// RANDOM NUMBER GENERATOR, GENERATES RN IN THE INTERVAL (-1,1)
double Randum(double U, double S)
{
U = 23*U + 0.21132486579;
if((U-1.0) >= 0)
U = U - int(U);
if(U > 0.5)
S = -S;
U = U-int(U);
return (S * U);
}
// DISTRIBUTES THE VELOCITIES FOR THE ATOMS FOR THE SPECIFIED
// TEMPERATURE TE ACCORDING TO THE MAXWELL VELOCITY DISTRIBUTION
void MaxWell()
{
double FAC1 = sqrt(3.0*BK*TE/RM);
double U = 0.0;
double S = 1.0;
double VVX = 0.0;
double VVY = 0.0;
double VVZ = 0.0;
double FAC2 = (2.0/3.0) * FAC1;
FAC2 /= sqrt(3.0);
// EQUATING Vmean TO FAC2
for(int i=0; i<NA; i++){
for(int j=0; j<3; j++){
VV[i][j] = (FAC2 - FAC2*Randum(U,S));
}
}
// CALCULATING AVERAGES
double VVV = 0.0;
for(int i=0; i<NA; i++){
VVX = VVX + VV[i][0];
VVY = VVY + VV[i][1];
VVZ = VVZ + VV[i][2];
}
VVX /= NA;
VVY /= NA;
VVZ /= NA;
VVV = VVX*VVX + VVY*VVY + VVZ*VVZ;
double COSX = VVX / sqrt(VVV);
double COSY = VVY / sqrt(VVV);
double COSZ = VVZ / sqrt(VVV);
// CALCULATING EKIN AND TEMPERATURE WRT THE CALCULATED Vmean
EKIN = 0.5 * RM * (VVV * (9.0/4.0));
TCALC = EKIN / (1.5 * BK);
// CALCULATING THE SCALING FACTOR
SCFAC = sqrt(TE / TCALC);
// REDISTRIBUTING THE INITIAL VELOCITIES WRT SCALING FACTOR
VVV = sqrt(VVV);
double VVXNEW = COSX * VVV * SCFAC;
double VVYNEW = COSY * VVV * SCFAC;
double VVZNEW = COSZ * VVV * SCFAC;
double XSCALE = (VVXNEW-VVX);
double YSCALE = (VVYNEW-VVY);
double ZSCALE = (VVZNEW-VVZ);
for(int i=0; i<NA; i++){
VV[i][0] += XSCALE;
VV[i][1] += YSCALE;
VV[i][2] += ZSCALE;
VT[i] = pow(VV[i][0],2.0) + pow(VV[i][1],2) + pow(VV[i][2],2);
}
// CALCULATING AVERAGES OF SCALED VELOCITIES
VVX = 0;
VVY = 0;
VVZ = 0;
for(int i=0; i<NA; i++){
VVX += VV[i][0];
VVY += VV[i][1];
VVZ += VV[i][2];
}
VVX /= NA;
VVY /= NA;
VVZ /= NA;
// CALCULATING EKIN AND TEMPERATURE WRT THE SCALED Vmean
VVV = VVX*VVX + VVY*VVY + VVZ*VVZ;
EKIN = 0.5 * RM * (VVV * (9.0/4.0));
TCALC = EKIN / (1.5 * BK);
ETOT = EPOT + EKIN;
}
// REPOSITIONS COORDINATES WHEN ANY MOVING ATOM CROSSES THE BOUNDARY.
void Reposition()
{
double PAPL, H, B;
if(PP[0] > 0){
PAPL = PA[0] + PL[0];
for(int i=0; i<NA; i++){
H = (X[i]-PAPL) / PL[0];
B = H - 2.0*int(H);
X[i] = B*PL[0] + PAPL;
}
}
if(PP[1] > 0){
PAPL = PA[1] + PL[1];
for(int i=0; i<NA; i++){
H = (Y[i]-PAPL) / PL[1];
B = H - 2.0*int(H);
Y[i] = B*PL[1] + PAPL;
}
}
if(PP[2] > 0){
PAPL = PA[2] + PL[2];
for(int i=0; i<NA; i++){
H = (Z[i]-PAPL) / PL[2];
B = H - 2.0*int(H);
Z[i] = B*PL[2] + PAPL;
}
}
}
// Sorts atoms by the given axis
void SortAtoms(char sortAxis)
{
double *sortArray;
if(sortAxis == 'X')
sortArray = X;
else if(sortAxis == 'Y')
sortArray = Y;
else
sortArray = Z;
double tempX, tempY, tempZ;
for (int i = 0; i < NA; i++)
{
for (int j = i+1; j < NA; j++)
{
if (sortArray[i] > sortArray[j])
{
tempX = X[i];
tempY = Y[i];
tempZ = Z[i];
X[i] = X[j];
Y[i] = Y[j];
Z[i] = Z[j];
X[j] = tempX;
Y[j] = tempY;
Z[j] = tempZ;
}
}
}
}
// Generates the atoms according to coordinates and repeat parameters from the input
// In the input, the first 3 numbers are x,y,z coordinates, the second 3 numbers are unit cell lengths
// and the last 3 numbers specify how many times to copy that atom in x,y,z direction
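// Hypothetical example of one such input line (values for illustration only):
//   0.0 0.0 0.0  2.89 2.89 2.89  4 4 2
// which would replicate the atom at the origin on a 4 x 4 x 2 grid with spacing 2.89 per axis.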
void GenerateLatis()
{
// Skip the first line: (W(J,K),K=1,6),(NO(J,K),K=1,3)
ReadLine();
NN = 0;
for(int i=0; i<LAYER; i++)
{
double coordinateX = GetValueDouble();
double coordinateY = GetValueDouble();
double coordinateZ = GetValueDouble();
double unitCellLengthX = GetValueDouble();
double unitCellLengthY = GetValueDouble();
double unitCellLengthZ = GetValueDouble();
int multiplierX = GetValueInt();
int multiplierY = GetValueInt();
int multiplierZ = GetValueInt();
for (int iX = 0; iX < multiplierX; iX++)
{
for (int iY = 0; iY < multiplierY; iY++)
{
for (int iZ = 0; iZ < multiplierZ; iZ++)
{
double newCoordinateX = coordinateX + (iX * unitCellLengthX);
double newCoordinateY = coordinateY + (iY * unitCellLengthY);
double newCoordinateZ = coordinateZ + (iZ * unitCellLengthZ);
X[NN] = newCoordinateX;
Y[NN] = newCoordinateY;
Z[NN] = newCoordinateZ;
NN++;
if(NN > MAX_ATOMS)
{
cout << "The number of atoms cannot exceed " << MAX_ATOMS << ". Stopping.";
exit(1);
}
}
}
}
}
if (NN != NA)
cout << "Warning: number of total atoms NN is different from number of moving atoms NA." << endl;
}
string GetValue()
{
SkipSpace();
string val = "";
char c;
do {
fileInp.get(c);
val += c;
} while ((c != ' ') && (c != ',') && (c != '\n') && (c != '\r') && (fileInp.eof() != true));
val = val.substr(0, val.size() - 1);
return val;
}
int GetValueInt()
{
string str = GetValue();
int result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to integer. Stopping." << endl;
exit(1);
}
return result;
}
double GetValueDouble()
{
string str = GetValue();
double result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to double. Stopping." << endl;
exit(1);
}
return result;
}
float GetValueFloat()
{
string str = GetValue();
float result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to double. Stopping." << endl;
exit(1);
}
return result;
}
string SkipSpace()
{
string val = "";
char c;
do {
fileInp.get(c);
val += c;
} while ((c == ' ') || (c == ',') || (c == '\n') || (c == '\r'));
val = val.substr(0, val.size() - 1);
fileInp.unget();
return val;
}
string ReadLine()
{
string line = "";
getline(fileInp, line);
return line;
}
// Calculates interatomic distance between atoms I and J
double Distance(int i, int j)
{
double XX = X[i] - X[j];
double YY = Y[i] - Y[j];
double ZZ = Z[i] - Z[j];
return XX*XX + YY*YY + ZZ*ZZ;
}
void WritePicFile()
{
double EB = EPOT / NA;
filePic << " NN=" << NN << " NA=" << NA << " TOTPE=" << EPOT << " APEPP=" << EB << endl;
for(int i=0; i<NA; i++){
filePic << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
}
// Apply periodic boundary condition and find distances between the two particles
// Because of the periodic boundary, the distance may be the one in this working system or the particle in the adjacent system.
void Period(int i, int j, double &XIJ, double &YIJ, double &ZIJ, double &RIJ2, double &RIJ)
{
XIJ = X[i] - X[j];
YIJ = Y[i] - Y[j];
ZIJ = Z[i] - Z[j];
double DD, ID;
if(IPBC != 0){
if(PP[0] > 0){
DD = XIJ / PP[0];
ID = int(DD);
XIJ = XIJ - PP[0]*(ID+int(2.0*(DD-ID)));
}
if(PP[1] > 0){
DD = YIJ / PP[1];
ID = int(DD);
YIJ = YIJ - PP[1]*(ID+int(2.0*(DD-ID)));
}
if(PP[2] > 0){
DD = ZIJ / PP[2];
ID = int(DD);
ZIJ = ZIJ - PP[2]*(ID+int(2.0*(DD-ID)));
}
}
RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ;
RIJ = sqrt(RIJ2);
}
// Check program starting parameters
bool CheckParameters(int argc, char* argv[])
{
PSilent = false;
OnlyCpu = false;
SetPriorityClass(GetCurrentProcess(),BELOW_NORMAL_PRIORITY_CLASS);
for(int i=1; i<argc; i++)
{
string parameter = argv[i];
if(parameter == "-help"){
cout << "Use parameter '-s' for silent mode. No output will be given to the console." << endl;
cout << "Use parameter '-cpu' for cpu calculations only (otherwise Nvidia graphics card required)." << endl;
return false;
}
else if(parameter == "-s"){
PSilent = true;
}
else if(parameter == "-cpu"){
OnlyCpu = true;
cout << "-cpu option received. Will use only cpu for computations." << endl;
}
}
return true;
} | e07aa9fbd72b830f8b91d13f2b2c95518738d922.cu | #include "mdCuda.h"
int main(int argc, char* argv[])
{
if(CheckParameters(argc, argv) == false)
return 0;
if(OpenFiles() == false)
return -1;
if(Input() == false)
return -1;
Solve();
CloseFiles();
return 0;
}
bool OpenFiles()
{
if(FileExists("mdse.out"))
{
cout << "mdse.out already exists. Enter 'y' to overwrite, 'n' to exit: ";
string answer;
cin >> answer;
if(answer != "y"){
cout << "Stopping." << endl;
return false;
}
}
fileInp.open("mdse.inp");
if(fileInp.good() == false)
{
cout << "mdse.inp couldn't be opened for reading. Stopping." << endl;
return false;
}
fileOut.open("mdse.out");
if(fileOut.good() == false)
{
cout << "mdse.out couldn't be opened for writing. Stopping." << endl;
return false;
}
fileOut << fixed << setprecision(5);
fileEne.open("mdse.ene");
if(fileEne.good() == false)
{
cout << "mdse.ene couldn't be opened for writing. Stopping." << endl;
return false;
}
fileEne << fixed << setprecision(5);
filePic.open("mdse.pic");
if(filePic.good() == false)
{
cout << "mdse.pic couldn't be opened for writing. Stopping." << endl;
return false;
}
filePic << fixed << setprecision(5);
fileBs.open("mdse.bs");
if(fileBs.good() == false)
{
cout << "mdse.bs couldn't be opened for writing. Stopping." << endl;
return false;
}
fileBs << fixed << setprecision(5);
return true;
}
bool FileExists(const string& filename)
{
struct stat buf;
if (stat(filename.c_str(), &buf) != -1)
{
return true;
}
return false;
}
bool Input()
{
// Potential parameters for Cu
/*RM = 63.546;
DT = 0.9E-15;
A1 = 110.766008;
A2 = -46.1649783;
RL1 = 2.09045946;
RL2 = 1.49853083;
AL1 = 0.394142248;
AL2 = 0.207225507;
D21 = 0.436092895;
D22 = 0.245082238;
*/
/*
// Potential parameters for Au
RM=196.9665;
DT=1.6E-15;
A1=345.923364;
A2=-38.9245908;
RL1=1.0428923;
RL2=1.05974062;
AL1=0.750775965;
AL2=0.229377368;
D21=0.888911352;
D22=0.254280292;
*/
// Potential parameters for Ag
RM = 107.868;
DT = 1.4E-15;
A1 = 220.262366;
A2 = -26.0811795;
RL1 = 1.72376253;
RL2 = 1.81484791;
AL1 = 0.673011507;
AL2 = 0.120620395;
D21 = 1.00610152;
D22 = 0.221234242;
double FACM = 0.103655772E-27;
BK = 8.617385E-05;
RM = RM * FACM;
try
{
// Read the title
Title = ReadLine();
// Skip the second line
ReadLine();
// Read MDSL, IAVL, IPPL, ISCAL, IPD, TE, NA, LAYER, IPBC, PP(1), PP(2), PP(3)
MDSL = GetValueInt();
IAVL = GetValueInt();
IPPL = GetValueInt();
ISCAL = GetValueInt();
IPD = GetValueInt();
TE = GetValueDouble();
NA = GetValueInt();
LAYER = GetValueInt();
IPBC = GetValueInt();
PP[0] = GetValueDouble();
PP[1] = GetValueDouble();
PP[2] = GetValueDouble();
// Generate atom coordinates
GenerateLatis();
// Sort atoms by the z axis
SortAtoms('Z');
// Find the periodic boundary limits if PBC is applied
FindBoundaries();
}
catch(exception& e)
{
cout << "Error in Input(): " << e.what() << endl;
return false;
}
return true;
}
bool Solve()
{
// Initialize some variables and define some factors
MDS = 0; // Current md simulation step;
int IPP=0; // Print counter
double EPAV = 0; // Average potential energy
double EKAV = 0; // Average kinetic energy
double ETAV = 0; // Average total energy
double SCFAV = 0; // Average scaling factor
TCALAV = 0; // System temperature
int IAV = 0; // Average counter
int ISCA = 0; // Scaling counter
double FFPR[MAX_ATOMS][3]; // Array to store forces from previous step
// Calculate the initial potential energy of each atom and the initial force that each atom experiences
Force();
// SET INITIAL VELOCITIES ACC. TO MAXWELL VEL. DISTRIBUTION
MaxWell();
// Printing initially distributed velocities, potential energies, forces, total energy and temperature
PrintInitial();
fileOut << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl;
fileOut << "# MDS EPAV EKAV ETAV TCALAV" << endl;
fileOut << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl;
fileEne << "#" << endl << "# ********************* MD STEPS STARTED *************************" << endl << "#" << endl;
fileEne << "# MDS EPAV EKAV ETAV TCALAV" << endl;
fileEne << "# ----- ------------ ------------ ------------ ------------" << endl << "#" << endl;
// Start Md Steps
while(MDS < MDSL){
MDS++;
IPP++;
ISCA++;
// Show status at each 100 steps
if((MDS % 100) == 0)
ShowStatus();
// Reposition the particles if PBC is applied
if(IPBC != 0)
Reposition();
// Calculate velocity and position of the particles using the velocity summed form of verlet algorithm (NVE MD velocity form)
Force();
// Compute the positions at time step n+1 as:
// r_i(n+1) = r_i(n) + h*v_i(n) + (h^2/(2m))*F_i(n)
for(int i=0; i<NA; i++){
X[i] = X[i] + DT*VV[i][0] + (pow(DT,2)*FF[i][0]) / (2*RM);
Y[i] = Y[i] + DT*VV[i][1] + (pow(DT,2)*FF[i][1]) / (2*RM);
Z[i] = Z[i] + DT*VV[i][2] + (pow(DT,2)*FF[i][2]) / (2*RM);
}
// Store the forces at time step Fi(n)
memcpy(FFPR, FF, NA*3*sizeof(double));
//for(int i=0; i<NA; i++){
// for(int j=0; j<3; j++){
// FFPR[i][j] = FF[i][j];
// }
//}
Force();
// Compute the velocities at time step n+1 as
// v_i(n+1) = v_i(n) + (h/(2m))*(F_i(n+1) + F_i(n))
for(int i=0; i<NA; i++){
VV[i][0] = VV[i][0] + DT * (FF[i][0]+FFPR[i][0]) / (2*RM);
VV[i][1] = VV[i][1] + DT * (FF[i][1]+FFPR[i][1]) / (2*RM);
VV[i][2] = VV[i][2] + DT * (FF[i][2]+FFPR[i][2]) / (2*RM);
VT[i] = pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2);
}
// Calculate the temperature that system reached by calculating the kinetic energy of each atom
EKINA = 0;
for(int i=0; i<NA; i++)
EKINA += VT[i];
EKINA *= RM;
TCALC = EKINA / (3*NA*BK);
// Calculate the scaling factor and scale the velocities
SCFAC = sqrt(TE/TCALC);
if(ISCA == ISCAL)
{
EKIN = 0;
for(int i=0; i<NA; i++){
for(int j=0; j<3; j++){
VV[i][j] *= SCFAC;
}
VT[i] = pow(VV[i][0],2) + pow(VV[i][1],2) + pow(VV[i][2],2);
EKIN += VT[i];
}
ISCA = 0;
EKIN *= RM;
TCALC = EKIN / (3 * NA * BK);
}
// Calculate total energy
ETOT = EPOT + EKINA;
// Calculate the averages of EPOT, EKINA, ETOT, SCFAC AND TCALC
EPAV += EPOT;
EKAV += EKINA;
ETAV += ETOT;
SCFAV += SCFAC;
TCALAV += TCALC;
IAV++;
if(IAV < IAVL)
continue;
EPAV /= IAVL;
EKAV /= IAVL;
ETAV /= IAVL;
SCFAV /= IAVL;
TCALAV /= IAVL;
// Print the averages
fileOut << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed;
fileEne << setw(6) << MDS << " " << scientific << EPAV << " " << EKAV << " " << ETAV << " " << TCALAV << endl << fixed;
// Periodic printing of coordinates
if(IPP == IPPL){
PrintPeriodic();
IPP = 0;
}
IAV = 0;
EPAV = 0;
EKAV = 0;
ETAV = 0;
SCFAV = 0;
TCALAV = 0;
} // Md Steps Loop
PrintFinal();
return true;
}
void WriteBsFile()
{
for(int i=0; i<NA; i++){
fileBs << "atom Au " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
fileBs << "" << endl;
fileBs << "spec Au 0.50 1 0.75 0" << endl; // "atom size" "color codes rgb"
fileBs << "" << endl;
fileBs << "bonds Au Au 1.0 4.05 0.03 0.5 0.7 0.9" << endl; // Bond "min length" "max length" "line width" "color codes rgb"
fileBs << "" << endl;
fileBs << "tmat 1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0" << endl;
fileBs << "dist 100.0" << endl;
fileBs << "inc 1.0" << endl;
fileBs << "scale 40.0" << endl;
fileBs << "rfac 1.0" << endl;
fileBs << "bfac 1.0" << endl;
fileBs << "switches 1 0 1 0 0 1 1 0 0" << endl;
fileBs << "" << endl;
}
bool CloseFiles()
{
fileInp.close();
fileOut.close();
fileEne.close();
filePic.close();
fileBs.close();
CuErr( cudaFreeHost(h_FFX));
CuErr( cudaFreeHost(h_FFY));
CuErr( cudaFreeHost(h_FFZ));
CuErr( cudaFreeHost(h_Params));
CuErr( cudaFree(d_FFX));
CuErr( cudaFree(d_FFY));
CuErr( cudaFree(d_FFZ));
CuErr( cudaFree(d_EE));
CuErr( cudaFree(d_X));
CuErr( cudaFree(d_Y));
CuErr( cudaFree(d_Z));
CuErr( cudaFree(d_Params));
return true;
}
void ShowStatus()
{
cout << "\rMDS Steps: " << MDS << " of " << MDSL;
}
string GetTime()
{
time_t rawtime;
struct tm * timeinfo;
char chars[100];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (chars, 100, "%Y.%m.%d %H:%M:%S", timeinfo);
string final = " DATE AND TIME: ";
final += chars;
return final;
}
void Force()
{
if(OnlyCpu)
ForceCpu();
else
ForceCuda();
}
void ForceCpu()
{
double E2 = 0; // Total energy
double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2;
double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ;
double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ;
int i, j;
#pragma omp parallel for private(i,j,EPP,FX2,FY2,FZ2,RIJ,RIJ2,XIJ,YIJ,ZIJ,ARG1,ARG2,EXP1,EXP2,UIJ1,UIJ2,UIJ,FAC1,FAC2,FAC12,XRIJ,YRIJ,ZRIJ) reduction(+:E2)
for(i=0; i<NA; i++)
{
EE[i] = 0;
EPP = 0;
// Forces that affect the atom indexed with i, in all three axes
FX2 = 0;
FY2 = 0;
FZ2 = 0;
for(j=0; j<NA; j++)
{
if(i == j)
continue;
// Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ
Period(i, j, XIJ, YIJ, ZIJ, RIJ2, RIJ);
// Calculate potential energy U(r)
ARG1 = AL1*RIJ2;
ARG2 = AL2*RIJ2;
EXP1 = exp(-ARG1);
EXP2 = exp(-ARG2);
UIJ1 = A1*EXP1/(pow(RIJ,RL1));
UIJ2 = A2*EXP2/(pow(RIJ,RL2));
UIJ = D21*UIJ1 + D22*UIJ2;
EPP += UIJ;
// Calculate forces
FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ);
FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ);
FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2;
XRIJ = XIJ/RIJ;
YRIJ = YIJ/RIJ;
ZRIJ = ZIJ/RIJ;
FX2 += FAC12*XRIJ;
FY2 += FAC12*YRIJ;
FZ2 += FAC12*ZRIJ;
}
FF[i][0] = -FX2;
FF[i][1] = -FY2;
FF[i][2] = -FZ2;
EE[i] = EPP;
E2 += EPP;
//FFF[i] = sqrt(FF[i][0]*FF[i][0] + FF[i][1]*FF[i][1] + FF[i][2]*FF[i][2]);
}
EPOT = E2;
}
void ForceCuda()
{
int sizeNA = NA * sizeof(double);
int sizeParams = 11 * sizeof(double);
// Pointers are global, allocating once is enough
if(h_FFX == NULL){
CuErr( cudaMallocHost(&h_FFX, sizeNA));
CuErr( cudaMallocHost(&h_FFY, sizeNA));
CuErr( cudaMallocHost(&h_FFZ, sizeNA));
CuErr( cudaMallocHost(&h_Params, sizeParams));
CuErr( cudaMalloc(&d_FFX, sizeNA));
CuErr( cudaMalloc(&d_FFY, sizeNA));
CuErr( cudaMalloc(&d_FFZ, sizeNA));
CuErr( cudaMalloc(&d_EE, sizeNA));
CuErr( cudaMalloc(&d_X, sizeNA));
CuErr( cudaMalloc(&d_Y, sizeNA));
CuErr( cudaMalloc(&d_Z, sizeNA));
CuErr( cudaMalloc(&d_Params, sizeParams));
h_Params[0] = PP[0];
h_Params[1] = PP[1];
h_Params[2] = PP[2];
h_Params[3] = AL1;
h_Params[4] = AL2;
h_Params[5] = A1 ;
h_Params[6] = A2 ;
h_Params[7] = RL1;
h_Params[8] = RL2;
h_Params[9] = D21;
h_Params[10] = D22;
CuErr( cudaMemcpy(d_Params, h_Params, sizeParams, cudaMemcpyHostToDevice));
//cudaChannelFormatDesc chanDouble = cudaCreateChannelDesc<double>();
//CuErr( cudaBindTexture(0, &texX, d_X, &chanDouble, sizeNA));
//CuErr( cudaBindTexture(0, &texY, d_Y, &chanDouble, sizeNA));
//CuErr( cudaBindTexture(0, &texZ, d_Z, &chanDouble, sizeNA));
}
CuErr( cudaMemcpy(d_X, X, sizeNA, cudaMemcpyHostToDevice));
CuErr( cudaMemcpy(d_Y, Y, sizeNA, cudaMemcpyHostToDevice));
CuErr( cudaMemcpy(d_Z, Z, sizeNA, cudaMemcpyHostToDevice));
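// One thread per atom, in blocks of 32 threads. kernelForce is assumed to be declared in
// mdCuda.h; from its arguments it presumably accumulates each atom's force components
// (d_FFX/d_FFY/d_FFZ) and pair potential energy (d_EE) against all other atoms.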
int blockSize = 32;
int numBlocks = NA / blockSize + (NA % blockSize == 0 ? 0:1);
kernelForce <<< numBlocks, blockSize >>> (NA, d_FFX, d_FFY, d_FFZ, d_EE, d_X, d_Y, d_Z, IPBC, d_Params);
CuErrC("kernelForce kernel execution failed");
CuErr( cudaMemcpy(X, d_X, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(Y, d_Y, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(Z, d_Z, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(h_FFX, d_FFX, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(h_FFY, d_FFY, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(h_FFZ, d_FFZ, sizeNA, cudaMemcpyDeviceToHost));
CuErr( cudaMemcpy(EE, d_EE, sizeNA, cudaMemcpyDeviceToHost));
EPOT = 0;
for(int i=0; i<NA; i++){
FF[i][0] = h_FFX[i];
FF[i][1] = h_FFY[i];
FF[i][2] = h_FFZ[i];
EPOT += EE[i];
}
}
void FindBoundaries()
{
if(IPBC == 0)
return;
for(int i=0; i<3; i++)
PL[i] = PP[i] / 2.0;
// Find smallest coordinates for X, Y and Z coordinates
PA[0] = X[0];
PA[1] = Y[0];
PA[2] = Z[0];
for(int i=1; i<NN; i++)
{
if(PA[0] > X[i])
PA[0] = X[i];
if(PA[1] > Y[i])
PA[1] = Y[i];
if(PA[2] > Z[i])
PA[2] = Z[i];
}
// Find ending coordinates of working system
PB[0] = PA[0] + PP[0];
PB[1] = PA[1] + PP[1];
PB[2] = PA[2] + PP[2];
}
// PRINTING OF POSITIONS, FORCES, AND ENERGIES
void PrintCoordinatesForcesEnergy(){
fileOut << " I X Y Z FX FY FZ EE" << endl;
fileOut << " ------ --------- ------------ ------------ ------------ ------------ ------------ ------------" << endl << endl;
for(int i=0; i<NA; i++){
fileOut << setw(6) << i+1;
fileOut << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << " ";
fileOut << scientific << setw(13) << FF[i][0] << " " << setw(13) << FF[i][1] << " " << setw(13) << FF[i][2] << " " << setw(13) << EE[i];
fileOut << fixed << endl;
}
}
void PrintInitial()
{
string str;
fileInp.clear();
fileInp.seekg(0, ios::beg);
if(PSilent == false)
cout << "Simulation started" << endl;
fileOut << "******************************************************************************************" << endl;
fileOut << Title << endl;
fileOut << "******************************************************************************************" << endl << endl;
fileOut << GetTime() << endl << endl;
tStart = clock();
getline(fileInp, str);
getline(fileInp, str);
fileOut << str << endl;
getline(fileInp, str);
fileOut << str << endl << endl;
getline(fileInp, str);
fileOut << " INITIAL COORDINATES:" << endl;
for(int i=0; i<LAYER; i++){
getline(fileInp, str);
fileOut << str << endl;
}
fileOut << "******************************************************************************************" << endl << endl;
fileOut << " NUMBER OF MOVING ATOMS: NA= " << NA << endl;
fileOut << " NUMBER OF TOTAL ATOMS: NN= " << NN << endl << endl;
fileOut << " INITIAL COORDINATES OF ALL ATOMS: (X,Y,Z)" << endl << endl;
for(int i=0; i<NN; i++){
fileOut << setw(5) << i+1 << " " << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
fileOut << "******************************************************************************************" << endl << endl;
fileOut << endl << " INITIAL COORDINATES, FORCES AND ENERGIES:" << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC << " SCFAC=" << SCFAC << endl << endl << fixed;
}
void PrintPeriodic()
{
fileOut << endl << endl << " PERIODIC PRINTING OF COORDINATES, FORCES AND ENERGIES AT MDS: " << MDS << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKIN=" << EKIN << " TCALC=" << TCALC;
fileOut << " SCFAC=" << SCFAC << endl << endl << fixed;
}
void PrintFinal()
{
if(IPBC != 0)
Reposition();
fileOut << endl << endl << " FINAL COORDINATES, FORCES AND ENERGIES:" << endl << endl;
PrintCoordinatesForcesEnergy();
fileOut << endl << scientific;
fileOut << " EPOT=" << EPOT << " EKINA=" << EKINA << " ETOT=" << ETOT << " TCALC=" << TCALC << endl << endl << fixed;
PrintElapsedTime();
fileOut << " *************** END OF THE CALCULATION ***************";
WritePicFile();
WriteBsFile();
if(PSilent == false)
cout << endl << "Simulation complete" << endl;
}
void PrintElapsedTime()
{
// Write current time
fileOut << endl << GetTime() << endl << endl;
// Calculate and write elapsed time
tStop = clock();
float seconds = float(tStop - tStart)/CLOCKS_PER_SEC;
int minutes = seconds/60;
seconds -= minutes*60;
int hours = minutes/60;
minutes -= hours*60;
fileOut << " ELAPSED TIME: " << hours << " HOURS " << minutes << " MINUTES " << seconds << " SECONDS" << endl << endl;
}
// RANDOM NUMBER GENERATOR, GENERATES RN IN THE INTERVAL (-1,1)
double Randum(double U, double S)
{
U = 23*U + 0.21132486579;
if((U-1.0) >= 0)
U = U - int(U);
if(U > 0.5)
S = -S;
U = U-int(U);
return (S * U);
}
// DISTRIBUTES THE VELOCITIES FOR THE ATOMS FOR THE SPECIFIED
// TEMPERATURE TE ACCORDING TO THE MAXWELL VELOCITY DISTRIBUTION
void MaxWell()
{
double FAC1 = sqrt(3.0*BK*TE/RM);
double U = 0.0;
double S = 1.0;
double VVX = 0.0;
double VVY = 0.0;
double VVZ = 0.0;
double FAC2 = (2.0/3.0) * FAC1;
FAC2 /= sqrt(3.0);
// EQUATING Vmean TO FAC2
for(int i=0; i<NA; i++){
for(int j=0; j<3; j++){
VV[i][j] = (FAC2 - FAC2*Randum(U,S));
}
}
// CALCULATING AVERAGES
double VVV = 0.0;
for(int i=0; i<NA; i++){
VVX = VVX + VV[i][0];
VVY = VVY + VV[i][1];
VVZ = VVZ + VV[i][2];
}
VVX /= NA;
VVY /= NA;
VVZ /= NA;
VVV = VVX*VVX + VVY*VVY + VVZ*VVZ;
double COSX = VVX / sqrt(VVV);
double COSY = VVY / sqrt(VVV);
double COSZ = VVZ / sqrt(VVV);
// CALCULATING EKIN AND TEMPERATURE WRT THE CALCULATED Vmean
EKIN = 0.5 * RM * (VVV * (9.0/4.0));
TCALC = EKIN / (1.5 * BK);
// CALCULATING THE SCALING FACTOR
SCFAC = sqrt(TE / TCALC);
// REDISTRIBUTING THE INITIAL VELOCITIES WRT SCALING FACTOR
VVV = sqrt(VVV);
double VVXNEW = COSX * VVV * SCFAC;
double VVYNEW = COSY * VVV * SCFAC;
double VVZNEW = COSZ * VVV * SCFAC;
double XSCALE = (VVXNEW-VVX);
double YSCALE = (VVYNEW-VVY);
double ZSCALE = (VVZNEW-VVZ);
for(int i=0; i<NA; i++){
VV[i][0] += XSCALE;
VV[i][1] += YSCALE;
VV[i][2] += ZSCALE;
VT[i] = pow(VV[i][0],2.0) + pow(VV[i][1],2) + pow(VV[i][2],2);
}
// CALCULATING AVERAGES OF SCALED VELOCITIES
VVX = 0;
VVY = 0;
VVZ = 0;
for(int i=0; i<NA; i++){
VVX += VV[i][0];
VVY += VV[i][1];
VVZ += VV[i][2];
}
VVX /= NA;
VVY /= NA;
VVZ /= NA;
// CALCULATING EKIN AND TEMPERATURE WRT THE SCALED Vmean
VVV = VVX*VVX + VVY*VVY + VVZ*VVZ;
EKIN = 0.5 * RM * (VVV * (9.0/4.0));
TCALC = EKIN / (1.5 * BK);
ETOT = EPOT + EKIN;
}
// REPOSITIONS COORDINATES WHEN ANY MOVING ATOM CROSSES THE BOUNDARY.
void Reposition()
{
double PAPL, H, B;
if(PP[0] > 0){
PAPL = PA[0] + PL[0];
for(int i=0; i<NA; i++){
H = (X[i]-PAPL) / PL[0];
B = H - 2.0*int(H);
X[i] = B*PL[0] + PAPL;
}
}
if(PP[1] > 0){
PAPL = PA[1] + PL[1];
for(int i=0; i<NA; i++){
H = (Y[i]-PAPL) / PL[1];
B = H - 2.0*int(H);
Y[i] = B*PL[1] + PAPL;
}
}
if(PP[2] > 0){
PAPL = PA[2] + PL[2];
for(int i=0; i<NA; i++){
H = (Z[i]-PAPL) / PL[2];
B = H - 2.0*int(H);
Z[i] = B*PL[2] + PAPL;
}
}
}
// Sorts atoms by the given axis
void SortAtoms(char sortAxis)
{
double *sortArray;
if(sortAxis == 'X')
sortArray = X;
else if(sortAxis == 'Y')
sortArray = Y;
else
sortArray = Z;
double tempX, tempY, tempZ;
for (int i = 0; i < NA; i++)
{
for (int j = i+1; j < NA; j++)
{
if (sortArray[i] > sortArray[j])
{
tempX = X[i];
tempY = Y[i];
tempZ = Z[i];
X[i] = X[j];
Y[i] = Y[j];
Z[i] = Z[j];
X[j] = tempX;
Y[j] = tempY;
Z[j] = tempZ;
}
}
}
}
// Generates the atoms according to coordinates and repeat parameters from the input
// In the input, the first 3 numbers are x,y,z coordinates, the second 3 numbers are unit cell lengths
// and the last 3 numbers specify how many times to copy that atom in x,y,z direction
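// Hypothetical example of one such input line (values for illustration only):
//   0.0 0.0 0.0  2.89 2.89 2.89  4 4 2
// which would replicate the atom at the origin on a 4 x 4 x 2 grid with spacing 2.89 per axis.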
void GenerateLatis()
{
// Skip the first line: (W(J,K),K=1,6),(NO(J,K),K=1,3)
ReadLine();
NN = 0;
for(int i=0; i<LAYER; i++)
{
double coordinateX = GetValueDouble();
double coordinateY = GetValueDouble();
double coordinateZ = GetValueDouble();
double unitCellLengthX = GetValueDouble();
double unitCellLengthY = GetValueDouble();
double unitCellLengthZ = GetValueDouble();
int multiplierX = GetValueInt();
int multiplierY = GetValueInt();
int multiplierZ = GetValueInt();
for (int iX = 0; iX < multiplierX; iX++)
{
for (int iY = 0; iY < multiplierY; iY++)
{
for (int iZ = 0; iZ < multiplierZ; iZ++)
{
double newCoordinateX = coordinateX + (iX * unitCellLengthX);
double newCoordinateY = coordinateY + (iY * unitCellLengthY);
double newCoordinateZ = coordinateZ + (iZ * unitCellLengthZ);
X[NN] = newCoordinateX;
Y[NN] = newCoordinateY;
Z[NN] = newCoordinateZ;
NN++;
if(NN > MAX_ATOMS)
{
cout << "The number of atoms cannot exceed " << MAX_ATOMS << ". Stopping.";
exit(1);
}
}
}
}
}
if (NN != NA)
cout << "Warning: number of total atoms NN is different from number of moving atoms NA." << endl;
}
string GetValue()
{
SkipSpace();
string val = "";
char c;
do {
fileInp.get(c);
val += c;
} while ((c != ' ') && (c != ',') && (c != '\n') && (c != '\r') && (fileInp.eof() != true));
val = val.substr(0, val.size() - 1);
return val;
}
int GetValueInt()
{
string str = GetValue();
int result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to integer. Stopping." << endl;
exit(1);
}
return result;
}
double GetValueDouble()
{
string str = GetValue();
double result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to double. Stopping." << endl;
exit(1);
}
return result;
}
float GetValueFloat()
{
string str = GetValue();
float result = 0;
bool success = (stringstream(str) >> result);
if(success == false)
{
cout << "Error converting input to double. Stopping." << endl;
exit(1);
}
return result;
}
string SkipSpace()
{
string val = "";
char c;
do {
fileInp.get(c);
val += c;
} while ((c == ' ') || (c == ',') || (c == '\n') || (c == '\r'));
val = val.substr(0, val.size() - 1);
fileInp.unget();
return val;
}
string ReadLine()
{
string line = "";
getline(fileInp, line);
return line;
}
// Calculates interatomic distance between atoms I and J
double Distance(int i, int j)
{
double XX = X[i] - X[j];
double YY = Y[i] - Y[j];
double ZZ = Z[i] - Z[j];
return XX*XX + YY*YY + ZZ*ZZ;
}
void WritePicFile()
{
double EB = EPOT / NA;
filePic << " NN=" << NN << " NA=" << NA << " TOTPE=" << EPOT << " APEPP=" << EB << endl;
for(int i=0; i<NA; i++){
filePic << setw(12) << X[i] << " " << setw(12) << Y[i] << " " << setw(12) << Z[i] << endl;
}
}
// Apply periodic boundary condition and find distances between the two particles
// Because of the periodic boundary, the distance may be the one in this working system or the particle in the adjacent system.
void Period(int i, int j, double &XIJ, double &YIJ, double &ZIJ, double &RIJ2, double &RIJ)
{
XIJ = X[i] - X[j];
YIJ = Y[i] - Y[j];
ZIJ = Z[i] - Z[j];
double DD, ID;
if(IPBC != 0){
if(PP[0] > 0){
DD = XIJ / PP[0];
ID = int(DD);
XIJ = XIJ - PP[0]*(ID+int(2.0*(DD-ID)));
}
if(PP[1] > 0){
DD = YIJ / PP[1];
ID = int(DD);
YIJ = YIJ - PP[1]*(ID+int(2.0*(DD-ID)));
}
if(PP[2] > 0){
DD = ZIJ / PP[2];
ID = int(DD);
ZIJ = ZIJ - PP[2]*(ID+int(2.0*(DD-ID)));
}
}
RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ;
RIJ = sqrt(RIJ2);
}
// Check program starting parameters
bool CheckParameters(int argc, char* argv[])
{
PSilent = false;
OnlyCpu = false;
SetPriorityClass(GetCurrentProcess(),BELOW_NORMAL_PRIORITY_CLASS);
for(int i=1; i<argc; i++)
{
string parameter = argv[i];
if(parameter == "-help"){
cout << "Use parameter '-s' for silent mode. No output will be given to the console." << endl;
cout << "Use parameter '-cpu' for cpu calculations only (otherwise Nvidia graphics card required)." << endl;
return false;
}
else if(parameter == "-s"){
PSilent = true;
}
else if(parameter == "-cpu"){
OnlyCpu = true;
cout << "-cpu option received. Will use only cpu for computations." << endl;
}
}
return true;
} |
3e49b0a196b04bd447d65b9bbc2cd4a7fd2c130f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
char res_seq[100];
char res_par[100];
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Compute_force_parallel(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
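// One thread per (i, j) pair with i < j: computes the gravitational force between particles
// i and j once and stores it, with opposite signs, at entries (j, i) and (i, j) of the
// n x n force matrix (Newton's third law), so no pair is evaluated twice.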
__global__ void kernel_compute_force_mat(struct particle_s *curr, vect_t *forces, int n)
{
const unsigned long long int thread_id_i = threadIdx.y + (unsigned long long int)blockIdx.y * blockDim.y;
const unsigned long long int thread_id_j = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id_j < n && thread_id_i < thread_id_j)
{
const struct particle_s curr_i = curr[thread_id_i];
const struct particle_s curr_j = curr[thread_id_j];
vect_t f_part_k;
f_part_k[X] = curr_i.s[X] - curr_j.s[X];
f_part_k[Y] = curr_i.s[Y] - curr_j.s[Y];
const double len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
const double len_3 = len * len * len;
const double mg = -G * curr_i.m * curr_j.m;
const double fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
forces[thread_id_j * n + thread_id_i][X] = f_part_k[X];
forces[thread_id_j * n + thread_id_i][Y] = f_part_k[Y];
forces[thread_id_i * n + thread_id_j][X] = -f_part_k[X];
forces[thread_id_i * n + thread_id_j][Y] = -f_part_k[Y];
}
}
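// One thread per particle p: sums column p of the n x n force matrix (the forces exerted on
// particle p by every other particle) and accumulates the total into forces[p].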
__global__ void kernel_reduce_force_mat(vect_t *forces, int n)
{
const unsigned long long int thread_id = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id < n)
{
double sum_x = 0.0;
double sum_y = 0.0;
for (unsigned long long int i = thread_id + n; i < n * n; i += n)
{
sum_x += forces[i][X];
sum_y += forces[i][Y];
}
forces[thread_id][X] += sum_x;
forces[thread_id][Y] += sum_y;
}
}
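// One thread per particle: explicit Euler step of size delta_time, advancing the position
// with the current velocity and then updating the velocity from the accumulated force.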
__global__ void kernel_update_part(struct particle_s *curr, vect_t *forces, int n, double delta_time)
{
const unsigned long long int thread_id = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id < n)
{
const double fact = delta_time / curr[thread_id].m;
curr[thread_id].s[X] += delta_time * curr[thread_id].v[X];
curr[thread_id].s[Y] += delta_time * curr[thread_id].v[Y];
curr[thread_id].v[X] += fact * forces[thread_id][X];
curr[thread_id].v[Y] += fact * forces[thread_id][Y];
}
}
void sequential_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = (struct particle_s *)malloc(n * sizeof(struct particle_s));
forces = (vect_t *)malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
// Output_state(0, curr, n);
for (step = 1; step <= n_steps; step++)
{
t = step * delta_t;
memset(forces, 0, n * sizeof(vect_t));
for (part = 0; part < n - 1; part++)
Compute_force(part, forces, curr, n);
for (part = 0; part < n; part++)
Update_part(part, forces, curr, n, delta_t);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
}
// Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_seq, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
free(forces);
} /* sequential_solution */
void parallel_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int output_freq; /* Frequency of output */
double delta_time; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_time, &output_freq, &g_i);
curr = (struct particle_s*)malloc(n * sizeof(struct particle_s));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
// Output_state(0, curr, n);
const int block_size = 1024;
const int grid_size = ceil(((double)n) / block_size);
struct particle_s *curr_dev;
vect_t *forces_mat;
hipMalloc(&curr_dev, n * sizeof(struct particle_s));
hipMalloc(&forces_mat, n * n * sizeof(vect_t));
hipMemcpy(curr_dev, curr, n * sizeof(struct particle_s), hipMemcpyHostToDevice);
const dim3 block_size_mat(32, 32, 1);
const size_t grid_cols = (n + block_size_mat.x - 1) / block_size_mat.x;
const size_t grid_rows = (n + block_size_mat.y - 1) / block_size_mat.y;
const dim3 grid_size_mat(grid_cols, grid_rows, 1);
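// Each timestep: zero and rebuild the n x n pairwise force matrix with 32x32 thread tiles,
// reduce it to one total force per particle, then integrate positions and velocities on the
// GPU; particle state stays resident on the device for the whole loop.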
for (int step = 1; step <= n_steps; step++)
{
hipMemset(forces_mat, 0, n * n * sizeof(vect_t));
hipLaunchKernelGGL(( kernel_compute_force_mat), dim3(grid_size_mat), dim3(block_size_mat) , 0, 0, curr_dev, forces_mat, n);
hipLaunchKernelGGL(( kernel_reduce_force_mat), dim3(grid_size), dim3(block_size) , 0, 0, forces_mat, n);
hipLaunchKernelGGL(( kernel_update_part), dim3(grid_size), dim3(block_size) , 0, 0, curr_dev, forces_mat, n, delta_time);
}
hipMemcpy(curr, curr_dev, n * sizeof(struct particle_s), hipMemcpyDeviceToHost);
hipFree(forces_mat);
hipFree(curr_dev);
t = n_steps * delta_time;
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
// Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_par, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
} /* parallel_solution */
int compare_results(void)
{
return !strcmp(res_seq, res_par);
}
int main(int argc, char *argv[])
{
float elapsed_time_seq;
hipEvent_t start_time_seq, end_time_seq;
hipEventCreate(&start_time_seq);
hipEventCreate(&end_time_seq);
float elapsed_time_parallel;
hipEvent_t start_time_parallel, end_time_parallel;
hipEventCreate(&start_time_parallel);
hipEventCreate(&end_time_parallel);
printf("---------------------Sequential execution---------------------\n");
hipEventRecord(start_time_seq, 0);
sequential_solution(argc, argv);
hipEventRecord(end_time_seq, 0);
hipEventSynchronize(end_time_seq);
hipEventElapsedTime(&elapsed_time_seq, start_time_seq, end_time_seq);
printf("----------------------Parallel execution----------------------\n");
hipEventRecord(start_time_parallel, 0);
parallel_solution(argc, argv);
hipEventRecord(end_time_parallel, 0);
hipEventSynchronize(end_time_parallel);
hipEventElapsedTime(&elapsed_time_parallel, start_time_parallel, end_time_parallel);
printf("\nSequential elapsed time: %fs\n", elapsed_time_seq / 1000.0);
printf("Parallel elapsed time: %fs\n", elapsed_time_parallel / 1000.0);
if (compare_results())
printf("Test PASSED\n");
else
printf("Test FAILED\n");
return 0;
} /* main */
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
prog_name);
fprintf(stderr, " <size of timestep> <output frequency>\n");
fprintf(stderr, " <g|i>\n");
fprintf(stderr, " 'g': program should generate init conds\n");
fprintf(stderr, " 'i': program should get init conds from stdin\n");
exit(0);
} /* Usage */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p)
{
if (argc != 6)
Usage(argv[0]);
*n_p = strtol(argv[1], NULL, 10);
*n_steps_p = strtol(argv[2], NULL, 10);
*delta_t_p = strtod(argv[3], NULL);
*output_freq_p = strtol(argv[4], NULL, 10);
*g_i_p = argv[5][0];
if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
Usage(argv[0]);
if (*g_i_p != 'g' && *g_i_p != 'i')
Usage(argv[0]);
} /* Get_args */
void Get_init_cond(struct particle_s curr[], int n)
{
int part;
printf("For each particle, enter (in order):\n");
printf(" its mass, its x-coord, its y-coord, ");
printf("its x-velocity, its y-velocity\n");
for (part = 0; part < n; part++)
{
scanf("%lf", &curr[part].m);
scanf("%lf", &curr[part].s[X]);
scanf("%lf", &curr[part].s[Y]);
scanf("%lf", &curr[part].v[X]);
scanf("%lf", &curr[part].v[Y]);
}
} /* Get_init_cond */
void Gen_init_cond(struct particle_s curr[], int n)
{
int part;
double mass = 5.0e24;
double gap = 1.0e5;
double speed = 3.0e4;
srandom(1);
for (part = 0; part < n; part++)
{
curr[part].m = mass;
curr[part].s[X] = part * gap;
curr[part].s[Y] = 0.0;
curr[part].v[X] = 0.0;
if (part % 2 == 0)
curr[part].v[Y] = speed;
else
curr[part].v[Y] = -speed;
}
} /* Gen_init_cond */
void Output_state(double time, struct particle_s curr[], int n)
{
int part;
printf("%.2f\n", time);
for (part = 0; part < n; part++)
{
printf("%3d %10.3e ", part, curr[part].s[X]);
printf(" %10.3e ", curr[part].s[Y]);
printf(" %10.3e ", curr[part].v[X]);
printf(" %10.3e\n", curr[part].v[Y]);
}
printf("\n");
} /* Output_state */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n)
{
int k;
double mg;
vect_t f_part_k;
double len, len_3, fact;
for (k = part + 1; k < n; k++)
{
f_part_k[X] = curr[part].s[X] - curr[k].s[X];
f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
len_3 = len * len * len;
mg = -G * curr[part].m * curr[k].m;
fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
forces[part][X] += f_part_k[X];
forces[part][Y] += f_part_k[Y];
forces[k][X] -= f_part_k[X];
forces[k][Y] -= f_part_k[Y];
}
} /* Compute_force */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t)
{
double fact = delta_t / curr[part].m;
curr[part].s[X] += delta_t * curr[part].v[X];
curr[part].s[Y] += delta_t * curr[part].v[Y];
curr[part].v[X] += fact * forces[part][X];
curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p)
{
int i, j;
vect_t diff;
double pe = 0.0, ke = 0.0;
double dist, speed_sqr;
for (i = 0; i < n; i++)
{
speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
ke += curr[i].m * speed_sqr;
}
ke *= 0.5;
for (i = 0; i < n - 1; i++)
{
for (j = i + 1; j < n; j++)
{
diff[X] = curr[i].s[X] - curr[j].s[X];
diff[Y] = curr[i].s[Y] - curr[j].s[Y];
dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
pe += -G * curr[i].m * curr[j].m / dist;
}
}
*kin_en_p = ke;
*pot_en_p = pe;
} /* Compute_energy */
| 3e49b0a196b04bd447d65b9bbc2cd4a7fd2c130f.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"
char res_seq[100];
char res_par[100];
#define DIM 2 /* Two-dimensional system */
#define X 0 /* x-coordinate subscript */
#define Y 1 /* y-coordinate subscript */
const double G = 6.673e-11;
typedef double vect_t[DIM]; /* Vector type for position, etc. */
// vect_t forces_reduction[4999][5000];
struct particle_s
{
double m; /* Mass */
vect_t s; /* Position */
vect_t v; /* Velocity */
};
void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n);
void Compute_force_parallel(int part, vect_t forces[], struct particle_s curr[],
int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p);
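/* kernel_compute_force_mat: the 2-D grid spans the n x n index space; a thread whose indices
satisfy i < j (i from y, j from x) evaluates the gravitational interaction between particles
i and j once, writes the contribution to the force on particle i into row j / column i of the
force matrix, and the opposite contribution (Newton's third law) into row i / column j. */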
__global__ void kernel_compute_force_mat(struct particle_s *curr, vect_t *forces, int n)
{
const unsigned long long int thread_id_i = threadIdx.y + (unsigned long long int)blockIdx.y * blockDim.y;
const unsigned long long int thread_id_j = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id_j < n && thread_id_i < thread_id_j)
{
const struct particle_s curr_i = curr[thread_id_i];
const struct particle_s curr_j = curr[thread_id_j];
vect_t f_part_k;
f_part_k[X] = curr_i.s[X] - curr_j.s[X];
f_part_k[Y] = curr_i.s[Y] - curr_j.s[Y];
const double len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
const double len_3 = len * len * len;
const double mg = -G * curr_i.m * curr_j.m;
const double fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
forces[thread_id_j * n + thread_id_i][X] = f_part_k[X];
forces[thread_id_j * n + thread_id_i][Y] = f_part_k[Y];
forces[thread_id_i * n + thread_id_j][X] = -f_part_k[X];
forces[thread_id_i * n + thread_id_j][Y] = -f_part_k[Y];
}
}
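/* kernel_reduce_force_mat: one thread per particle. Thread t sums column t of the n x n force
matrix: entries in rows 1..n-1 are accumulated onto forces[t] (the row-0 entry, zeroed by the
memset), leaving the net force on particle t in forces[t] for kernel_update_part to consume. */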
__global__ void kernel_reduce_force_mat(vect_t *forces, int n)
{
const unsigned long long int thread_id = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id < n)
{
double sum_x = 0.0;
double sum_y = 0.0;
for (unsigned long long int i = thread_id + n; i < n * n; i += n)
{
sum_x += forces[i][X];
sum_y += forces[i][Y];
}
forces[thread_id][X] += sum_x;
forces[thread_id][Y] += sum_y;
}
}
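/* kernel_update_part: one thread per particle; performs the same Euler step as the host
Update_part, using the reduced net force stored in forces[t]. */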
__global__ void kernel_update_part(struct particle_s *curr, vect_t *forces, int n, double delta_time)
{
const unsigned long long int thread_id = threadIdx.x + (unsigned long long int)blockIdx.x * blockDim.x;
if (thread_id < n)
{
const double fact = delta_time / curr[thread_id].m;
curr[thread_id].s[X] += delta_time * curr[thread_id].v[X];
curr[thread_id].s[Y] += delta_time * curr[thread_id].v[Y];
curr[thread_id].v[X] += fact * forces[thread_id][X];
curr[thread_id].v[Y] += fact * forces[thread_id][Y];
}
}
void sequential_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int step; /* Current step */
int part; /* Current particle */
int output_freq; /* Frequency of output */
double delta_t; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
vect_t *forces; /* Forces on each particle */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
curr = (struct particle_s *)malloc(n * sizeof(struct particle_s));
forces = (vect_t *)malloc(n * sizeof(vect_t));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
// Output_state(0, curr, n);
for (step = 1; step <= n_steps; step++)
{
t = step * delta_t;
memset(forces, 0, n * sizeof(vect_t));
for (part = 0; part < n - 1; part++)
Compute_force(part, forces, curr, n);
for (part = 0; part < n; part++)
Update_part(part, forces, curr, n, delta_t);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
}
// Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_seq, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
free(forces);
} /* sequential_solution */
void parallel_solution(int argc, char *argv[])
{
int n; /* Number of particles */
int n_steps; /* Number of timesteps */
int output_freq; /* Frequency of output */
double delta_time; /* Size of timestep */
double t; /* Current Time */
struct particle_s *curr; /* Current state of system */
char g_i; /*_G_en or _i_nput init conds */
double kinetic_energy, potential_energy;
double start, finish; /* For timings */
Get_args(argc, argv, &n, &n_steps, &delta_time, &output_freq, &g_i);
curr = (struct particle_s*)malloc(n * sizeof(struct particle_s));
if (g_i == 'i')
Get_init_cond(curr, n);
else
Gen_init_cond(curr, n);
GET_TIME(start);
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
// Output_state(0, curr, n);
const int block_size = 1024;
const int grid_size = ceil(((double)n) / block_size);
struct particle_s *curr_dev;
vect_t *forces_mat;
cudaMalloc(&curr_dev, n * sizeof(struct particle_s));
cudaMalloc(&forces_mat, n * n * sizeof(vect_t));
cudaMemcpy(curr_dev, curr, n * sizeof(struct particle_s), cudaMemcpyHostToDevice);
const dim3 block_size_mat(32, 32, 1);
const size_t grid_cols = (n + block_size_mat.x - 1) / block_size_mat.x;
const size_t grid_rows = (n + block_size_mat.y - 1) / block_size_mat.y;
const dim3 grid_size_mat(grid_cols, grid_rows, 1);
for (int step = 1; step <= n_steps; step++)
{
cudaMemset(forces_mat, 0, n * n * sizeof(vect_t));
kernel_compute_force_mat<<< grid_size_mat, block_size_mat >>>(curr_dev, forces_mat, n);
kernel_reduce_force_mat<<< grid_size, block_size >>>(forces_mat, n);
kernel_update_part<<< grid_size, block_size >>>(curr_dev, forces_mat, n, delta_time);
}
cudaMemcpy(curr, curr_dev, n * sizeof(struct particle_s), cudaMemcpyDeviceToHost);
cudaFree(forces_mat);
cudaFree(curr_dev);
t = n_steps * delta_time;
Compute_energy(curr, n, &kinetic_energy, &potential_energy);
// Output_state(t, curr, n);
printf(" PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
sprintf(res_par, " PE = %e, KE = %e, Total Energy = %e\n",
potential_energy, kinetic_energy, kinetic_energy + potential_energy);
GET_TIME(finish);
printf("Elapsed time = %e seconds\n", finish - start);
free(curr);
} /* parallel_solution */
int compare_results(void)
{
return !strcmp(res_seq, res_par);
}
int main(int argc, char *argv[])
{
float elapsed_time_seq;
cudaEvent_t start_time_seq, end_time_seq;
cudaEventCreate(&start_time_seq);
cudaEventCreate(&end_time_seq);
float elapsed_time_parallel;
cudaEvent_t start_time_parallel, end_time_parallel;
cudaEventCreate(&start_time_parallel);
cudaEventCreate(&end_time_parallel);
printf("---------------------Sequential execution---------------------\n");
cudaEventRecord(start_time_seq, 0);
sequential_solution(argc, argv);
cudaEventRecord(end_time_seq, 0);
cudaEventSynchronize(end_time_seq);
cudaEventElapsedTime(&elapsed_time_seq, start_time_seq, end_time_seq);
printf("----------------------Parallel execution----------------------\n");
cudaEventRecord(start_time_parallel, 0);
parallel_solution(argc, argv);
cudaEventRecord(end_time_parallel, 0);
cudaEventSynchronize(end_time_parallel);
cudaEventElapsedTime(&elapsed_time_parallel, start_time_parallel, end_time_parallel);
printf("\nSequential elapsed time: %fs\n", elapsed_time_seq / 1000.0);
printf("Parallel elapsed time: %fs\n", elapsed_time_parallel / 1000.0);
if (compare_results())
printf("Test PASSED\n");
else
printf("Test FAILED\n");
return 0;
} /* main */
void Usage(char *prog_name)
{
fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n",
prog_name);
fprintf(stderr, " <size of timestep> <output frequency>\n");
fprintf(stderr, " <g|i>\n");
fprintf(stderr, " 'g': program should generate init conds\n");
fprintf(stderr, " 'i': program should get init conds from stdin\n");
exit(0);
} /* Usage */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
double *delta_t_p, int *output_freq_p, char *g_i_p)
{
if (argc != 6)
Usage(argv[0]);
*n_p = strtol(argv[1], NULL, 10);
*n_steps_p = strtol(argv[2], NULL, 10);
*delta_t_p = strtod(argv[3], NULL);
*output_freq_p = strtol(argv[4], NULL, 10);
*g_i_p = argv[5][0];
if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
Usage(argv[0]);
if (*g_i_p != 'g' && *g_i_p != 'i')
Usage(argv[0]);
} /* Get_args */
void Get_init_cond(struct particle_s curr[], int n)
{
int part;
printf("For each particle, enter (in order):\n");
printf(" its mass, its x-coord, its y-coord, ");
printf("its x-velocity, its y-velocity\n");
for (part = 0; part < n; part++)
{
scanf("%lf", &curr[part].m);
scanf("%lf", &curr[part].s[X]);
scanf("%lf", &curr[part].s[Y]);
scanf("%lf", &curr[part].v[X]);
scanf("%lf", &curr[part].v[Y]);
}
} /* Get_init_cond */
void Gen_init_cond(struct particle_s curr[], int n)
{
int part;
double mass = 5.0e24;
double gap = 1.0e5;
double speed = 3.0e4;
srandom(1);
for (part = 0; part < n; part++)
{
curr[part].m = mass;
curr[part].s[X] = part * gap;
curr[part].s[Y] = 0.0;
curr[part].v[X] = 0.0;
if (part % 2 == 0)
curr[part].v[Y] = speed;
else
curr[part].v[Y] = -speed;
}
} /* Gen_init_cond */
void Output_state(double time, struct particle_s curr[], int n)
{
int part;
printf("%.2f\n", time);
for (part = 0; part < n; part++)
{
printf("%3d %10.3e ", part, curr[part].s[X]);
printf(" %10.3e ", curr[part].s[Y]);
printf(" %10.3e ", curr[part].v[X]);
printf(" %10.3e\n", curr[part].v[Y]);
}
printf("\n");
} /* Output_state */
void Compute_force(int part, vect_t forces[], struct particle_s curr[],
int n)
{
int k;
double mg;
vect_t f_part_k;
double len, len_3, fact;
for (k = part + 1; k < n; k++)
{
f_part_k[X] = curr[part].s[X] - curr[k].s[X];
f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
len_3 = len * len * len;
mg = -G * curr[part].m * curr[k].m;
fact = mg / len_3;
f_part_k[X] *= fact;
f_part_k[Y] *= fact;
forces[part][X] += f_part_k[X];
forces[part][Y] += f_part_k[Y];
forces[k][X] -= f_part_k[X];
forces[k][Y] -= f_part_k[Y];
}
} /* Compute_force */
void Update_part(int part, vect_t forces[], struct particle_s curr[],
int n, double delta_t)
{
double fact = delta_t / curr[part].m;
curr[part].s[X] += delta_t * curr[part].v[X];
curr[part].s[Y] += delta_t * curr[part].v[Y];
curr[part].v[X] += fact * forces[part][X];
curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
double *pot_en_p)
{
int i, j;
vect_t diff;
double pe = 0.0, ke = 0.0;
double dist, speed_sqr;
for (i = 0; i < n; i++)
{
speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
ke += curr[i].m * speed_sqr;
}
ke *= 0.5;
for (i = 0; i < n - 1; i++)
{
for (j = i + 1; j < n; j++)
{
diff[X] = curr[i].s[X] - curr[j].s[X];
diff[Y] = curr[i].s[Y] - curr[j].s[Y];
dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
pe += -G * curr[i].m * curr[j].m / dist;
}
}
*kin_en_p = ke;
*pot_en_p = pe;
} /* Compute_energy */
|
12def9f66226bda6ade7182afe13c05986c8d4b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "SparseMatrix.h"
#define threadsPerBlock 128
#define blocksPerGrid 128
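// Sparse matrix-vector product: each thread walks rows in a grid-stride loop; for row i it
// accumulates value * vect[columnIndex] over the COO entries between d_begEndLines[i].beg and
// d_begEndLines[i].end (inclusive) and writes the resulting dot product to d_result[i].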
__global__ void kernel(ColumnBegEndInLine* d_begEndLines, StructCOO* d_columnsValues, double* d_vect, double* d_result, int* d_N)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int columnIndex(0);
int nonZerosOnLine(0);
while (i<d_N[0])
{
d_result[i] = 0;
for (int j=d_begEndLines[i].beg; j<=d_begEndLines[i].end; j++)
{
d_result[i] += d_columnsValues[j].value * d_vect[d_columnsValues[j].columnIndex];
}
i += blockDim.x*gridDim.x;
}
}
Vector cudaMatVect(SparseMatrix* spMat, Vector& vect)
{
spMat->setBegEndOnLine();
Vector result(spMat->getNumLines());
int* N = (int*) malloc(sizeof(int));
N[0] = spMat->getNumLines();
size_t ColumnBegEndInLineSize = spMat->getNumLines()*sizeof(ColumnBegEndInLine);
size_t inputSize = vect.m_size*sizeof(double);
size_t outputSize = (spMat->getNumLines())*sizeof(double);
size_t StructCOOSize = spMat->getNumOfNonZeros()*sizeof(StructCOO);
//allocating variables on the device
ColumnBegEndInLine* d_begEndLines;
StructCOO* d_columnsValues;
double* d_vect;
double* d_result;
int* d_N;
hipMalloc((void**)&d_begEndLines,ColumnBegEndInLineSize);
hipMalloc((void**)&d_result,outputSize);
hipMalloc((void**)&d_columnsValues,StructCOOSize);
hipMalloc((void**)&d_vect,inputSize);
hipMalloc((void**)&d_N,sizeof(int));
//copying to device
hipMemcpy(d_vect,vect.vect,inputSize,hipMemcpyHostToDevice);
hipMemcpy(d_N,N,sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_begEndLines,spMat->m_begEndOnLine,ColumnBegEndInLineSize,hipMemcpyHostToDevice);
hipMemcpy(d_columnsValues,spMat->m_columnValue,StructCOOSize,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(blocksPerGrid),dim3(threadsPerBlock) , 0, 0, d_begEndLines,d_columnsValues,d_vect,d_result,d_N);
hipMemcpy(result.vect,d_result,outputSize,hipMemcpyDeviceToHost);
free(spMat->m_begEndOnLine);
hipFree(d_begEndLines);
hipFree(d_result);
hipFree(d_columnsValues);
hipFree(d_vect);
hipFree(d_N);
return result;
}
int main()
{
SparseMatrix* sp = new SparseMatrix(100,100);
std::vector<double> vect;
int i;
for (i=0; i<100; i++)
{
sp->add(i,i,1);
vect.push_back(1.);
}
sp->set();
std::vector<double> result;
//result = cudaMatVect(sp,vect);
double* c_vect;
int* linesCOO;
StructCOO* columnsValues;
int* N;
convertToCTypes(sp, vect, c_vect, linesCOO, columnsValues);
bool structArrayIsTrue(true), c_vectIsTrue(true), linesCOOIsTrue(true);
for (i=0; i<vect.size(); i++)
{
if (vect[i] != c_vect[i])
c_vectIsTrue = false;
}
/*
for (i=0; i<sp->m_linesCOO.size(); i++ )
{
if (sp->m_linesCOO[i])
linesCOOIsTrue = false;
}
for (i=0; i<sp->m_columnValue.size(); i++)
{
if (sp->m_columnValue[i] == columnsValues[i])
structArrayIsTrue = false;
}
*/
std::cout<<structArrayIsTrue<<" "<<linesCOOIsTrue<<" "<<c_vectIsTrue<<std::endl;
return 0;
} | 12def9f66226bda6ade7182afe13c05986c8d4b8.cu | #include "SparseMatrix.h"
#define threadsPerBlock 128
#define blocksPerGrid 128
__global__ void kernel(ColumnBegEndInLine* d_begEndLines, StructCOO* d_columnsValues, double* d_vect, double* d_result, int* d_N)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
int columnIndex(0);
int nonZerosOnLine(0);
while (i<d_N[0])
{
d_result[i] = 0;
for (int j=d_begEndLines[i].beg; j<=d_begEndLines[i].end; j++)
{
d_result[i] += d_columnsValues[j].value * d_vect[d_columnsValues[j].columnIndex];
}
i += blockDim.x*gridDim.x;
}
}
Vector cudaMatVect(SparseMatrix* spMat, Vector& vect)
{
spMat->setBegEndOnLine();
Vector result(spMat->getNumLines());
int* N = (int*) malloc(sizeof(int));
N[0] = spMat->getNumLines();
size_t ColumnBegEndInLineSize = spMat->getNumLines()*sizeof(ColumnBegEndInLine);
size_t inputSize = vect.m_size*sizeof(double);
size_t outputSize = (spMat->getNumLines())*sizeof(double);
size_t StructCOOSize = spMat->getNumOfNonZeros()*sizeof(StructCOO);
//allocating variables on the device
ColumnBegEndInLine* d_begEndLines;
StructCOO* d_columnsValues;
double* d_vect;
double* d_result;
int* d_N;
cudaMalloc((void**)&d_begEndLines,ColumnBegEndInLineSize);
cudaMalloc((void**)&d_result,outputSize);
cudaMalloc((void**)&d_columnsValues,StructCOOSize);
cudaMalloc((void**)&d_vect,inputSize);
cudaMalloc((void**)&d_N,sizeof(int));
//copying to device
cudaMemcpy(d_vect,vect.vect,inputSize,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,N,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_begEndLines,spMat->m_begEndOnLine,ColumnBegEndInLineSize,cudaMemcpyHostToDevice);
cudaMemcpy(d_columnsValues,spMat->m_columnValue,StructCOOSize,cudaMemcpyHostToDevice);
kernel<<< blocksPerGrid,threadsPerBlock >>>(d_begEndLines,d_columnsValues,d_vect,d_result,d_N);
cudaMemcpy(result.vect,d_result,outputSize,cudaMemcpyDeviceToHost);
free(spMat->m_begEndOnLine);
cudaFree(d_begEndLines);
cudaFree(d_result);
cudaFree(d_columnsValues);
cudaFree(d_vect);
cudaFree(d_N);
return result;
}
int main()
{
SparseMatrix* sp = new SparseMatrix(100,100);
std::vector<double> vect;
int i;
for (i=0; i<100; i++)
{
sp->add(i,i,1);
vect.push_back(1.);
}
sp->set();
std::vector<double> result;
//result = cudaMatVect(sp,vect);
double* c_vect;
int* linesCOO;
StructCOO* columnsValues;
int* N;
convertToCTypes(sp, vect, c_vect, linesCOO, columnsValues);
bool structArrayIsTrue(true), c_vectIsTrue(true), linesCOOIsTrue(true);
for (i=0; i<vect.size(); i++)
{
if (vect[i] != c_vect[i])
c_vectIsTrue = false;
}
/*
for (i=0; i<sp->m_linesCOO.size(); i++ )
{
if (sp->m_linesCOO[i])
linesCOOIsTrue = false;
}
for (i=0; i<sp->m_columnValue.size(); i++)
{
if (sp->m_columnValue[i] == columnsValues[i])
structArrayIsTrue = false;
}
*/
std::cout<<structArrayIsTrue<<" "<<linesCOOIsTrue<<" "<<c_vectIsTrue<<std::endl;
return 0;
} |
e2165e778c22374775bf05af950c15eee28ef130.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define PROJECT_DEF 1
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
/*The algorithm first computes the real and imaginary components of mu
at each sample point in the trajectory space (k-space),
then computes the real and imaginary components of FHd at each voxel in the image space.*/
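/* As a reading of the kernels below (not an authoritative statement of the reconstruction
math): phiMag[m] = Re(phi_m)^2 + Im(phi_m)^2, and for each voxel x_n the kernels accumulate
Qr(x_n) = sum_m phiMag[m] * cos(2*pi * k_m . x_n) and
Qi(x_n) = sum_m phiMag[m] * sin(2*pi * k_m . x_n). */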
#if PROJECT_DEF
#define BLOCK_SIZE 512
#define K_VALS_GRID_SIZE (BLOCK_SIZE * 4)
__constant__ __device__ kValues const_kValues[K_VALS_GRID_SIZE];
//calculate mu at each sample point t
__global__ void ComputePhiMagKernel(int numK, float *phiR, float *phiI,
float *phiMag)
{
// find the index of the voxel assigned to this thread
unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x);
if (t < numK)
phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]);
}
/*The GPU-based implementation of the FHd algorithm uses constant memory caches to eliminate the
potential bottleneck posed by memory bandwidth and latency.
The scan data is divided into many tiles; the host CPU loads the corresponding subset of sample points into constant memory
before executing the cmpFhD function.
Each thread computes a partial sum for a single element of FHd by iterating over all the sample points in the tile.
This optimization significantly increases the ratio of FP operations to global memory accesses.*/
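/* Rough capacity check for the tile size chosen below, assuming the usual 64 KB of constant
memory on CUDA-capable GPUs: K_VALS_GRID_SIZE * sizeof(kValues) = (512 * 4) * 16 B = 32 KB,
so one tile of sample points fits in constant memory with room to spare. */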
//calculate FHd on one voxel//
__global__ void ComputeQKernel(int numK, int numX,
float *x_d, float *y_d, float *z_d,
float *Qr_d, float *Qi_d)
{
// find the index of the voxel assigned to this thread
unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x);
if (t >= numX)
return;
//register allocate voxel inputs and outputs
float x_l = x_d[t];
float y_l = y_d[t];
float z_l = z_d[t];
float Qracc = 0.0f;
float Qiacc = 0.0f;
float phi = 0.0f;
float expArg;
int idx = 0;
if (numK % 2) {
/* if numK is odd */
// e^2pi*km*xn
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
/*First, instead of fast math, the cos() and sin() calls are changed to the hardware
versions __cosf() and __sinf(). CUDA offers hardware implementations of mathematical
functions that provide much higher throughput than their software counterparts, but
accuracy is reduced when switching from the software functions to the hardware ones,
so this must be done carefully.*/
/*A five-element Taylor expansion was also tried, but did not work.*/
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
idx++;
}
for (; idx < numK; idx++) {
/* using thread coarsening technique */
//const_kValues (sample data) is held in constant memory
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
idx++;
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
/*hardware versions: __cosf() and __sinf()*/
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
}
Qr_d[t] += Qracc;
Qi_d[t] += Qiacc;
}
void ComputePhiMagGPU(int numK, float* phiR_d, float* phiI_d,
float* phiMag_d)
{
unsigned int numBlocks = ((numK - 1) / BLOCK_SIZE) + 1;
dim3 dimGrid(numBlocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( ComputePhiMagKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, numK, phiR_d, phiI_d, phiMag_d);
}
void ComputeQGPU(int numK, int numX, struct kValues *kVals,
float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d)
{
unsigned int size_to_cover = K_VALS_GRID_SIZE;
unsigned int n_iter = ((numK - 1) / K_VALS_GRID_SIZE) + 1;
struct kValues *ptr = kVals;
unsigned int numBlocks = ((numX - 1) / BLOCK_SIZE) + 1;
dim3 dimGrid(numBlocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
//printf("size : %d\n", sizeof(struct kValues));
for (int iter = 0; iter < n_iter; iter++) {
size_to_cover = MIN(K_VALS_GRID_SIZE, numK - (iter * K_VALS_GRID_SIZE));
//printf("size to cover:%d, iter:%d, ptr:%u\n", size_to_cover, iter, ptr);
if (size_to_cover) {
hipMemcpyToSymbol(const_kValues, ptr, size_to_cover * sizeof(struct kValues), 0);
hipLaunchKernelGGL(( ComputeQKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, size_to_cover, numX, x_d, y_d, z_d, Qr_d, Qi_d);
if (hipSuccess != hipDeviceSynchronize()) {
printf("iter: %d ERROR!!!!!!\n", iter);
}
}
ptr += size_to_cover;
}
}
#else
inline
void
ComputePhiMagCPU(int numK,
float* phiR, float* phiI,
float* __restrict__ phiMag) {
int indexK = 0;
for (indexK = 0; indexK < numK; indexK++) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
inline
void
ComputeQCPU(int numK, int numX,
struct kValues *kVals,
float* x, float* y, float* z,
float *__restrict__ Qr, float *__restrict__ Qi) {
float expArg;
float cosArg;
float sinArg;
int indexK, indexX;
// Loop over the space and frequency domains.
// Generally, numX > numK.
// Since loops are not tiled, it's better that the loop with the smaller
// cache footprint be innermost.
for (indexX = 0; indexX < numX; indexX++) {
// Sum the contributions to this point over all frequencies
float Qracc = 0.0f;
float Qiacc = 0.0f;
for (indexK = 0; indexK < numK; indexK++) {
expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +
kVals[indexK].Ky * y[indexX] +
kVals[indexK].Kz * z[indexX]);
cosArg = cosf(expArg);
sinArg = sinf(expArg);
float phi = kVals[indexK].PhiMag;
Qracc += phi * cosArg;
Qiacc += phi * sinArg;
}
Qr[indexX] = Qracc;
Qi[indexX] = Qiacc;
}
}
#endif
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) memalign(16, numK * sizeof(float));
*Qr = (float*) memalign(16, numX * sizeof (float));
memset((void *)*Qr, 0, numX * sizeof(float));
*Qi = (float*) memalign(16, numX * sizeof (float));
memset((void *)*Qi, 0, numX * sizeof(float));
}
| e2165e778c22374775bf05af950c15eee28ef130.cu | /***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#define PI 3.1415926535897932384626433832795029f
#define PIx2 6.2831853071795864769252867665590058f
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define K_ELEMS_PER_GRID 2048
#define PROJECT_DEF 1
struct kValues {
float Kx;
float Ky;
float Kz;
float PhiMag;
};
/*The algorithm first computes the real and imaginary components of mu
at each sample point in the trajectory space (k-space),
then computes the real and imaginary components of FHd at each voxel in the image space.*/
#if PROJECT_DEF
#define BLOCK_SIZE 512
#define K_VALS_GRID_SIZE (BLOCK_SIZE * 4)
__constant__ __device__ kValues const_kValues[K_VALS_GRID_SIZE];
//calculate mu at each sample point t
__global__ void ComputePhiMagKernel(int numK, float *phiR, float *phiI,
float *phiMag)
{
// find the index of the voxel assigned to this thread
unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x);
if (t < numK)
phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]);
}
/*The GPU-based implementation of the FHd algorithm uses constant memory caches to eliminate the
potential bottleneck posed by memory bandwidth and latency.
The scan data is divided into many tiles; the host CPU loads the corresponding subset of sample points into constant memory
before executing the cmpFhD function.
Each thread computes a partial sum for a single element of FHd by iterating over all the sample points in the tile.
This optimization significantly increases the ratio of FP operations to global memory accesses.*/
//calculate FHd on one voxel//
__global__ void ComputeQKernel(int numK, int numX,
float *x_d, float *y_d, float *z_d,
float *Qr_d, float *Qi_d)
{
// find the index of the voxel assigned to this thread
unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x);
if (t >= numX)
return;
//register allocate voxel inputs and outputs
float x_l = x_d[t];
float y_l = y_d[t];
float z_l = z_d[t];
float Qracc = 0.0f;
float Qiacc = 0.0f;
float phi = 0.0f;
float expArg;
int idx = 0;
if (numK % 2) {
/* if numK is odd */
// e^2pi*km*xn
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
/*First, instead of fast math, the cos() and sin() calls are changed to the hardware
versions __cosf() and __sinf(). CUDA offers hardware implementations of mathematical
functions that provide much higher throughput than their software counterparts, but
accuracy is reduced when switching from the software functions to the hardware ones,
so this must be done carefully.*/
/*A five-element Taylor expansion was also tried, but did not work.*/
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
idx++;
}
for (; idx < numK; idx++) {
/* using thread coarsening technique */
//const_kValues (sample data) is held in constant memory
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
idx++;
expArg = PIx2 * (const_kValues[idx].Kx * x_l +
const_kValues[idx].Ky * y_l +
const_kValues[idx].Kz * z_l);
phi = const_kValues[idx].PhiMag;
/*hardware versions: __cosf() and __sinf()*/
Qracc += phi * __cosf(expArg);
Qiacc += phi * __sinf(expArg);
}
Qr_d[t] += Qracc;
Qi_d[t] += Qiacc;
}
void ComputePhiMagGPU(int numK, float* phiR_d, float* phiI_d,
float* phiMag_d)
{
unsigned int numBlocks = ((numK - 1) / BLOCK_SIZE) + 1;
dim3 dimGrid(numBlocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
ComputePhiMagKernel<<<dimGrid, dimBlock>>>(numK, phiR_d, phiI_d, phiMag_d);
}
void ComputeQGPU(int numK, int numX, struct kValues *kVals,
float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d)
{
unsigned int size_to_cover = K_VALS_GRID_SIZE;
unsigned int n_iter = ((numK - 1) / K_VALS_GRID_SIZE) + 1;
struct kValues *ptr = kVals;
unsigned int numBlocks = ((numX - 1) / BLOCK_SIZE) + 1;
dim3 dimGrid(numBlocks, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
//printf("size : %d\n", sizeof(struct kValues));
for (int iter = 0; iter < n_iter; iter++) {
size_to_cover = MIN(K_VALS_GRID_SIZE, numK - (iter * K_VALS_GRID_SIZE));
//printf("size to cover:%d, iter:%d, ptr:%u\n", size_to_cover, iter, ptr);
if (size_to_cover) {
cudaMemcpyToSymbol(const_kValues, ptr, size_to_cover * sizeof(struct kValues), 0);
ComputeQKernel<<<dimGrid, dimBlock>>>(size_to_cover, numX, x_d, y_d, z_d, Qr_d, Qi_d);
if (cudaSuccess != cudaDeviceSynchronize()) {
printf("iter: %d ERROR!!!!!!\n", iter);
}
}
ptr += size_to_cover;
}
}
#else
inline
void
ComputePhiMagCPU(int numK,
float* phiR, float* phiI,
float* __restrict__ phiMag) {
int indexK = 0;
for (indexK = 0; indexK < numK; indexK++) {
float real = phiR[indexK];
float imag = phiI[indexK];
phiMag[indexK] = real*real + imag*imag;
}
}
inline
void
ComputeQCPU(int numK, int numX,
struct kValues *kVals,
float* x, float* y, float* z,
float *__restrict__ Qr, float *__restrict__ Qi) {
float expArg;
float cosArg;
float sinArg;
int indexK, indexX;
// Loop over the space and frequency domains.
// Generally, numX > numK.
// Since loops are not tiled, it's better that the loop with the smaller
// cache footprint be innermost.
for (indexX = 0; indexX < numX; indexX++) {
// Sum the contributions to this point over all frequencies
float Qracc = 0.0f;
float Qiacc = 0.0f;
for (indexK = 0; indexK < numK; indexK++) {
expArg = PIx2 * (kVals[indexK].Kx * x[indexX] +
kVals[indexK].Ky * y[indexX] +
kVals[indexK].Kz * z[indexX]);
cosArg = cosf(expArg);
sinArg = sinf(expArg);
float phi = kVals[indexK].PhiMag;
Qracc += phi * cosArg;
Qiacc += phi * sinArg;
}
Qr[indexX] = Qracc;
Qi[indexX] = Qiacc;
}
}
#endif
void createDataStructsCPU(int numK, int numX, float** phiMag,
float** Qr, float** Qi)
{
*phiMag = (float* ) memalign(16, numK * sizeof(float));
*Qr = (float*) memalign(16, numX * sizeof (float));
memset((void *)*Qr, 0, numX * sizeof(float));
*Qi = (float*) memalign(16, numX * sizeof (float));
memset((void *)*Qi, 0, numX * sizeof(float));
}
|
7d6f9e4d59e3b51124c9380556b8d76b16a9c421.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
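// Fused Adam-style moment update (a description of the kernel below; any bias correction is
// assumed to be handled by the caller): m <- beta1*m + (1-beta1)*g, v <- beta2*v + (1-beta2)*g^2,
// and the gradient buffer g is overwritten with the step lr * m / (sqrt(v) + eps).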
template <typename T>
__global__ void _AdamUpdate(
const int N,
const T lr,
const T beta1,
const T beta2,
const T eps,
T* g,
T* m,
T* v) {
CUDA_1D_KERNEL_LOOP(i, N) {
T gi = g[i];
T mi = m[i] = m[i] * beta1 + gi * (1 - beta1);
T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2);
g[i] = lr * mi / (sqrt(vi) + eps);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void AdamUpdate<float, CUDAContext>(
const int N,
const float lr,
const float beta1,
const float beta2,
const float eps,
float* g,
float* m,
float* v,
CUDAContext* ctx) {
hipLaunchKernelGGL(( _AdamUpdate), dim3(CUDA_BLOCKS(N)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(),
N, lr, beta1, beta2, eps, g, m, v);
}
} // namespace kernels
} // namespace dragon
#endif // USE_ROCM
| 7d6f9e4d59e3b51124c9380556b8d76b16a9c421.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernels {
namespace {
template <typename T>
__global__ void _AdamUpdate(
const int N,
const T lr,
const T beta1,
const T beta2,
const T eps,
T* g,
T* m,
T* v) {
CUDA_1D_KERNEL_LOOP(i, N) {
T gi = g[i];
T mi = m[i] = m[i] * beta1 + gi * (1 - beta1);
T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2);
g[i] = lr * mi / (sqrt(vi) + eps);
}
}
} // namespace
/* ------------------- Launcher Separator ------------------- */
template <>
void AdamUpdate<float, CUDAContext>(
const int N,
const float lr,
const float beta1,
const float beta2,
const float eps,
float* g,
float* m,
float* v,
CUDAContext* ctx) {
_AdamUpdate<<<CUDA_BLOCKS(N), CUDA_THREADS, 0, ctx->cuda_stream()>>>(
N, lr, beta1, beta2, eps, g, m, v);
}
} // namespace kernels
} // namespace dragon
#endif // USE_CUDA
|
4dbe53a017527686b855e870f3ba714649a6d4cc.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cassert>
#include <functional>
#include <algorithm>
#include <vector>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <math_functions.h>
#include <hip/hip_runtime_api.h>
using std::cout;
using std::endl;
using std::fixed;
using std::setprecision;
using std::generate;
using std::vector;
/* ********************************************************* */
/* These two lines define the dimensions (MxN) of the matrix */
#define M 4 // Number of elements/vector
#define N 3 // Number of vectors
/* Change them to test different size matrices */
/* ********************************************************* */
/* CPU Functions */
void matrixTranspose(vector<float>& V, vector<float>& Vt, bool reverse);
void printMatrix(vector<float>& V);
void printTranspose(vector<float>& Vt);
// Function to run CGS decomposition on the GPU
vector<float> runCGS();
void runQR();
/* GPU Functions */
__device__ void printVectorDevice(float* v);
__global__ void printVectorKernel(float* v);
__global__ void printMatrixKernel(float* V);
__global__ void getVectorKernel(float* v, float* V_t, int rowNum, bool reverse);
__global__ void matrixTransposeKernel(float* V, float* V_t, bool reverse);
__global__ void calculateProjectionGPU(float* u, float* upper, float* lower, float* p);
__global__ void innerProductGPU(float* a, float* b, float* c);
//__global__ void sumProjectionsGPU(float* P_t, float* projSum);
__global__ void vectorSubGPU(float* v, float* projSum, float* u);
__global__ void singleNormGPU(float* u_int, float* norm);
__global__ void vectorNormsGPU(float* U_t, float* norms);
__global__ void singleNormMultGPU(float* u_int, float* norm);
__global__ void normsMultGPU(float* U, float* norms, float* E);
int main() {
runQR();
return 0;
}
/* CPU Functions: */
// Transposes or reverse-transposes a matrix passed in as a 1D vector
void matrixTranspose(vector<float>& V, vector<float>& Vt, bool reverse) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (!reverse) {
Vt[i * M + j] = V[j * N + i];
}
else {
V[j * N + i] = Vt[i * M + j];
}
}
}
}
// Prints a matrix passed in as a 1-D vector
void printMatrix(vector<float>& V) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
cout << V[i * N + j] << "\t";
}
cout << endl;
}
cout << endl;
}
// Prints the transpose of a matrix passed in as a 1-D vector
void printTranspose(vector<float>& Vt) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
cout << Vt[i * M + j] << "\t";
}
cout << endl;
}
cout << endl;
}
/* GPU Functions: */
// Prints a vector from the GPU
__device__ void printVectorDevice(float* v) {
for (int i = 0; i < M; i++) {
printf("%f\t", v[i]);
}
printf("\n");
}
__global__ void printVectorKernel(float* v) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
printf("%f\t", v[i]);
}
printf("\n");
}
}
// Prints a matrix from the GPU
__global__ void printMatrixKernel(float* V) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
printf("%f\t", V[i * N + j]);
}
printf("\n");
}
printf("\n");
}
}
// Transposes or reverse-transposes a matrix from GPU
__global__ void matrixTransposeKernel(float* V, float* V_t, bool reverse) {
if (threadIdx.x == 0) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (!reverse) {
V_t[i * M + j] = V[j * N + i];
}
else {
V[j * N + i] = V_t[i * M + j];
}
}
}
}
}
// Accesses a row in V_transpose and copies it into the storage vector v or does the reverse
__global__ void getVectorKernel(float* v, float* V_t, int rowNum, bool reverse) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
if (!reverse) {
v[i] = V_t[rowNum * M + i];
}
else {
V_t[rowNum * M + i] = v[i];
}
}
}
}
// Multiply a vector by a scalar to get a projection - requires M threads for M-length vectors
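// A sketch of the math this implements: proj_u(v) = (<v,u> / <u,u>) * u, where *upper = <v,u>
// and *lower = <u,u> are the two inner products computed by innerProductGPU before this
// kernel is launched (see runCGS).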
__global__ void calculateProjectionGPU(float* u, float* upper, float* lower, float* p) {
int i = threadIdx.x;
// Each thread does one multiplication
if (i < M) {
if (*lower != 0) {
__shared__ float temp[M];
temp[i] = *upper / *lower;
__syncthreads();
p[i] = u[i] * temp[i];
}
else {
p[i] = 0.0f;
}
}
}
// Calculate inner product on GPU - basically stolen from https://www.nvidia.com/content/GTC-2010/pdfs/2131_GTC2010.pdf
__global__ void innerProductGPU(float* a, float* b, float* c) {
// Likely to have more threads than entries, so use this to keep in range
if (threadIdx.x < M) {
// Each thread does one multiplication
// Need to use shared memory to store products
__shared__ float temp[M];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
// Need threads to synchronize - no threads advance until all are at this line, ensures no read-before-write hazard
__syncthreads();
// Now do the sum using only thread 0
if (threadIdx.x == 0) {
float sum = 0.0f;
for (int i = 0; i < M; i++) {
sum += temp[i];
}
*c = sum;
}
}
}
// Vector subtraction to get u[i] - requires M threads for M-length vectors, will be executed from 1 thread
__global__ void vectorSubGPU(float* v, float* projSum, float* u) {
int i = threadIdx.x;
// Each thread subtracts one element from the other
if (i < M) {
u[i] = v[i] - projSum[i];
}
}
// Calculates the euclidean norm of one vector and stores it in norm - requires N threads for N columns
__global__ void singleNormGPU(float* u_int, float* norm) {
int idx = threadIdx.x;
if (idx < N) {
float temp = 0.0f;
// First sum the components of each u together
for (int i = 0; i < M; i++) {
temp += (u_int[i] * u_int[i]);
}
// Now get reciprocal sqrt and store
*norm = rsqrtf(temp);
}
}
// Calculates the euclidean norms of each vector and stores them into array - requires N threads for N columns
__global__ void vectorNormsGPU(float* U_t, float* norms) {
int idx = threadIdx.x;
if (idx < N) {
float temp = 0.0f;
// First sum the components of each u together
for (int i = 0; i < M; i++) {
temp += (U_t[idx * M + i] * U_t[idx * M + i]);
}
// Now get reciprocal sqrt and store into norms array
norms[idx] = rsqrtf(temp);
}
}
// Multiply one vector by the reciprocal square root to normalize it - requires M threads
__global__ void singleNormMultGPU(float* u_int, float* norm) {
int idx = threadIdx.x;
if (idx < M) {
u_int[idx] = u_int[idx] * (*norm);
}
}
// Multiplies each u by 1/norm to get the e's - requires M*N threads to do all at once
__global__ void normsMultGPU(float* U, float* norms, float* E) {
// Note: This function requires that U be passed in, not U_t (for indexing purposes)
int idx = threadIdx.x;
if (idx < M * N) {
// Get index in norms array
int normIdx = (idx % N);
E[idx] = U[idx] * norms[normIdx];
}
}
// Simple matrix multiplication on the GPU
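// Why this yields the upper-triangular factor: Gram-Schmidt produces Q with orthonormal
// columns such that A = Q*R, so Q^T * A = (Q^T * Q) * R = R. This kernel simply forms the
// N x N product Q^T * A with one thread per output element.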
__global__ void matrixMultGPU(float* Q_t, float* A, float* R) {
// Get each thread x and y
int row = threadIdx.y;
int col = threadIdx.x;
// Q_t is a NxM matrix, A is a MxN matrix
// Therefore R will be NxN
if ((row < N) && (col < N)) {
float sum = 0.0f;
for (int i = 0; i < M; i++) {
sum += Q_t[row * M + i] * A[i * N + col];
}
R[row * N + col] = sum;
}
}
vector<float> runCGS() {
size_t bytesIP = sizeof(float); // One float
size_t bytesMatrix = M * N * sizeof(float); // MxN matrix
size_t bytesVecLen = M * sizeof(float); // Mx1 vector
size_t bytesNumVec = N * sizeof(float); // Nx1 vector
// Initialize vectors and matrices for QR
vector<float> h_v(M, 0.0f); // Storage vector for calculating u's
vector<float> h_V(M * N); // Input matrix of v's
vector<float> h_Vt(M * N); // Transpose of V
vector<float> h_u(M, 0.0f); // Storage vector for calculating u's
vector<float> h_u_int(M, 0.0f); // Storage vector for calculating intermediate u's
vector<float> h_U(M * N, 0.0f); // Initially empty matrix of u's
vector<float> h_U_int(M * N, 0.0f); // Initially empty matrix of intermediate u's
vector<float> h_Ut(M * N, 0.0f); // Transpose of U
vector<float> h_Ut_int(M * N, 0.0f); // Transpose of U_int
float* h_Upper = nullptr;
float* h_Lower = nullptr;
vector<float> h_p(M, 0.0f); // Holds a single projection
vector<float> h_Pt(M * N, 0.0f); // Transpose of projections matrix
vector<float> h_PS(M, 0.0f); // Sum of projections vector
float* h_n = nullptr; // Single norm value for MGS
vector<float> h_N(N, 0.0f); // Vector of norms
vector<float> h_E(M * N, 0.0f); // Output E matrix
// Initialize V with a 4x3 example that works out nicely - http://www.cs.nthu.edu.tw/~cherung/teaching/2008cs3331/chap4%20example.pdf
h_V[0] = 1.0; h_V[1] = -1.0; h_V[2] = 4.0;
h_V[3] = 1.0; h_V[4] = 4.0; h_V[5] = -2.0;
h_V[6] = 1.0; h_V[7] = 4.0; h_V[8] = 2.0;
h_V[9] = 1.0; h_V[10] = -1.0; h_V[11] = 0.0;
// Initialize V_transpose
matrixTranspose(h_V, h_Vt, false);
// Copy v1 to u1
for (int i = 0; i < M; i++) {
h_Ut[i] = h_Vt[i];
}
// Store into h_U
matrixTranspose(h_U, h_Ut, true);
// Print initial V matrix:
printf("Input Matrix V:\n");
printMatrix(h_V);
printf("\n");
// Allocate device memory
float* d_v, * d_V, * d_Vt, * d_u, * d_u_int, * d_U, * d_U_int, * d_Ut, * d_Ut_int, * d_Upper, * d_Lower, * d_p, * d_Pt, * d_PS, * d_n, * d_N, * d_E;
hipMalloc(&d_v, bytesVecLen);
hipMalloc(&d_V, bytesMatrix);
hipMalloc(&d_Vt, bytesMatrix);
hipMalloc(&d_u, bytesVecLen);
hipMalloc(&d_u_int, bytesVecLen);
hipMalloc(&d_U, bytesMatrix);
hipMalloc(&d_U_int, bytesMatrix);
hipMalloc(&d_Ut, bytesMatrix);
hipMalloc(&d_Ut_int, bytesMatrix);
hipMalloc((void**)&d_Upper, bytesIP);
hipMalloc((void**)&d_Lower, bytesIP);
hipMalloc(&d_p, bytesVecLen);
hipMalloc(&d_Pt, bytesMatrix);
hipMalloc(&d_PS, bytesVecLen);
hipMalloc((void**)&d_n, bytesIP);
hipMalloc(&d_N, bytesNumVec);
hipMalloc(&d_E, bytesMatrix);
hipMemcpy(d_v, h_v.data(), bytesVecLen, hipMemcpyHostToDevice);
hipMemcpy(d_V, h_V.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_Vt, h_Vt.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_u, h_u.data(), bytesVecLen, hipMemcpyHostToDevice);
hipMemcpy(d_u_int, h_u_int.data(), bytesVecLen, hipMemcpyHostToDevice);
hipMemcpy(d_U, h_U.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_U_int, h_U_int.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_Ut, h_Ut.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_Ut_int, h_Ut_int.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_Upper, h_Upper, bytesIP, hipMemcpyHostToDevice);
hipMemcpy(d_Lower, h_Lower, bytesIP, hipMemcpyHostToDevice);
hipMemcpy(d_p, h_p.data(), bytesVecLen, hipMemcpyHostToDevice);
hipMemcpy(d_Pt, h_Pt.data(), bytesMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_PS, h_PS.data(), bytesVecLen, hipMemcpyHostToDevice);
hipMemcpy(d_n, h_n, bytesIP, hipMemcpyHostToDevice);
hipMemcpy(d_N, h_N.data(), bytesNumVec, hipMemcpyHostToDevice);
hipMemcpy(d_E, h_E.data(), bytesMatrix, hipMemcpyHostToDevice);
int numBlocks = 1;
int threads = 32;
dim3 threadsPerBlock(threads, 1, 1);
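/* Main decomposition sweep (a description of the loop below): for each column i, the working
vector v(i) has the projection onto each previous u(j) subtracted one at a time, re-reading
the partially reduced vector after every subtraction (so the sweep behaves like modified
Gram-Schmidt even though the routine is named CGS); the result is then normalized and stored
as column i of U. */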
for (int i = 1; i < N; i++) {
// Load v(i)
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_v, d_Vt, i, false);
for (int j = 0; j < i; j++) {
// Load d_u with u(j)
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u, d_Ut, j, false);
hipDeviceSynchronize();
// Computer the two inner products for the projection onto that u(j)
innerProductGPU << <numBlocks, threadsPerBlock >> > (d_u, d_v, d_Upper);
innerProductGPU << <numBlocks, threadsPerBlock >> > (d_u, d_u, d_Lower);
hipDeviceSynchronize();
// Compute the projection and store into d_p
calculateProjectionGPU << <numBlocks, threadsPerBlock >> > (d_u, d_Upper, d_Lower, d_p);
hipDeviceSynchronize();
// Now calculate next intermediate u
vectorSubGPU << <numBlocks, threadsPerBlock >> > (d_v, d_p, d_u_int);
hipDeviceSynchronize();
// Now place that intermediate u into U_t_int
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u_int, d_Ut_int, j, true);
hipDeviceSynchronize();
// Next load d_v with previous u_int
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_v, d_Ut_int, j, false);
}
// Next calculate norm of u(i-1)_int
singleNormGPU << <numBlocks, threadsPerBlock >> > (d_u_int, d_n);
hipDeviceSynchronize();
// Get next entry in U matrix by dividing norm by u(i)_int
singleNormMultGPU << <numBlocks, threadsPerBlock >> > (d_u_int, d_n);
hipDeviceSynchronize();
// Store that result to U_transpose matrix
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u_int, d_Ut, i, true);
hipDeviceSynchronize();
// Reverse transpose d_Ut
matrixTransposeKernel << <numBlocks, threadsPerBlock >> > (d_U, d_Ut, true);
hipDeviceSynchronize();
}
// Next calculate norms for e's
vectorNormsGPU << <numBlocks, threadsPerBlock >> > (d_Ut, d_N);
hipDeviceSynchronize();
// Finally get E matrix
normsMultGPU << <numBlocks, threadsPerBlock >> > (d_U, d_N, d_E);
hipDeviceSynchronize();
// Copy output E matrix back
hipMemcpy(h_E.data(), d_E, bytesMatrix, hipMemcpyDeviceToHost);
// Free memory on device
hipFree(d_v);
hipFree(d_V);
hipFree(d_Vt);
hipFree(d_u);
hipFree(d_U);
hipFree(d_Ut);
hipFree(d_Upper);
hipFree(d_Lower);
hipFree(d_Pt);
hipFree(d_PS);
hipFree(d_N);
hipFree(d_E);
return h_E;
}
void runQR() {
vector<float> h_Q(M * N, 0.0f);
h_Q = runCGS();
// Print E matrix
printf("Orthonormal Basis Q: \n");
printMatrix(h_Q);
printf("\n");
// Get transpose of Q
vector<float> h_Qt(M * N, 0.0f);
matrixTranspose(h_Q, h_Qt, false);
printf("Transpose of Q: \n");
printTranspose(h_Qt);
printf("\n");
// Init GPU Parameters
int numBlocks = 1;
int threads = 32;
dim3 threadsPerBlock(threads, threads);
size_t bytesQt = N * M * sizeof(float);
size_t bytesA = M * N * sizeof(float);
size_t bytesR = N * N * sizeof(float);
vector<float> h_A(M * N, 0.0f);
h_A[0] = 1.0; h_A[1] = -1.0; h_A[2] = 4.0;
h_A[3] = 1.0; h_A[4] = 4.0; h_A[5] = -2.0;
h_A[6] = 1.0; h_A[7] = 4.0; h_A[8] = 2.0;
h_A[9] = 1.0; h_A[10] = -1.0; h_A[11] = 0.0;
vector<float> h_R(N * N, 0.0f);
// Allocate and copy to device memory
float* d_Qt, * d_A, * d_R;
hipMalloc(&d_Qt, bytesQt);
hipMalloc(&d_A, bytesA);
hipMalloc(&d_R, bytesR);
hipMemcpy(d_Qt, h_Qt.data(), bytesQt, hipMemcpyHostToDevice);
hipMemcpy(d_A, h_A.data(), bytesA, hipMemcpyHostToDevice);
hipMemcpy(d_R, h_R.data(), bytesR, hipMemcpyHostToDevice);
// Run the matrix multiplication
matrixMultGPU << <numBlocks, threadsPerBlock >> > (d_Qt, d_A, d_R);
// Copy data back
hipMemcpy(h_R.data(), d_R, bytesR, hipMemcpyDeviceToHost);
// Print R
printf("Upper triangular matrix R:\n");
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f\t", h_R[i * N + j]);
}
printf("\n");
}
printf("\n");
} | 4dbe53a017527686b855e870f3ba714649a6d4cc.cu | #include <cstdlib>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cassert>
#include <functional>
#include <algorithm>
#include <vector>
#include <math.h>
#include <chrono>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
#include <device_functions.h>
#include <math_functions.h>
#include <cuda_runtime_api.h>
using std::cout;
using std::endl;
using std::fixed;
using std::setprecision;
using std::generate;
using std::vector;
/* ********************************************************* */
/* These two lines define the dimensions (MxN) of the matrix */
#define M 4 // Number of elements/vector
#define N 3 // Number of vectors
/* Change them to test different size matrices */
/* ********************************************************* */
/* CPU Functions */
void matrixTranspose(vector<float>& V, vector<float>& Vt, bool reverse);
void printMatrix(vector<float>& V);
void printTranspose(vector<float>& Vt);
// Function to run CGS decomposition on the GPU
vector<float> runCGS();
void runQR();
/* GPU Functions */
__device__ void printVectorDevice(float* v);
__global__ void printVectorKernel(float* v);
__global__ void printMatrixKernel(float* V);
__global__ void getVectorKernel(float* v, float* V_t, int rowNum, bool reverse);
__global__ void matrixTransposeKernel(float* V, float* V_t, bool reverse);
__global__ void calculateProjectionGPU(float* u, float* upper, float* lower, float* p);
__global__ void innerProductGPU(float* a, float* b, float* c);
//__global__ void sumProjectionsGPU(float* P_t, float* projSum);
__global__ void vectorSubGPU(float* v, float* projSum, float* u);
__global__ void singleNormGPU(float* u_int, float* norm);
__global__ void vectorNormsGPU(float* U_t, float* norms);
__global__ void singleNormMultGPU(float* u_int, float* norm);
__global__ void normsMultGPU(float* U, float* norms, float* E);
int main() {
runQR();
return 0;
}
/* CPU Functions: */
// Transposes or reverse-transposes a matrix passed in as a 1D vector
void matrixTranspose(vector<float>& V, vector<float>& Vt, bool reverse) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (!reverse) {
Vt[i * M + j] = V[j * N + i];
}
else {
V[j * N + i] = Vt[i * M + j];
}
}
}
}
// Prints a matrix passed in as a 1-D vector
void printMatrix(vector<float>& V) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
cout << V[i * N + j] << "\t";
}
cout << endl;
}
cout << endl;
}
// Prints the transpose of a matrix passed in as a 1-D vector
void printTranspose(vector<float>& Vt) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
cout << Vt[i * M + j] << "\t";
}
cout << endl;
}
cout << endl;
}
/* GPU Functions: */
// Prints a vector from the GPU
__device__ void printVectorDevice(float* v) {
for (int i = 0; i < M; i++) {
printf("%f\t", v[i]);
}
printf("\n");
}
__global__ void printVectorKernel(float* v) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
printf("%f\t", v[i]);
}
printf("\n");
}
}
// Prints a matrix from the GPU
__global__ void printMatrixKernel(float* V) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
printf("%f\t", V[i * N + j]);
}
printf("\n");
}
printf("\n");
}
}
// Transposes or reverse-transposes a matrix from GPU
__global__ void matrixTransposeKernel(float* V, float* V_t, bool reverse) {
if (threadIdx.x == 0) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (!reverse) {
V_t[i * M + j] = V[j * N + i];
}
else {
V[j * N + i] = V_t[i * M + j];
}
}
}
}
}
// Accesses a row in V_transpose and copies it into the storage vector v or does the reverse
__global__ void getVectorKernel(float* v, float* V_t, int rowNum, bool reverse) {
if (threadIdx.x == 0) {
for (int i = 0; i < M; i++) {
if (!reverse) {
v[i] = V_t[rowNum * M + i];
}
else {
V_t[rowNum * M + i] = v[i];
}
}
}
}
// Multiply a vector by a scalar to get a projection - requires M threads for M-length vectors
__global__ void calculateProjectionGPU(float* u, float* upper, float* lower, float* p) {
int i = threadIdx.x;
// Each thread does one multiplication
if (i < M) {
if (*lower != 0) {
__shared__ float temp[M];
temp[i] = *upper / *lower;
__syncthreads();
p[i] = u[i] * temp[i];
}
else {
p[i] = 0.0f;
}
}
}
// Calculate inner product on GPU - basically stolen from https://www.nvidia.com/content/GTC-2010/pdfs/2131_GTC2010.pdf
__global__ void innerProductGPU(float* a, float* b, float* c) {
// Likely to have more threads than entries, so use this to keep in range
if (threadIdx.x < M) {
// Each thread does one multiplication
// Need to use shared memory to store products
__shared__ float temp[M];
temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
// Need threads to synchronize - no threads advance until all are at this line, ensures no read-before-write hazard
__syncthreads();
// Now do the sum using only thread 0
if (threadIdx.x == 0) {
float sum = 0.0f;
for (int i = 0; i < M; i++) {
sum += temp[i];
}
*c = sum;
}
}
}
// Vector subtraction to get u[i] - requires M threads for M-length vectors, will be executed from 1 thread
__global__ void vectorSubGPU(float* v, float* projSum, float* u) {
int i = threadIdx.x;
// Each thread subtracts one element from the other
if (i < M) {
u[i] = v[i] - projSum[i];
}
}
// Calculates the euclidean norm of one vector and stores it in norm - requires N threads for N columns
__global__ void singleNormGPU(float* u_int, float* norm) {
int idx = threadIdx.x;
if (idx < N) {
float temp = 0.0f;
// First sum the components of each u together
for (int i = 0; i < M; i++) {
temp += (u_int[i] * u_int[i]);
}
// Now get reciprocal sqrt and store
*norm = rsqrtf(temp);
}
}
// Calculates the reciprocal Euclidean norms of each column vector and stores them into an array - requires N threads for N columns
__global__ void vectorNormsGPU(float* U_t, float* norms) {
int idx = threadIdx.x;
if (idx < N) {
float temp = 0.0f;
// First sum the components of each u together
for (int i = 0; i < M; i++) {
temp += (U_t[idx * M + i] * U_t[idx * M + i]);
}
// Now get reciprocal sqrt and store into norms array
norms[idx] = rsqrtf(temp);
}
}
// Multiply one vector by the reciprocal square root to normalize it - requires M threads
__global__ void singleNormMultGPU(float* u_int, float* norm) {
int idx = threadIdx.x;
if (idx < M) {
u_int[idx] = u_int[idx] * (*norm);
}
}
// Multiplies each u by 1/norm to get the e's - requires M*N threads to do all at once
__global__ void normsMultGPU(float* U, float* norms, float* E) {
// Note: This function requires that U be passed in, not U_t (for indexing purposes)
int idx = threadIdx.x;
if (idx < M * N) {
// Get index in norms array
int normIdx = (idx % N);
E[idx] = U[idx] * norms[normIdx];
}
}
// Simple matrix multiplication on the GPU
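// Computes R = Q_t * A (i.e. R = Q^T A); with Q from Gram-Schmidt this is the upper-triangular factor of A = QR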
__global__ void matrixMultGPU(float* Q_t, float* A, float* R) {
// Get each thread x and y
int row = threadIdx.y;
int col = threadIdx.x;
// Q_t is a NxM matrix, A is a MxN matrix
// Therefore R will be NxN
	if ((row < N) && (col < N)) {
		float sum = 0.0f;
		for (int i = 0; i < M; i++) {
			sum += Q_t[row * M + i] * A[i * N + col];
		}
		R[row * N + col] = sum;
	}
}
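// Gram-Schmidt driver: orthogonalizes the columns of V on the GPU and returns the resulting
// orthonormal basis E (the Q factor of the QR decomposition) as a flattened MxN matrix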
vector<float> runCGS() {
size_t bytesIP = sizeof(float); // One float
size_t bytesMatrix = M * N * sizeof(float); // MxN matrix
size_t bytesVecLen = M * sizeof(float); // Mx1 vector
size_t bytesNumVec = N * sizeof(float); // Nx1 vector
// Initialize vectors and matrices for QR
vector<float> h_v(M, 0.0f); // Storage vector for calculating u's
vector<float> h_V(M * N); // Input matrix of v's
vector<float> h_Vt(M * N); // Transpose of V
vector<float> h_u(M, 0.0f); // Storage vector for calculating u's
vector<float> h_u_int(M, 0.0f); // Storage vector for calculating intermediate u's
vector<float> h_U(M * N, 0.0f); // Initially empty matrix of u's
vector<float> h_U_int(M * N, 0.0f); // Initially empty matrix of intermediate u's
vector<float> h_Ut(M * N, 0.0f); // Transpose of U
vector<float> h_Ut_int(M * N, 0.0f); // Transpose of U_int
float* h_Upper = nullptr;
float* h_Lower = nullptr;
vector<float> h_p(M, 0.0f); // Holds a single projection
vector<float> h_Pt(M * N, 0.0f); // Transpose of projections matrix
vector<float> h_PS(M, 0.0f); // Sum of projections vector
float* h_n = nullptr; // Single norm value for MGS
vector<float> h_N(N, 0.0f); // Vector of norms
vector<float> h_E(M * N, 0.0f); // Output E matrix
// Initialize V with a 4x3 example that works out nicely - http://www.cs.nthu.edu.tw/~cherung/teaching/2008cs3331/chap4%20example.pdf
h_V[0] = 1.0; h_V[1] = -1.0; h_V[2] = 4.0;
h_V[3] = 1.0; h_V[4] = 4.0; h_V[5] = -2.0;
h_V[6] = 1.0; h_V[7] = 4.0; h_V[8] = 2.0;
h_V[9] = 1.0; h_V[10] = -1.0; h_V[11] = 0.0;
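	// (For this example the compile-time dimensions are assumed to be M = 4 rows and N = 3 columns.)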
// Initialize V_transpose
matrixTranspose(h_V, h_Vt, false);
// Copy v1 to u1
for (int i = 0; i < M; i++) {
h_Ut[i] = h_Vt[i];
}
// Store into h_U
matrixTranspose(h_U, h_Ut, true);
// Print initial V matrix:
printf("Input Matrix V:\n");
printMatrix(h_V);
printf("\n");
// Allocate device memory
float* d_v, * d_V, * d_Vt, * d_u, * d_u_int, * d_U, * d_U_int, * d_Ut, * d_Ut_int, * d_Upper, * d_Lower, * d_p, * d_Pt, * d_PS, * d_n, * d_N, * d_E;
cudaMalloc(&d_v, bytesVecLen);
cudaMalloc(&d_V, bytesMatrix);
cudaMalloc(&d_Vt, bytesMatrix);
cudaMalloc(&d_u, bytesVecLen);
cudaMalloc(&d_u_int, bytesVecLen);
cudaMalloc(&d_U, bytesMatrix);
cudaMalloc(&d_U_int, bytesMatrix);
cudaMalloc(&d_Ut, bytesMatrix);
cudaMalloc(&d_Ut_int, bytesMatrix);
cudaMalloc((void**)&d_Upper, bytesIP);
cudaMalloc((void**)&d_Lower, bytesIP);
cudaMalloc(&d_p, bytesVecLen);
cudaMalloc(&d_Pt, bytesMatrix);
cudaMalloc(&d_PS, bytesVecLen);
cudaMalloc((void**)&d_n, bytesIP);
cudaMalloc(&d_N, bytesNumVec);
cudaMalloc(&d_E, bytesMatrix);
cudaMemcpy(d_v, h_v.data(), bytesVecLen, cudaMemcpyHostToDevice);
cudaMemcpy(d_V, h_V.data(), bytesMatrix, cudaMemcpyHostToDevice);
cudaMemcpy(d_Vt, h_Vt.data(), bytesMatrix, cudaMemcpyHostToDevice);
cudaMemcpy(d_u, h_u.data(), bytesVecLen, cudaMemcpyHostToDevice);
cudaMemcpy(d_u_int, h_u_int.data(), bytesVecLen, cudaMemcpyHostToDevice);
cudaMemcpy(d_U, h_U.data(), bytesMatrix, cudaMemcpyHostToDevice);
	cudaMemcpy(d_U_int, h_U_int.data(), bytesMatrix, cudaMemcpyHostToDevice); // h_U_int is an MxN matrix, so copy bytesMatrix
cudaMemcpy(d_Ut, h_Ut.data(), bytesMatrix, cudaMemcpyHostToDevice);
cudaMemcpy(d_Ut_int, h_Ut_int.data(), bytesMatrix, cudaMemcpyHostToDevice);
	// d_Upper and d_Lower are written entirely on the device (by innerProductGPU), so no
	// host-to-device copy is needed; h_Upper and h_Lower are null host pointers
cudaMemcpy(d_p, h_p.data(), bytesVecLen, cudaMemcpyHostToDevice);
cudaMemcpy(d_Pt, h_Pt.data(), bytesMatrix, cudaMemcpyHostToDevice);
cudaMemcpy(d_PS, h_PS.data(), bytesVecLen, cudaMemcpyHostToDevice);
	// d_n is likewise written on the device (by singleNormGPU); h_n is a null host pointer, so no copy is performed
cudaMemcpy(d_N, h_N.data(), bytesNumVec, cudaMemcpyHostToDevice);
cudaMemcpy(d_E, h_E.data(), bytesMatrix, cudaMemcpyHostToDevice);
int numBlocks = 1;
int threads = 32;
dim3 threadsPerBlock(threads, 1, 1);
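	// Outer loop over columns: u(0) is just v(0) (copied above); for each later column i, the inner
	// loop subtracts from v(i) its projection onto every previous u(j), keeping the running remainder
	// in d_u_int, and the normalized remainder is then stored as row i of U_transpose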
for (int i = 1; i < N; i++) {
// Load v(i)
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_v, d_Vt, i, false);
for (int j = 0; j < i; j++) {
// Load d_u with u(j)
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u, d_Ut, j, false);
cudaDeviceSynchronize();
			// Compute the two inner products for the projection onto that u(j)
innerProductGPU << <numBlocks, threadsPerBlock >> > (d_u, d_v, d_Upper);
innerProductGPU << <numBlocks, threadsPerBlock >> > (d_u, d_u, d_Lower);
cudaDeviceSynchronize();
// Compute the projection and store into d_p
calculateProjectionGPU << <numBlocks, threadsPerBlock >> > (d_u, d_Upper, d_Lower, d_p);
cudaDeviceSynchronize();
// Now calculate next intermediate u
vectorSubGPU << <numBlocks, threadsPerBlock >> > (d_v, d_p, d_u_int);
cudaDeviceSynchronize();
// Now place that intermediate u into U_t_int
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u_int, d_Ut_int, j, true);
cudaDeviceSynchronize();
// Next load d_v with previous u_int
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_v, d_Ut_int, j, false);
}
		// Next calculate the reciprocal norm of u(i)_int
singleNormGPU << <numBlocks, threadsPerBlock >> > (d_u_int, d_n);
cudaDeviceSynchronize();
		// Normalize u(i)_int by multiplying it by the reciprocal of its norm
singleNormMultGPU << <numBlocks, threadsPerBlock >> > (d_u_int, d_n);
cudaDeviceSynchronize();
// Store that result to U_transpose matrix
getVectorKernel << <numBlocks, threadsPerBlock >> > (d_u_int, d_Ut, i, true);
cudaDeviceSynchronize();
// Reverse transpose d_Ut
matrixTransposeKernel << <numBlocks, threadsPerBlock >> > (d_U, d_Ut, true);
cudaDeviceSynchronize();
}
// Next calculate norms for e's
vectorNormsGPU << <numBlocks, threadsPerBlock >> > (d_Ut, d_N);
cudaDeviceSynchronize();
// Finally get E matrix
normsMultGPU << <numBlocks, threadsPerBlock >> > (d_U, d_N, d_E);
cudaDeviceSynchronize();
// Copy output E matrix back
cudaMemcpy(h_E.data(), d_E, bytesMatrix, cudaMemcpyDeviceToHost);
// Free memory on device
cudaFree(d_v);
cudaFree(d_V);
cudaFree(d_Vt);
cudaFree(d_u);
cudaFree(d_U);
cudaFree(d_Ut);
cudaFree(d_Upper);
cudaFree(d_Lower);
cudaFree(d_Pt);
cudaFree(d_PS);
cudaFree(d_N);
	cudaFree(d_E);
	// Also free the intermediate buffers allocated above
	cudaFree(d_u_int);
	cudaFree(d_U_int);
	cudaFree(d_Ut_int);
	cudaFree(d_p);
	cudaFree(d_n);
return h_E;
}
void runQR() {
vector<float> h_Q(M * N, 0.0f);
h_Q = runCGS();
// Print E matrix
printf("Orthonormal Basis Q: \n");
printMatrix(h_Q);
printf("\n");
// Get transpose of Q
vector<float> h_Qt(M * N, 0.0f);
matrixTranspose(h_Q, h_Qt, false);
printf("Transpose of Q: \n");
printTranspose(h_Qt);
printf("\n");
// Init GPU Parameters
int numBlocks = 1;
int threads = 32;
dim3 threadsPerBlock(threads, threads);
size_t bytesQt = N * M * sizeof(float);
size_t bytesA = M * N * sizeof(float);
size_t bytesR = N * N * sizeof(float);
vector<float> h_A(M * N, 0.0f);
h_A[0] = 1.0; h_A[1] = -1.0; h_A[2] = 4.0;
h_A[3] = 1.0; h_A[4] = 4.0; h_A[5] = -2.0;
h_A[6] = 1.0; h_A[7] = 4.0; h_A[8] = 2.0;
h_A[9] = 1.0; h_A[10] = -1.0; h_A[11] = 0.0;
vector<float> h_R(N * N, 0.0f);
// Allocate and copy to device memory
float* d_Qt, * d_A, * d_R;
cudaMalloc(&d_Qt, bytesQt);
cudaMalloc(&d_A, bytesA);
cudaMalloc(&d_R, bytesR);
cudaMemcpy(d_Qt, h_Qt.data(), bytesQt, cudaMemcpyHostToDevice);
cudaMemcpy(d_A, h_A.data(), bytesA, cudaMemcpyHostToDevice);
cudaMemcpy(d_R, h_R.data(), bytesR, cudaMemcpyHostToDevice);
// Run the matrix multiplication
matrixMultGPU << <numBlocks, threadsPerBlock >> > (d_Qt, d_A, d_R);
// Copy data back
cudaMemcpy(h_R.data(), d_R, bytesR, cudaMemcpyDeviceToHost);
// Print R
printf("Upper triangular matrix R:\n");
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
printf("%f\t", h_R[i * N + j]);
}
printf("\n");
}
	printf("\n");
	// Free device memory before returning
	cudaFree(d_Qt);
	cudaFree(d_A);
	cudaFree(d_R);
} |
2fb5da73e2a405bd50ab6bc15abc416c373216e4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
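// GPUAccess values are ordered (kNone < kRead < kWrite); Permissions tracks which level the host
// or a device shard currently holds, and the Complementary()/DenyComplementary() helpers keep the
// host-side and device-side permissions consistent with each other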
class Permissions {
GPUAccess access_;
explicit Permissions(GPUAccess access) : access_{access} {}
public:
Permissions() : access_{GPUAccess::kNone} {}
explicit Permissions(bool perm)
: access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
bool CanRead() const { return access_ >= kRead; }
bool CanWrite() const { return access_ == kWrite; }
bool CanAccess(GPUAccess access) const { return access_ >= access; }
  void Grant(GPUAccess access) { access_ = std::max(access_, access); }
  void DenyComplementary(GPUAccess compl_access) {
    access_ = std::min(access_, GPUAccess::kWrite - compl_access);
}
Permissions Complementary() const {
return Permissions(GPUAccess::kWrite - access_);
}
};
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_{0}, device_{-1}, start_{0}, perm_d_{false},
cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
LazyResize(vec_->Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), hipMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), hipMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), hipMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
hipMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
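      // Three cases: the shard already has the required access (nothing to do); it already holds the
      // data read-only, so only the permission level changes; or the data lives on the host and is
      // copied to the device below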
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), hipMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
T* Raw() { return data_.data().get(); }
size_t Start() const { return start_; }
size_t DataSize() const { return data_.size(); }
Permissions& Perm() { return perm_d_; }
Permissions const& Perm() const { return perm_d_; }
private:
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).Raw(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {shards_.at(devices.Index(device)).Raw(),
static_cast<SpanInd>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).DataSize();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Start();
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
hipMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
hipMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) { // NOLINT
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Reshard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Reshard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
if (distribution.IsEmpty()) {
LazySyncHost(GPUAccess::kWrite);
}
distribution_ = distribution;
InitShards();
}
void Reshard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Reshard(GPUDistribution::Block(new_devices));
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.Perm().DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).Perm().CanAccess(access);
}
private:
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
impl_->Reshard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
| 2fb5da73e2a405bd50ab6bc15abc416c373216e4.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include "./host_device_vector.h"
#include <thrust/fill.h>
#include <xgboost/data.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "./device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
// wrapper over access with useful methods
class Permissions {
GPUAccess access_;
explicit Permissions(GPUAccess access) : access_{access} {}
public:
Permissions() : access_{GPUAccess::kNone} {}
explicit Permissions(bool perm)
: access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {}
bool CanRead() const { return access_ >= kRead; }
bool CanWrite() const { return access_ == kWrite; }
bool CanAccess(GPUAccess access) const { return access_ >= access; }
void Grant(GPUAccess access) { access_ = std::max(access_, access); }
void DenyComplementary(GPUAccess compl_access) {
access_ = std::min(access_, GPUAccess::kWrite - compl_access);
}
Permissions Complementary() const {
return Permissions(GPUAccess::kWrite - access_);
}
};
template <typename T>
struct HostDeviceVectorImpl {
struct DeviceShard {
DeviceShard()
: proper_size_{0}, device_{-1}, start_{0}, perm_d_{false},
cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {}
void Init(HostDeviceVectorImpl<T>* vec, int device) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = device;
LazyResize(vec_->Size());
perm_d_ = vec_->perm_h_.Complementary();
}
void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) {
if (vec_ == nullptr) { vec_ = vec; }
CHECK_EQ(vec, vec_);
device_ = other.device_;
cached_size_ = other.cached_size_;
start_ = other.start_;
proper_size_ = other.proper_size_;
SetDevice();
data_.resize(other.data_.size());
perm_d_ = other.perm_d_;
}
void ScatterFrom(const T* begin) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), begin + start_,
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void GatherTo(thrust::device_ptr<T> begin) {
LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(begin.get() + start_, data_.data().get(),
proper_size_ * sizeof(T), cudaMemcpyDefault));
}
void Fill(T v) {
// TODO(canonizer): avoid full copy of host data
LazySyncDevice(GPUAccess::kWrite);
SetDevice();
thrust::fill(data_.begin(), data_.end(), v);
}
void Copy(DeviceShard* other) {
// TODO(canonizer): avoid full copy of host data for this (but not for other)
LazySyncDevice(GPUAccess::kWrite);
other->LazySyncDevice(GPUAccess::kRead);
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), other->data_.data().get(),
data_.size() * sizeof(T), cudaMemcpyDefault));
}
void LazySyncHost(GPUAccess access) {
SetDevice();
dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_,
data_.data().get(), proper_size_ * sizeof(T),
cudaMemcpyDeviceToHost));
perm_d_.DenyComplementary(access);
}
void LazyResize(size_t new_size) {
if (new_size == cached_size_) { return; }
// resize is required
int ndevices = vec_->distribution_.devices_.Size();
int device_index = vec_->distribution_.devices_.Index(device_);
start_ = vec_->distribution_.ShardStart(new_size, device_index);
proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index);
// The size on this device.
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index);
SetDevice();
data_.resize(size_d);
cached_size_ = new_size;
}
void LazySyncDevice(GPUAccess access) {
if (perm_d_.CanAccess(access)) { return; }
if (perm_d_.CanRead()) {
// deny read to the host
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
return;
}
// data is on the host
size_t size_h = vec_->data_h_.size();
LazyResize(size_h);
SetDevice();
dh::safe_cuda(
cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_,
data_.size() * sizeof(T), cudaMemcpyHostToDevice));
perm_d_.Grant(access);
std::lock_guard<std::mutex> lock(vec_->mutex_);
vec_->perm_h_.DenyComplementary(access);
vec_->size_d_ = size_h;
}
void SetDevice() {
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
}
T* Raw() { return data_.data().get(); }
size_t Start() const { return start_; }
size_t DataSize() const { return data_.size(); }
Permissions& Perm() { return perm_d_; }
Permissions const& Perm() const { return perm_d_; }
private:
int device_;
thrust::device_vector<T> data_;
// cached vector size
size_t cached_size_;
size_t start_;
// size of the portion to copy back to the host
size_t proper_size_;
Permissions perm_d_;
HostDeviceVectorImpl<T>* vec_;
};
HostDeviceVectorImpl(size_t size, T v, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = size;
InitShards();
Fill(v);
} else {
data_h_.resize(size, v);
}
}
// required, as a new std::mutex has to be created
HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other)
: data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_),
distribution_(other.distribution_), mutex_() {
shards_.resize(other.shards_.size());
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, other.shards_.at(i));
});
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, const GPUDistribution &distribution)
: distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) {
if (!distribution_.IsEmpty()) {
size_d_ = init.size();
InitShards();
Copy(init);
} else {
data_h_ = init;
}
}
void InitShards() {
int ndevices = distribution_.devices_.Size();
shards_.resize(ndevices);
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Init(this, distribution_.devices_.DeviceId(i));
});
}
size_t Size() const { return perm_h_.CanRead() ? data_h_.size() : size_d_; }
GPUSet Devices() const { return distribution_.devices_; }
const GPUDistribution& Distribution() const { return distribution_; }
T* DevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
const T* ConstDevicePointer(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Raw();
}
common::Span<T> DeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kWrite);
return {shards_.at(devices.Index(device)).Raw(),
static_cast<typename common::Span<T>::index_type>(DeviceSize(device))};
}
common::Span<const T> ConstDeviceSpan(int device) {
GPUSet devices = distribution_.devices_;
CHECK(devices.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
using SpanInd = typename common::Span<const T>::index_type;
return {shards_.at(devices.Index(device)).Raw(),
static_cast<SpanInd>(DeviceSize(device))};
}
size_t DeviceSize(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).DataSize();
}
size_t DeviceStart(int device) {
CHECK(distribution_.devices_.Contains(device));
LazySyncDevice(device, GPUAccess::kRead);
return shards_.at(distribution_.devices_.Index(device)).Start();
}
thrust::device_ptr<T> tbegin(int device) { // NOLINT
return thrust::device_ptr<T>(DevicePointer(device));
}
thrust::device_ptr<const T> tcbegin(int device) { // NOLINT
return thrust::device_ptr<const T>(ConstDevicePointer(device));
}
thrust::device_ptr<T> tend(int device) { // NOLINT
return tbegin(device) + DeviceSize(device);
}
thrust::device_ptr<const T> tcend(int device) { // NOLINT
return tcbegin(device) + DeviceSize(device);
}
void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(),
(end - begin) * sizeof(T),
cudaMemcpyDeviceToHost));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(begin.get());
});
}
}
void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) {
CHECK_EQ(end - begin, Size());
if (perm_h_.CanWrite()) {
dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(),
data_h_.size() * sizeof(T),
cudaMemcpyHostToDevice));
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); });
}
}
void Fill(T v) { // NOLINT
if (perm_h_.CanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
// Data is on host.
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
// Data is on device;
if (distribution_ != other->distribution_) {
distribution_ = GPUDistribution();
Reshard(other->Distribution());
size_d_ = other->size_d_;
}
dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) {
shard.Copy(&other->shards_.at(i));
});
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.data());
});
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (perm_h_.CanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.ScatterFrom(other.begin());
});
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kWrite);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void Reshard(const GPUDistribution& distribution) {
if (distribution_ == distribution) { return; }
CHECK(distribution_.IsEmpty() || distribution.IsEmpty());
if (distribution.IsEmpty()) {
LazySyncHost(GPUAccess::kWrite);
}
distribution_ = distribution;
InitShards();
}
void Reshard(GPUSet new_devices) {
if (distribution_.Devices() == new_devices) { return; }
Reshard(GPUDistribution::Block(new_devices));
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if (distribution_.IsFixedSize()) {
CHECK_EQ(new_size, distribution_.offsets_.back());
}
if (Size() == 0 && !distribution_.IsEmpty()) {
// fast on-device resize
perm_h_ = Permissions(false);
size_d_ = new_size;
InitShards();
Fill(v);
} else {
// resize on host
LazySyncHost(GPUAccess::kWrite);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (perm_h_.CanAccess(access)) { return; }
if (perm_h_.CanRead()) {
// data is present, just need to deny access to the device
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.Perm().DenyComplementary(access);
});
perm_h_.Grant(access);
return;
}
if (data_h_.size() != size_d_) { data_h_.resize(size_d_); }
dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) {
shard.LazySyncHost(access);
});
perm_h_.Grant(access);
}
void LazySyncDevice(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
CHECK(devices.Contains(device));
shards_.at(devices.Index(device)).LazySyncDevice(access);
}
bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); }
bool DeviceCanAccess(int device, GPUAccess access) {
GPUSet devices = distribution_.Devices();
if (!devices.Contains(device)) { return false; }
return shards_.at(devices.Index(device)).Perm().CanAccess(access);
}
private:
std::vector<T> data_h_;
Permissions perm_h_;
// the total size of the data stored on the devices
size_t size_d_;
GPUDistribution distribution_;
// protects size_d_ and perm_h_ when updated from multiple threads
std::mutex mutex_;
std::vector<DeviceShard> shards_;
};
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(size_t size, T v, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(size, v, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(std::initializer_list<T> init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector
(const std::vector<T>& init, const GPUDistribution &distribution) : impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(init, distribution);
}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other)
: impl_(nullptr) {
impl_ = new HostDeviceVectorImpl<T>(*other.impl_);
}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=
(const HostDeviceVector<T>& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> newImpl(new HostDeviceVectorImpl<T>(*other.impl_));
delete impl_;
impl_ = newImpl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); }
template <typename T>
const GPUDistribution& HostDeviceVector<T>::Distribution() const {
return impl_->Distribution();
}
template <typename T>
T* HostDeviceVector<T>::DevicePointer(int device) {
return impl_->DevicePointer(device);
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer(int device) const {
return impl_->ConstDevicePointer(device);
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) {
return impl_->DeviceSpan(device);
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const {
return impl_->ConstDeviceSpan(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceStart(int device) const {
return impl_->DeviceStart(device);
}
template <typename T>
size_t HostDeviceVector<T>::DeviceSize(int device) const {
return impl_->DeviceSize(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT
return impl_->tbegin(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT
return impl_->tcbegin(device);
}
template <typename T>
thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT
return impl_->tend(device);
}
template <typename T>
thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT
return impl_->tcend(device);
}
template <typename T>
void HostDeviceVector<T>::ScatterFrom
(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) {
impl_->ScatterFrom(begin, end);
}
template <typename T>
void HostDeviceVector<T>::GatherTo
(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const {
impl_->GatherTo(begin, end);
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const {
return impl_->HostCanAccess(access);
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const {
return impl_->DeviceCanAccess(device, access);
}
template <typename T>
void HostDeviceVector<T>::Reshard(GPUSet new_devices) const {
impl_->Reshard(new_devices);
}
template <typename T>
void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const {
impl_->Reshard(distribution);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<size_t>;
} // namespace xgboost
|
a35c562e04593067555c4e5e3f523245efcde3f1.hip | // !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/config.h"
#include "saiga/core/image/all.h"
#include "saiga/core/math/all.h"
#include "saiga/core/util/statistics.h"
#include "saiga/cuda/imageProcessing/OrbDescriptors.h"
#include "saiga/cuda/imageProcessing/image.h"
#include "saiga/vision/features/OrbDescriptors.h"
#include "gtest/gtest.h"
using namespace Saiga;
class FeatureTest
{
public:
FeatureTest()
{
// Load images
random.create(200, 200);
for (int i : random.rowRange())
{
for (int j : random.colRange())
{
random(i, j) = Random::uniformInt(0, 255);
}
}
square.create(200, 200);
square.makeZero();
ImageDraw::drawCircle(square.getImageView(), vec2(100, 100), 50, 255);
random.save("test_random.png");
square.save("test_square.png");
d_random.upload(random);
d_square.upload(square);
obj_random = d_random.GetTextureObject(hipAddressModeClamp);
obj_square = d_square.GetTextureObject(hipAddressModeClamp);
}
TemplatedImage<unsigned char> random;
TemplatedImage<unsigned char> square;
CUDA::CudaImage<unsigned char> d_random;
CUDA::CudaImage<unsigned char> d_square;
hipTextureObject_t obj_random;
hipTextureObject_t obj_square;
ORB orb;
Saiga::CUDA::ORB gpu_orb;
};
FeatureTest test;
TEST(ORB, Angle)
{
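    // Sample points on the circle drawn in `square` and check that both the CPU and the GPU
    // orientation estimates match the analytic angle of each sample to within 2 degrees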
vec2 center(100, 100);
float radius = 50;
int n = 100;
std::vector<std::pair<vec2, float>> point_expected_angle;
for (int i = 0; i < 100; ++i)
{
float alpha = (float(i) / n) * pi<float>() * 2;
vec2 p;
p.x() = sin(-alpha - pi<float>() / 2) * radius;
p.y() = cos(-alpha - pi<float>() / 2) * radius;
p += center;
point_expected_angle.push_back({p, degrees(alpha)});
}
for (auto pa : point_expected_angle)
{
float angle = test.orb.ComputeAngle(test.square, pa.first);
// We allow a 2 degree error due to sampling artifacts
EXPECT_NEAR(angle, pa.second, 2);
}
thrust::device_vector<KeyPoint<float>> d_kps(point_expected_angle.size());
for (int i = 0; i < point_expected_angle.size(); ++i)
{
KeyPoint<float> kp;
kp.point = point_expected_angle[i].first;
kp.angle = 0;
d_kps[i] = kp;
}
test.gpu_orb.ComputeAngles(test.obj_square, test.d_square.getImageView(), d_kps, 0, 0, 0, 0, 0);
for (int i = 0; i < point_expected_angle.size(); ++i)
{
KeyPoint<float> h_kp = d_kps[i];
EXPECT_NEAR(point_expected_angle[i].second, h_kp.angle, 2);
}
}
TEST(ORB, AngleRandom)
{
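    // On random image content the CPU and GPU orientation estimates should agree to within 0.1 degrees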
std::vector<vec2> sample_points;
// use enough points to fill multiple SMs on the gpu
for (int i = 0; i < 2000; ++i)
{
sample_points.push_back(vec2(Random::uniformInt(50, 150), Random::uniformInt(50, 150)));
}
std::vector<double> angle_cpu;
for (auto p : sample_points)
{
float angle = test.orb.ComputeAngle(test.random, p);
angle_cpu.push_back(angle);
}
thrust::device_vector<KeyPoint<float>> d_kps(sample_points.size());
for (int i = 0; i < sample_points.size(); ++i)
{
KeyPoint<float> kp;
kp.point = sample_points[i];
kp.angle = 0;
d_kps[i] = kp;
}
test.gpu_orb.ComputeAngles(test.obj_random, test.d_random.getImageView(), d_kps, 0, 0, 0, 0, 0);
for (int i = 0; i < sample_points.size(); ++i)
{
KeyPoint<float> h_kp = d_kps[i];
EXPECT_NEAR(angle_cpu[i], h_kp.angle, 0.1);
}
}
TEST(ORB, DescriptorRandom)
{
thrust::host_vector<KeyPoint<float>> sample_keypoints;
int N = 20000;
for (int i = 0; i < N; ++i)
{
KeyPoint<float> kp;
kp.point = vec2(Random::uniformInt(50, 150), Random::uniformInt(50, 150));
if (i < N / 2)
{
kp.angle = 0;
}
else
{
kp.angle = Random::sampleDouble(0, 360);
}
sample_keypoints.push_back(kp);
}
std::vector<DescriptorORB> cpu_descriptors;
for (int i = 0; i < sample_keypoints.size(); ++i)
{
auto kp = sample_keypoints[i];
cpu_descriptors.push_back(test.orb.ComputeDescriptor(test.random, kp.point, kp.angle));
}
thrust::device_vector<KeyPoint<float>> gpu_keypoints = sample_keypoints;
thrust::device_vector<DescriptorORB> gpu_descriptors(sample_keypoints.size());
test.gpu_orb.ComputeDescriptors(test.obj_random, test.d_random.getImageView(), gpu_keypoints, gpu_descriptors, 0);
thrust::host_vector<DescriptorORB> gpu_descriptors_on_host(gpu_descriptors);
std::vector<double> distance_0;
std::vector<double> distance_random;
for (int i = 0; i < sample_keypoints.size(); ++i)
{
auto desc1 = cpu_descriptors[i];
auto desc2 = gpu_descriptors_on_host[i];
auto dis = distance(desc1, desc2);
if (i < N / 2)
{
distance_0.push_back(dis);
}
else
{
distance_random.push_back(dis);
}
}
Statistics<double> stat_0(distance_0);
Statistics<double> stat_random(distance_random);
// The 0 angles must be exact 0
EXPECT_EQ(stat_0.max, 0);
// Random angles is a little above 0 because of sin/cos differences
EXPECT_LE(stat_random.max, 10);
EXPECT_EQ(stat_random.median, 0);
}
| a35c562e04593067555c4e5e3f523245efcde3f1.cu | /**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/config.h"
#include "saiga/core/image/all.h"
#include "saiga/core/math/all.h"
#include "saiga/core/util/statistics.h"
#include "saiga/cuda/imageProcessing/OrbDescriptors.h"
#include "saiga/cuda/imageProcessing/image.h"
#include "saiga/vision/features/OrbDescriptors.h"
#include "gtest/gtest.h"
using namespace Saiga;
class FeatureTest
{
public:
FeatureTest()
{
// Load images
random.create(200, 200);
for (int i : random.rowRange())
{
for (int j : random.colRange())
{
random(i, j) = Random::uniformInt(0, 255);
}
}
square.create(200, 200);
square.makeZero();
ImageDraw::drawCircle(square.getImageView(), vec2(100, 100), 50, 255);
random.save("test_random.png");
square.save("test_square.png");
d_random.upload(random);
d_square.upload(square);
obj_random = d_random.GetTextureObject(cudaAddressModeClamp);
obj_square = d_square.GetTextureObject(cudaAddressModeClamp);
}
TemplatedImage<unsigned char> random;
TemplatedImage<unsigned char> square;
CUDA::CudaImage<unsigned char> d_random;
CUDA::CudaImage<unsigned char> d_square;
cudaTextureObject_t obj_random;
cudaTextureObject_t obj_square;
ORB orb;
Saiga::CUDA::ORB gpu_orb;
};
FeatureTest test;
TEST(ORB, Angle)
{
vec2 center(100, 100);
float radius = 50;
int n = 100;
std::vector<std::pair<vec2, float>> point_expected_angle;
for (int i = 0; i < 100; ++i)
{
float alpha = (float(i) / n) * pi<float>() * 2;
vec2 p;
p.x() = sin(-alpha - pi<float>() / 2) * radius;
p.y() = cos(-alpha - pi<float>() / 2) * radius;
p += center;
point_expected_angle.push_back({p, degrees(alpha)});
}
for (auto pa : point_expected_angle)
{
float angle = test.orb.ComputeAngle(test.square, pa.first);
// We allow a 2 degree error due to sampling artifacts
EXPECT_NEAR(angle, pa.second, 2);
}
thrust::device_vector<KeyPoint<float>> d_kps(point_expected_angle.size());
for (int i = 0; i < point_expected_angle.size(); ++i)
{
KeyPoint<float> kp;
kp.point = point_expected_angle[i].first;
kp.angle = 0;
d_kps[i] = kp;
}
test.gpu_orb.ComputeAngles(test.obj_square, test.d_square.getImageView(), d_kps, 0, 0, 0, 0, 0);
for (int i = 0; i < point_expected_angle.size(); ++i)
{
KeyPoint<float> h_kp = d_kps[i];
EXPECT_NEAR(point_expected_angle[i].second, h_kp.angle, 2);
}
}
TEST(ORB, AngleRandom)
{
std::vector<vec2> sample_points;
// use enough points to fill multiple SMs on the gpu
for (int i = 0; i < 2000; ++i)
{
sample_points.push_back(vec2(Random::uniformInt(50, 150), Random::uniformInt(50, 150)));
}
std::vector<double> angle_cpu;
for (auto p : sample_points)
{
float angle = test.orb.ComputeAngle(test.random, p);
angle_cpu.push_back(angle);
}
thrust::device_vector<KeyPoint<float>> d_kps(sample_points.size());
for (int i = 0; i < sample_points.size(); ++i)
{
KeyPoint<float> kp;
kp.point = sample_points[i];
kp.angle = 0;
d_kps[i] = kp;
}
test.gpu_orb.ComputeAngles(test.obj_random, test.d_random.getImageView(), d_kps, 0, 0, 0, 0, 0);
for (int i = 0; i < sample_points.size(); ++i)
{
KeyPoint<float> h_kp = d_kps[i];
EXPECT_NEAR(angle_cpu[i], h_kp.angle, 0.1);
}
}
TEST(ORB, DescriptorRandom)
{
thrust::host_vector<KeyPoint<float>> sample_keypoints;
int N = 20000;
for (int i = 0; i < N; ++i)
{
KeyPoint<float> kp;
kp.point = vec2(Random::uniformInt(50, 150), Random::uniformInt(50, 150));
if (i < N / 2)
{
kp.angle = 0;
}
else
{
kp.angle = Random::sampleDouble(0, 360);
}
sample_keypoints.push_back(kp);
}
std::vector<DescriptorORB> cpu_descriptors;
for (int i = 0; i < sample_keypoints.size(); ++i)
{
auto kp = sample_keypoints[i];
cpu_descriptors.push_back(test.orb.ComputeDescriptor(test.random, kp.point, kp.angle));
}
thrust::device_vector<KeyPoint<float>> gpu_keypoints = sample_keypoints;
thrust::device_vector<DescriptorORB> gpu_descriptors(sample_keypoints.size());
test.gpu_orb.ComputeDescriptors(test.obj_random, test.d_random.getImageView(), gpu_keypoints, gpu_descriptors, 0);
thrust::host_vector<DescriptorORB> gpu_descriptors_on_host(gpu_descriptors);
std::vector<double> distance_0;
std::vector<double> distance_random;
for (int i = 0; i < sample_keypoints.size(); ++i)
{
auto desc1 = cpu_descriptors[i];
auto desc2 = gpu_descriptors_on_host[i];
auto dis = distance(desc1, desc2);
if (i < N / 2)
{
distance_0.push_back(dis);
}
else
{
distance_random.push_back(dis);
}
}
Statistics<double> stat_0(distance_0);
Statistics<double> stat_random(distance_random);
// The 0 angles must be exact 0
EXPECT_EQ(stat_0.max, 0);
// Random angles is a little above 0 because of sin/cos differences
EXPECT_LE(stat_random.max, 10);
EXPECT_EQ(stat_random.median, 0);
}
|
debfafd2219e1279032cb15e79ee8891cb56fcf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixCoverGPU.cuh"
#include <iostream>
#include <fstream>
#include <cstdio>
int main()
{
std::ifstream file("matrix.txt");
int graph_count=2;
int total_dl_matrix_row_num[2]={276,276};
int total_dl_matrix_col_num[2]={29,29};
int offset_matrix[2]={0, 276*29};
int offset_row[2]={0,276};
int offset_col[2]={0,29};
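	// Two graphs are solved in parallel; each has a 276x29 dl_matrix, and the offset arrays
	// record where each graph's rows, columns and matrix start in the flattened buffers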
int *dl_matrix;
std::cout<<"reading dl matrix from file"<<std::endl;
int total_matrix = total_dl_matrix_row_num[0]*total_dl_matrix_col_num[0]+total_dl_matrix_row_num[1]*total_dl_matrix_col_num[1];
int total_row = total_dl_matrix_row_num[0]+total_dl_matrix_row_num[1];
int total_col = total_dl_matrix_col_num[0]+total_dl_matrix_col_num[1];
dl_matrix = new int [total_matrix];
std::cout<<"allocate dl matrix space from main memory"<<std::endl;
for (int k = 0; k<graph_count; k++)
{
for (int i = 0; i < total_dl_matrix_row_num[k]; i++)
{
for (int j = 0; j < total_dl_matrix_col_num[k]; j++)
{
file>>dl_matrix[offset_matrix[k]+i*total_dl_matrix_col_num[k]+j];
}
}
}
std::cout<<"reading dl matrix from file DONE"<<std::endl;
std::ifstream col("col.txt");
//int *deleted_cols = new int[total_dl_matrix_col_num];
int *col_group = new int[total_col];
for (int k =0; k<graph_count;k++){
for(int i=0; i<total_dl_matrix_col_num[k]; i++){
col>>col_group[offset_col[k]+i];
}
}
int *deleted_cols = new int[total_col];
int *deleted_rows = new int[total_row];
int conflict_count[2] = {0,0};
int vertex_num[2]= {5,5};
//int *results = new int[total_dl_matrix_row_num];
//allocate necessary vectors and matrix on GPU
int *dl_matrix_gpu;
int *results_gpu;
	// Note: these buffers hold int values, so sizeof(int) (not sizeof(int*)) is the correct element size;
	// using sizeof(int*) also made the host-to-device copies read past the end of the host arrays
	hipMalloc(&dl_matrix_gpu, sizeof(int)*total_matrix);
	hipMemcpy(dl_matrix_gpu, dl_matrix, sizeof(int)*total_matrix, hipMemcpyHostToDevice);
	hipMalloc(&results_gpu, sizeof(int)*total_row);
int *deleted_cols_gpu;
int *deleted_rows_gpu;
int *col_group_gpu;
int *row_group_gpu;
int *conflict_count_gpu;
	hipMalloc(&deleted_cols_gpu, sizeof(int)*total_col);
	hipMalloc(&deleted_rows_gpu, sizeof(int)*total_row);
	hipMalloc(&col_group_gpu, sizeof(int)*total_col);
	hipMalloc(&row_group_gpu, sizeof(int)*total_row);
	hipMalloc(&conflict_count_gpu, sizeof(int)*total_col);
	hipMemcpy(col_group_gpu, col_group, sizeof(int)*total_col, hipMemcpyHostToDevice);
int *vertex_num_gpu;
int *total_dl_matrix_col_num_gpu;
int *total_dl_matrix_row_num_gpu;
	hipMalloc(&vertex_num_gpu, sizeof(int)*graph_count);
	hipMalloc(&total_dl_matrix_col_num_gpu, sizeof(int)*graph_count);
	hipMalloc(&total_dl_matrix_row_num_gpu, sizeof(int)*graph_count);
	hipMemcpy(vertex_num_gpu, vertex_num, sizeof(int)*graph_count, hipMemcpyHostToDevice);
	hipMemcpy(total_dl_matrix_col_num_gpu, total_dl_matrix_col_num, sizeof(int)*graph_count, hipMemcpyHostToDevice);
	hipMemcpy(total_dl_matrix_row_num_gpu, total_dl_matrix_row_num, sizeof(int)*graph_count, hipMemcpyHostToDevice);
int *offset_col_gpu;
int *offset_row_gpu;
int *offset_matrix_gpu;
	hipMalloc(&offset_col_gpu, sizeof(int)*graph_count);
	hipMalloc(&offset_row_gpu, sizeof(int)*graph_count);
	hipMalloc(&offset_matrix_gpu, sizeof(int)*graph_count);
	hipMemcpy(offset_col_gpu, offset_col, sizeof(int)*graph_count, hipMemcpyHostToDevice);
	hipMemcpy(offset_row_gpu, offset_row, sizeof(int)*graph_count, hipMemcpyHostToDevice);
	hipMemcpy(offset_matrix_gpu, offset_matrix, sizeof(int)*graph_count, hipMemcpyHostToDevice);
int *search_depth_gpu;
int *selected_row_id_gpu;
int *current_conflict_count_gpu;
int *conflict_node_id_gpu;
int *conflict_col_id_gpu;
int *existance_of_candidate_rows_gpu;
	hipMalloc(&search_depth_gpu, sizeof(int)*graph_count);
	hipMalloc(&selected_row_id_gpu, sizeof(int)*graph_count);
	hipMalloc(&current_conflict_count_gpu, sizeof(int)*graph_count);
	hipMalloc(&conflict_node_id_gpu, sizeof(int)*graph_count);
	hipMalloc(&conflict_col_id_gpu, sizeof(int)*graph_count);
	hipMalloc(&existance_of_candidate_rows_gpu, sizeof(int)*graph_count);
int hard_conflict_threshold=500;
//int * row_group=new int[total_dl_matrix_row_num];
//get col and row group
//init_vectors<<<1, 32>>>(row_group_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(deleted_cols_gpu, total_dl_matrix_col_num_gpu);
//init_vectors<<<1, 32>>>(deleted_rows_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(results_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(conflict_count_gpu, total_dl_matrix_col_num_gpu);
//init_vectors<<<1, 32>>>(deleted_rows_gpu, total_dl_matrix_row_num_gpu);
//hipDeviceSynchronize();
//hipMemcpy(row_group, row_group_gpu, sizeof(int)*total_dl_matrix_row_num_gpu, hipMemcpyDeviceToHost);
//std::cout<<"print row group"<<std::endl;
//for(int i=0; i<total_dl_matrix_row_num; i++)
//{
// std::cout<<row_group[i]<<' ';
//}
//std::cout<<std::endl;
//get_vertex_row_group<<<1, 32>>>(row_group_gpu, dl_matrix_gpu, vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu);
//hipMemcpy(row_group, row_group_gpu, sizeof(int)*total_dl_matrix_row_num_gpu, hipMemcpyDeviceToHost);
//std::cout<<"print row group"<<std::endl;
//for(int i=0; i<total_dl_matrix_row_num; i++)
// {
// std::cout<<row_group[i]<<' ';
//}
//std::cout<<std::endl;
//hipMemcpy(col_group_gpu, col_group, sizeof(int)*total_dl_matrix_col_num, hipMemcpyHostToDevice);
//delete_rows_and_columns<<<1, 32>>>(dl_matrix_gpu, deleted_rows_gpu, deleted_cols_gpu, 1, 1, total_dl_matrix_row_num, total_dl_matrix_col_num);
//hipMemcpy(deleted_cols, deleted_cols_gpu, sizeof(int)*total_dl_matrix_col_num, hipMemcpyDeviceToHost);
//hipMemcpy(deleted_rows, deleted_rows_gpu, sizeof(int)*total_dl_matrix_row_num, hipMemcpyDeviceToHost);
//print_vec(deleted_cols,total_dl_matrix_col_num);
//print_vec(deleted_rows,total_dl_matrix_row_num);
//print_vec_g<<<1,1>>>(col_group_gpu, total_col);
hipDeviceSynchronize();
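// Launch one block per graph (graph_count = 2) with 32 threads each; every block
// works on its own matrix, addressed through the offset_col/offset_row/offset_matrix arrays.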
hipLaunchKernelGGL(( mc_solver), dim3(2),dim3(32), 0, 0, dl_matrix_gpu, results_gpu,
deleted_cols_gpu, deleted_rows_gpu, col_group_gpu, row_group_gpu, conflict_count_gpu,
vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu,
offset_col_gpu, offset_row_gpu, offset_matrix_gpu,
search_depth_gpu, selected_row_id_gpu, current_conflict_count_gpu, conflict_node_id_gpu, conflict_col_id_gpu, existance_of_candidate_rows_gpu,
graph_count, hard_conflict_threshold);
hipDeviceSynchronize();
//mc_solver(dl_matrix_gpu, results_gpu, deleted_cols_gpu, deleted_rows_gpu, col_group_gpu, row_group_gpu, conflict_count_gpu, vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu);
std::cout<<"================================================================================================================================="<<std::endl;
int *results = new int [total_row];
hipMemcpy(results, results_gpu, sizeof(int)*total_row, hipMemcpyDeviceToHost);
hipMemcpy(deleted_cols, deleted_cols_gpu, sizeof(int)*total_col, hipMemcpyDeviceToHost);
for (int k=0; k < graph_count; k++)
{
for (int i = 0; i < total_dl_matrix_row_num[k]; i++)
{
std::cout << results[offset_row[k]+i] << ' ';
}
std::cout << std::endl;
for (int i = 0; i < total_dl_matrix_row_num[k]; i++){
if(results[offset_row[k]+i]>0){
std::cout << i << ' ';
}
}
std::cout<<std::endl;
for (int i = 0; i < total_dl_matrix_col_num[k]; i++)
{
if (deleted_cols[offset_col[k]+i] == -1)
{
conflict_count[k]++;
}
}
std::cout << "Conflict Num is " << conflict_count[k] / 3 << std::endl;
}
hipFree(dl_matrix_gpu);
hipFree(results_gpu);
hipFree(deleted_cols_gpu);
hipFree(deleted_rows_gpu);
hipFree(col_group_gpu);
hipFree(row_group_gpu);
hipFree(conflict_count_gpu);
hipFree(vertex_num_gpu);
hipFree(total_dl_matrix_col_num_gpu);
hipFree(total_dl_matrix_row_num_gpu);
hipFree(offset_col_gpu);
hipFree(offset_row_gpu);
hipFree(offset_matrix_gpu);
hipFree(search_depth_gpu);
hipFree(selected_row_id_gpu);
hipFree(current_conflict_count_gpu);
hipFree(conflict_col_id_gpu);
hipFree(conflict_node_id_gpu);
hipFree(existance_of_candidate_rows_gpu);
delete[] results;
delete[] dl_matrix;
//delete[] test_matrix;
delete[] deleted_cols;
delete[] deleted_rows;
//delete[] gtcol;
delete[] col_group;
//delete[] gtr;
return 0;
}
| debfafd2219e1279032cb15e79ee8891cb56fcf8.cu | #include "MatrixCoverGPU.cuh"
#include <iostream>
#include <fstream>
#include <cstdio>
int main()
{
std::ifstream file("matrix.txt");
int graph_count=2;
int total_dl_matrix_row_num[2]={276,276};
int total_dl_matrix_col_num[2]={29,29};
int offset_matrix[2]={0, 276*29};
int offset_row[2]={0,276};
int offset_col[2]={0,29};
int *dl_matrix;
std::cout<<"reading dl matrix from file"<<std::endl;
int total_matrix = total_dl_matrix_row_num[0]*total_dl_matrix_col_num[0]+total_dl_matrix_row_num[1]*total_dl_matrix_col_num[1];
int total_row = total_dl_matrix_row_num[0]+total_dl_matrix_row_num[1];
int total_col = total_dl_matrix_col_num[0]+total_dl_matrix_col_num[1];
dl_matrix = new int [total_matrix];
std::cout<<"allocate dl matrix space from main memory"<<std::endl;
for (int k = 0; k<graph_count; k++)
{
for (int i = 0; i < total_dl_matrix_row_num[k]; i++)
{
for (int j = 0; j < total_dl_matrix_col_num[k]; j++)
{
file>>dl_matrix[offset_matrix[k]+i*total_dl_matrix_col_num[k]+j];
}
}
}
std::cout<<"reading dl matrix from file DONE"<<std::endl;
std::ifstream col("col.txt");
//int *deleted_cols = new int[total_dl_matrix_col_num];
int *col_group = new int[total_col];
for (int k =0; k<graph_count;k++){
for(int i=0; i<total_dl_matrix_col_num[k]; i++){
col>>col_group[offset_col[k]+i];
}
}
int *deleted_cols = new int[total_col];
int *deleted_rows = new int[total_row];
int conflict_count[2] = {0,0};
int vertex_num[2]= {5,5};
//int *results = new int[total_dl_matrix_row_num];
//allocate necessary vectors and matrix on GPU
int *dl_matrix_gpu;
int *results_gpu;
cudaMalloc(&dl_matrix_gpu, sizeof(int)*total_matrix);
cudaMemcpy(dl_matrix_gpu, dl_matrix, sizeof(int)*total_matrix, cudaMemcpyHostToDevice);
cudaMalloc(&results_gpu, sizeof(int)*total_row);
int *deleted_cols_gpu;
int *deleted_rows_gpu;
int *col_group_gpu;
int *row_group_gpu;
int *conflict_count_gpu;
cudaMalloc(&deleted_cols_gpu, sizeof(int)*total_col);
cudaMalloc(&deleted_rows_gpu, sizeof(int)*total_row);
cudaMalloc(&col_group_gpu, sizeof(int)*total_col);
cudaMalloc(&row_group_gpu, sizeof(int)*total_row);
cudaMalloc(&conflict_count_gpu, sizeof(int)*total_col);
cudaMemcpy(col_group_gpu, col_group, sizeof(int)*total_col, cudaMemcpyHostToDevice);
int *vertex_num_gpu;
int *total_dl_matrix_col_num_gpu;
int *total_dl_matrix_row_num_gpu;
cudaMalloc(&vertex_num_gpu, sizeof(int)*graph_count);
cudaMalloc(&total_dl_matrix_col_num_gpu, sizeof(int)*graph_count);
cudaMalloc(&total_dl_matrix_row_num_gpu, sizeof(int)*graph_count);
cudaMemcpy(vertex_num_gpu, vertex_num, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
cudaMemcpy(total_dl_matrix_col_num_gpu, total_dl_matrix_col_num, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
cudaMemcpy(total_dl_matrix_row_num_gpu, total_dl_matrix_row_num, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
int *offset_col_gpu;
int *offset_row_gpu;
int *offset_matrix_gpu;
cudaMalloc(&offset_col_gpu, sizeof(int)*graph_count);
cudaMalloc(&offset_row_gpu, sizeof(int)*graph_count);
cudaMalloc(&offset_matrix_gpu, sizeof(int)*graph_count);
cudaMemcpy(offset_col_gpu, offset_col, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
cudaMemcpy(offset_row_gpu, offset_row, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
cudaMemcpy(offset_matrix_gpu, offset_matrix, sizeof(int)*graph_count, cudaMemcpyHostToDevice);
int *search_depth_gpu;
int *selected_row_id_gpu;
int *current_conflict_count_gpu;
int *conflict_node_id_gpu;
int *conflict_col_id_gpu;
int *existance_of_candidate_rows_gpu;
cudaMalloc(&search_depth_gpu, sizeof(int)*graph_count);
cudaMalloc(&selected_row_id_gpu, sizeof(int)*graph_count);
cudaMalloc(&current_conflict_count_gpu, sizeof(int)*graph_count);
cudaMalloc(&conflict_node_id_gpu, sizeof(int)*graph_count);
cudaMalloc(&conflict_col_id_gpu, sizeof(int)*graph_count);
cudaMalloc(&existance_of_candidate_rows_gpu, sizeof(int)*graph_count);
int hard_conflict_threshold=500;
//int * row_group=new int[total_dl_matrix_row_num];
//get col and row group
//init_vectors<<<1, 32>>>(row_group_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(deleted_cols_gpu, total_dl_matrix_col_num_gpu);
//init_vectors<<<1, 32>>>(deleted_rows_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(results_gpu, total_dl_matrix_row_num_gpu);
//init_vectors<<<1, 32>>>(conflict_count_gpu, total_dl_matrix_col_num_gpu);
//init_vectors<<<1, 32>>>(deleted_rows_gpu, total_dl_matrix_row_num_gpu);
//cudaDeviceSynchronize();
//cudaMemcpy(row_group, row_group_gpu, sizeof(int)*total_dl_matrix_row_num_gpu, cudaMemcpyDeviceToHost);
//std::cout<<"print row group"<<std::endl;
//for(int i=0; i<total_dl_matrix_row_num; i++)
//{
// std::cout<<row_group[i]<<' ';
//}
//std::cout<<std::endl;
//get_vertex_row_group<<<1, 32>>>(row_group_gpu, dl_matrix_gpu, vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu);
//cudaMemcpy(row_group, row_group_gpu, sizeof(int)*total_dl_matrix_row_num_gpu, cudaMemcpyDeviceToHost);
//std::cout<<"print row group"<<std::endl;
//for(int i=0; i<total_dl_matrix_row_num; i++)
// {
// std::cout<<row_group[i]<<' ';
//}
//std::cout<<std::endl;
//cudaMemcpy(col_group_gpu, col_group, sizeof(int)*total_dl_matrix_col_num, cudaMemcpyHostToDevice);
//delete_rows_and_columns<<<1, 32>>>(dl_matrix_gpu, deleted_rows_gpu, deleted_cols_gpu, 1, 1, total_dl_matrix_row_num, total_dl_matrix_col_num);
//cudaMemcpy(deleted_cols, deleted_cols_gpu, sizeof(int)*total_dl_matrix_col_num, cudaMemcpyDeviceToHost);
//cudaMemcpy(deleted_rows, deleted_rows_gpu, sizeof(int)*total_dl_matrix_row_num, cudaMemcpyDeviceToHost);
//print_vec(deleted_cols,total_dl_matrix_col_num);
//print_vec(deleted_rows,total_dl_matrix_row_num);
//print_vec_g<<<1,1>>>(col_group_gpu, total_col);
cudaDeviceSynchronize();
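// Launch one block per graph (graph_count = 2) with 32 threads each; every block
// works on its own matrix, addressed through the offset_col/offset_row/offset_matrix arrays.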
mc_solver<<<2,32>>>(dl_matrix_gpu, results_gpu,
deleted_cols_gpu, deleted_rows_gpu, col_group_gpu, row_group_gpu, conflict_count_gpu,
vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu,
offset_col_gpu, offset_row_gpu, offset_matrix_gpu,
search_depth_gpu, selected_row_id_gpu, current_conflict_count_gpu, conflict_node_id_gpu, conflict_col_id_gpu, existance_of_candidate_rows_gpu,
graph_count, hard_conflict_threshold);
cudaDeviceSynchronize();
//mc_solver(dl_matrix_gpu, results_gpu, deleted_cols_gpu, deleted_rows_gpu, col_group_gpu, row_group_gpu, conflict_count_gpu, vertex_num_gpu, total_dl_matrix_row_num_gpu, total_dl_matrix_col_num_gpu);
std::cout<<"================================================================================================================================="<<std::endl;
int *results = new int [total_row];
cudaMemcpy(results, results_gpu, sizeof(int)*total_row, cudaMemcpyDeviceToHost);
cudaMemcpy(deleted_cols, deleted_cols_gpu, sizeof(int)*total_col, cudaMemcpyDeviceToHost);
for (int k=0; k < graph_count; k++)
{
for (int i = 0; i < total_dl_matrix_row_num[k]; i++)
{
std::cout << results[offset_row[k]+i] << ' ';
}
std::cout << std::endl;
for (int i = 0; i < total_dl_matrix_row_num[k]; i++){
if(results[offset_row[k]+i]>0){
std::cout << i << ' ';
}
}
std::cout<<std::endl;
for (int i = 0; i < total_dl_matrix_col_num[k]; i++)
{
if (deleted_cols[offset_col[k]+i] == -1)
{
conflict_count[k]++;
}
}
std::cout << "Conflict Num is " << conflict_count[k] / 3 << std::endl;
}
cudaFree(dl_matrix_gpu);
cudaFree(results_gpu);
cudaFree(deleted_cols_gpu);
cudaFree(deleted_rows_gpu);
cudaFree(col_group_gpu);
cudaFree(row_group_gpu);
cudaFree(conflict_count_gpu);
cudaFree(vertex_num_gpu);
cudaFree(total_dl_matrix_col_num_gpu);
cudaFree(total_dl_matrix_row_num_gpu);
cudaFree(offset_col_gpu);
cudaFree(offset_row_gpu);
cudaFree(offset_matrix_gpu);
cudaFree(search_depth_gpu);
cudaFree(selected_row_id_gpu);
cudaFree(current_conflict_count_gpu);
cudaFree(conflict_col_id_gpu);
cudaFree(conflict_node_id_gpu);
cudaFree(existance_of_candidate_rows_gpu);
delete[] results;
delete[] dl_matrix;
//delete[] test_matrix;
delete[] deleted_cols;
delete[] deleted_rows;
//delete[] gtcol;
delete[] col_group;
//delete[] gtr;
return 0;
}
|
4476a3dc6e615cd4d6c40bce95fc53b8edc83a6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "math.h"
#include <thrust/device_vector.h>
#include <iostream>
__global__ void gerar(int *out, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
hiprandState_t st;
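// Seed 0 with sequence number i gives every thread its own independent, reproducible stream.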
hiprand_init(0, i, 0, &st);
// numbers between 10 and 40
int temp = 0;
for (int k = 0; k < 500; k++) {
temp += (int) (30 * hiprand_uniform(&st) + 10);
}
out[i] = temp;
}
int main() {
thrust::device_vector<int> nums(100000);
int nblocks = ceil(nums.size() / 1024);
hipLaunchKernelGGL(( gerar), dim3(nblocks), dim3(1024), 0, 0, thrust::raw_pointer_cast(nums.data()), nums.size());
for (int i =0 ; i< 10; i++) {
std::cout << nums[i] << "\n";
}
}
| 4476a3dc6e615cd4d6c40bce95fc53b8edc83a6e.cu | #include "curand.h"
#include "curand_kernel.h"
#include "math.h"
#include <thrust/device_vector.h>
#include <iostream>
__global__ void gerar(int *out, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
curandState st;
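// Seed 0 with sequence number i gives every thread its own independent, reproducible stream.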
curand_init(0, i, 0, &st);
// numbers between 10 and 40
int temp = 0;
for (int k = 0; k < 500; k++) {
temp += (int) (30 * curand_uniform(&st) + 10);
}
out[i] = temp;
}
int main() {
thrust::device_vector<int> nums(100000);
int nblocks = ceil(nums.size() / 1024);
gerar<<<nblocks, 1024>>>(thrust::raw_pointer_cast(nums.data()), nums.size());
for (int i =0 ; i< 10; i++) {
std::cout << nums[i] << "\n";
}
}
|
3a71d8c22035b5695fdb2bd629b401eb62993c2d.hip | // !!! This is a file automatically generated by hipify!!!
/*
** Hello World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
// Device kernel
__global__ void
helloWorld(char* str)
{
// determine where in the thread grid we are
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// unmangle output
str[idx] += idx;
}
// Host function
int
main(int argc, char** argv)
{
int i;
// desired output
char str[] = "Hello World!";
// mangle contents of output
// the null character is left intact for simplicity
for(i = 0; i < 12; i++)
str[i] -= i;
// allocate memory on the device
char *d_str;
size_t size = sizeof(str);
hipMalloc((void**)&d_str, size);
// copy the string to the device
hipMemcpy(d_str, str, size, hipMemcpyHostToDevice);
// set the grid and block sizes
dim3 dimGrid(2); // one block per word
dim3 dimBlock(6); // one thread per character
// invoke the kernel
hipLaunchKernelGGL(( helloWorld), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_str);
// retrieve the results from the device
hipMemcpy(str, d_str, size, hipMemcpyDeviceToHost);
// free up the allocated memory on the device
hipFree(d_str);
// everyone's favorite part
printf("%s\n", str);
return 0;
}
| 3a71d8c22035b5695fdb2bd629b401eb62993c2d.cu | /*
** Hello World using CUDA
**
** The string "Hello World!" is mangled then restored using a common CUDA idiom
**
** Byron Galbraith
** 2009-02-18
*/
#include <cuda.h>
#include <stdio.h>
// Device kernel
__global__ void
helloWorld(char* str)
{
// determine where in the thread grid we are
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// unmangle output
str[idx] += idx;
}
// Host function
int
main(int argc, char** argv)
{
int i;
// desired output
char str[] = "Hello World!";
// mangle contents of output
// the null character is left intact for simplicity
for(i = 0; i < 12; i++)
str[i] -= i;
// allocate memory on the device
char *d_str;
size_t size = sizeof(str);
cudaMalloc((void**)&d_str, size);
// copy the string to the device
cudaMemcpy(d_str, str, size, cudaMemcpyHostToDevice);
// set the grid and block sizes
dim3 dimGrid(2); // one block per word
dim3 dimBlock(6); // one thread per character
// invoke the kernel
helloWorld<<< dimGrid, dimBlock >>>(d_str);
// retrieve the results from the device
cudaMemcpy(str, d_str, size, cudaMemcpyDeviceToHost);
// free up the allocated memory on the device
cudaFree(d_str);
// everyone's favorite part
printf("%s\n", str);
return 0;
}
|
02539c6fa9595460f99bd58c9931c303b0ce85d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nvcc backGroundSubtractionWyatt.cu -o temp.exe -lm
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
// size of vector
#define FRAMES 5 // Number of frames
#define PIXELS_PER_FRAME 10 // Number of pixels per frame
#define BLOCK 128 // Size of blocks, best if it is a power of 2.
// Globals
int *BlockOfFrames_CPU, *BlockOfFrames_GPU;
float *MeanFrame_GPU;
float *BlockOfLogNormalFrames_GPU;
float *MeanLogNormalFrame_GPU;
float *MedianLogNormalFrame_GPU;
float *StdvLogNormalFrame_GPU;
int *NewFrame_CPU, *NewFrame_GPU;
int *BlackAndWhiteFrame_CPU, *BlackAndWhiteFrame_GPU;
// These globals can be removed after debugging.
float *BlockOfLogNormalFrames_CPU;
float *MeanFrame_CPU;
float *MeanLogNormalFrame_CPU;
float *MedianLogNormalFrame_CPU;
float *StdvLogNormalFrame_CPU;
dim3 dimBlock, dimGrid;
void AllocateMemory()
{
// This is the set of frames that will be used to generate the log normal frame
// and the standard deviation frame
BlockOfFrames_CPU = (int *)malloc(FRAMES*PIXELS_PER_FRAME*sizeof(int));
hipMalloc((void**)&BlockOfFrames_GPU,FRAMES*PIXELS_PER_FRAME*sizeof(int));
hipMalloc((void**)&BlockOfLogNormalFrames_GPU,FRAMES*PIXELS_PER_FRAME*sizeof(float));
// Will hold the log normal frame and the standard deviation of the frames minus the log normal
hipMalloc((void**)&MeanFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
hipMalloc((void**)&MeanLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
hipMalloc((void**)&MedianLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
hipMalloc((void**)&StdvLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
NewFrame_CPU = (int *)malloc(PIXELS_PER_FRAME*sizeof(float));
BlackAndWhiteFrame_CPU = (int *)malloc(PIXELS_PER_FRAME*sizeof(float));
hipMalloc((void**)&NewFrame_GPU, PIXELS_PER_FRAME*sizeof(int));
hipMalloc((void**)&BlackAndWhiteFrame_GPU, PIXELS_PER_FRAME*sizeof(int));
// These all can be removed later. I'm just using them for debugging
BlockOfLogNormalFrames_CPU = (float *)malloc(FRAMES*PIXELS_PER_FRAME*sizeof(float));
MeanFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
MeanLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
MedianLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
StdvLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
}
void loadPixels()
{
/*
However you get the 300,000 x 80 pixels loaded in here, CUDA will do the rest.
This loads the big vector frame by frame: the 1st 300,000 pixels, then the 2nd 300,000, and so on up to frame 80.
It may be faster to load the pixels the other way: the 80 first pixels, then the 80 second pixels, and so on 300,000 times.
Test it and see.
Below I just load some small values to check that everything is working.
FRAMES is the number of frames and PIXELS_PER_FRAME is the number of pixels per frame.
*/
for(int i = 0; i < FRAMES; i++)
{
for(int j = 0; j < PIXELS_PER_FRAME; j++)
{
BlockOfFrames_CPU[j +i*PIXELS_PER_FRAME] = i;
if(i == 4) BlockOfFrames_CPU[j +i*PIXELS_PER_FRAME] = 12;
}
}
}
void loadNewFrame()
{
//This is where you will load the image to be processed.
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
NewFrame_CPU[i] = i*2;
}
}
void SetUpCudaDevices()
{
dimBlock.x = BLOCK;
dimBlock.y = 1;
dimBlock.z = 1;
dimGrid.x = ((PIXELS_PER_FRAME - 1)/BLOCK)+1;
dimGrid.y = 1;
dimGrid.z = 1;
}
__global__ void creatingMeanPixelFrame(float *meanFrame, int *allFrames, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
sum += allFrames[pixel + pixelsPerFrame*i];
}
meanFrame[pixel] = sum/(float)frames;
}
}
__global__ void creatingLogNormalFrames(float *meanFrame, int *allFrames, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int id;
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
//Same screen location (pixel) but moving through frames (i).
id = pixel + pixelsPerFrame*i;
allFramesLogNormal[id] = (float)allFrames[id] - meanFrame[pixel];
allFramesLogNormal[id] = abs(allFramesLogNormal[id]);
//Can't take log of zero so to be safe check and move it off zero.
if(allFramesLogNormal[id] == 0.0f)
{
allFramesLogNormal[id] = 0.000001f;
}
allFramesLogNormal[id] = logf(allFramesLogNormal[id]);
//allFramesLogNormal[id] = (float)allFrames[id]; // Remove after debugging.
}
}
}
__global__ void creatingMeanLogNormalFrame(float *meanlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
sum += allFramesLogNormal[pixel + pixelsPerFrame*i];
}
meanlogNormalFrame[pixel] = sum/(float)frames;
}
}
__global__ void creatingMedianLogNormalFrame(float *medianlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
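// Selection-style median: repeatedly pick the smallest value that has not been used yet
// until the middle element (or the average of the two middle elements) is reached.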
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
int used[FRAMES], index, count;
float median = 0.0;
float small;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
used[i] = 0;
}
if(frames%2 == 0)
{
int middle2 = frames/2;
int middle1 = middle2 - 1;
index = -1;
count = 0;
while(count <= middle2)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small && used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle1 || count == middle2)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
median /=2.0f;
}
else
{
int middle = frames/2;
index = -1;
count = 0;
while(count <= middle)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small)
{
if(used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
}
medianlogNormalFrame[pixel] = median;
}
}
__global__ void creatingStdvLogNormalFrame(float *stdvLogNormalFrame, float *meanLogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
float temp;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
temp = allFramesLogNormal[pixel + pixelsPerFrame*i] - meanLogNormalFrame[pixel];
sum += temp*temp;
}
stdvLogNormalFrame[pixel] = sqrtf((sum)/(float)(frames-1));
}
}
__global__ void CreateBlackAndWHiteFrame(int *BlackAndWhiteFrame_GPU, int *NewFrame_GPU, float *StdvLogNormalFrame_GPU, float *MeanLogNormalFrame_GPU, int pixelsPerFrame)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
float breakPoint = 2.0f; // ************** not sure what this value should be ??????????
if(pixel < pixelsPerFrame)
{
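// CDF of the fitted per-pixel log-normal model evaluated at the new pixel value,
// compared against breakPoint to decide the binary output.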
float CDF = 0.5f + 0.5f*erff((logf((float)NewFrame_GPU[pixel]) - MeanLogNormalFrame_GPU[pixel])/sqrtf(2.0*StdvLogNormalFrame_GPU[pixel]));
if(CDF < breakPoint)
{
BlackAndWhiteFrame_GPU[pixel] = 0;
}
else
{
BlackAndWhiteFrame_GPU[pixel] = 1; //Can remove this if you do a memset before you use the data.
}
}
}
void errorCheck(const char *message)
{
hipError_t error;
error = hipGetLastError();
if(error != hipSuccess)
{
printf("\n CUDA ERROR: %s = %s\n", message, hipGetErrorString(error));
exit(0);
}
}
void stats()
{
hipMemcpyAsync(BlockOfLogNormalFrames_CPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME*FRAMES*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy Mean frame down");
hipMemcpyAsync(MeanFrame_CPU, MeanFrame_GPU, PIXELS_PER_FRAME*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy Mean frame down");
hipMemcpyAsync(MeanLogNormalFrame_CPU, MeanLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy MeanLogNormal frame down");
hipMemcpyAsync(MedianLogNormalFrame_CPU, MedianLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy MedianLogNormal frame down");
hipMemcpyAsync(StdvLogNormalFrame_CPU, StdvLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy StdvLogNormal frame down");
printf("\n\n");
printf("frames");
for(int j = 0; j < FRAMES; j++)
{
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("%d ", BlockOfFrames_CPU[i + j*PIXELS_PER_FRAME]);
}
}
printf("\n\n");
printf("log normal frames");
for(int j = 0; j < FRAMES; j++)
{
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("%f ", BlockOfLogNormalFrames_CPU[i + j*PIXELS_PER_FRAME]);
}
}
printf("\n\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("MeanFrame[%d] = %f MeanLogNormalFrame[%d] = %f MedianLogNormalFrame[%d] = %f StdvLogNormalFrame[%d] = %f \n", i, MeanFrame_CPU[i], i, MeanLogNormalFrame_CPU[i], i, MedianLogNormalFrame_CPU[i], i, StdvLogNormalFrame_CPU[i]);
}
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("NewFrame[%d] = %d blackAndWhiteFrame[%d] = %d \n", i, NewFrame_CPU[i], i, BlackAndWhiteFrame_CPU[i]);
}
}
void cleanUp()
{
free(BlockOfFrames_CPU);
free(NewFrame_CPU);
free(BlackAndWhiteFrame_CPU);
hipFree(BlockOfFrames_GPU);
hipFree(BlockOfLogNormalFrames_GPU);
hipFree(MeanFrame_GPU);
hipFree(MeanLogNormalFrame_GPU);
hipFree(MedianLogNormalFrame_GPU);
hipFree(StdvLogNormalFrame_GPU);
hipFree(NewFrame_GPU);
hipFree(BlackAndWhiteFrame_GPU);
// These can be removed later. I just used them for debugging.
free(BlockOfLogNormalFrames_CPU);
free(MeanFrame_CPU);
free(MeanLogNormalFrame_CPU);
free(MedianLogNormalFrame_CPU);
free(StdvLogNormalFrame_CPU);
}
int main()
{
AllocateMemory();
SetUpCudaDevices();
loadPixels();
hipMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME*FRAMES*sizeof(int), hipMemcpyHostToDevice);
errorCheck("copyFramessUp");
hipDeviceSynchronize();
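// Build the per-pixel background model: mean frame, then log|frame - mean| frames,
// then the mean, median and standard deviation of those log frames.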
hipLaunchKernelGGL(( creatingMeanPixelFrame), dim3(dimGrid),dim3(dimBlock), 0, 0, MeanFrame_GPU, BlockOfFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMeanPixelFrame");
hipLaunchKernelGGL(( creatingLogNormalFrames), dim3(dimGrid),dim3(dimBlock), 0, 0, MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingLogNormalFrames");
hipLaunchKernelGGL(( creatingMeanLogNormalFrame), dim3(dimGrid),dim3(dimBlock), 0, 0, MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMeanLogNormalFrame");
hipLaunchKernelGGL(( creatingMedianLogNormalFrame), dim3(dimGrid),dim3(dimBlock), 0, 0, MedianLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMedianLogNormalFrame");
hipLaunchKernelGGL(( creatingStdvLogNormalFrame), dim3(dimGrid),dim3(dimBlock), 0, 0, StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingStdvLogNormalFrame");
hipDeviceSynchronize();
loadNewFrame();
hipMemcpyAsync(NewFrame_GPU, NewFrame_CPU, PIXELS_PER_FRAME*sizeof(int), hipMemcpyHostToDevice);
errorCheck("copy New frame up");
hipLaunchKernelGGL(( CreateBlackAndWHiteFrame), dim3(dimGrid),dim3(dimBlock), 0, 0, BlackAndWhiteFrame_GPU, NewFrame_GPU, StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, PIXELS_PER_FRAME);
errorCheck("creatingStdvLogNormalFrame");
hipMemcpyAsync(BlackAndWhiteFrame_CPU, BlackAndWhiteFrame_GPU, PIXELS_PER_FRAME*sizeof(float), hipMemcpyDeviceToHost);
errorCheck("copy black and white frame down");
//Do stuff with black and white frame
stats();
cleanUp();
printf("\n DONE \n");
}
| 02539c6fa9595460f99bd58c9931c303b0ce85d3.cu | // nvcc backGroundSubtractionWyatt.cu -o temp.exe -lm
#include <math.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
// size of vector
#define FRAMES 5 // Number of frames
#define PIXELS_PER_FRAME 10 // Number of pixels per frame
#define BLOCK 128 // Size of blocks, best if it is a power of 2.
// Globals
int *BlockOfFrames_CPU, *BlockOfFrames_GPU;
float *MeanFrame_GPU;
float *BlockOfLogNormalFrames_GPU;
float *MeanLogNormalFrame_GPU;
float *MedianLogNormalFrame_GPU;
float *StdvLogNormalFrame_GPU;
int *NewFrame_CPU, *NewFrame_GPU;
int *BlackAndWhiteFrame_CPU, *BlackAndWhiteFrame_GPU;
// These globals can be removed after debugging.
float *BlockOfLogNormalFrames_CPU;
float *MeanFrame_CPU;
float *MeanLogNormalFrame_CPU;
float *MedianLogNormalFrame_CPU;
float *StdvLogNormalFrame_CPU;
dim3 dimBlock, dimGrid;
void AllocateMemory()
{
// This is the set of frames that will be used to generate the log normal frame
// and the standard deviation frame
BlockOfFrames_CPU = (int *)malloc(FRAMES*PIXELS_PER_FRAME*sizeof(int));
cudaMalloc((void**)&BlockOfFrames_GPU,FRAMES*PIXELS_PER_FRAME*sizeof(int));
cudaMalloc((void**)&BlockOfLogNormalFrames_GPU,FRAMES*PIXELS_PER_FRAME*sizeof(float));
// Will hold the log normal frame and the standard deviation of the frames minus the log normal
cudaMalloc((void**)&MeanFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
cudaMalloc((void**)&MeanLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
cudaMalloc((void**)&MedianLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
cudaMalloc((void**)&StdvLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float));
NewFrame_CPU = (int *)malloc(PIXELS_PER_FRAME*sizeof(float));
BlackAndWhiteFrame_CPU = (int *)malloc(PIXELS_PER_FRAME*sizeof(float));
cudaMalloc((void**)&NewFrame_GPU, PIXELS_PER_FRAME*sizeof(int));
cudaMalloc((void**)&BlackAndWhiteFrame_GPU, PIXELS_PER_FRAME*sizeof(int));
// These all can be removed later. I'm just using them for debugging
BlockOfLogNormalFrames_CPU = (float *)malloc(FRAMES*PIXELS_PER_FRAME*sizeof(float));
MeanFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
MeanLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
MedianLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
StdvLogNormalFrame_CPU = (float *)malloc(PIXELS_PER_FRAME*sizeof(float));
}
void loadPixels()
{
/*
However you get the 300,000 x 80 pixels loaded in here, CUDA will do the rest.
This loads the big vector frame by frame: the 1st 300,000 pixels, then the 2nd 300,000, and so on up to frame 80.
It may be faster to load the pixels the other way: the 80 first pixels, then the 80 second pixels, and so on 300,000 times.
Test it and see.
Below I just load some small values to check that everything is working.
FRAMES is the number of frames and PIXELS_PER_FRAME is the number of pixels per frame.
*/
for(int i = 0; i < FRAMES; i++)
{
for(int j = 0; j < PIXELS_PER_FRAME; j++)
{
BlockOfFrames_CPU[j +i*PIXELS_PER_FRAME] = i;
if(i == 4) BlockOfFrames_CPU[j +i*PIXELS_PER_FRAME] = 12;
}
}
}
void loadNewFrame()
{
//This is where you will load the image to be processed.
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
NewFrame_CPU[i] = i*2;
}
}
void SetUpCudaDevices()
{
dimBlock.x = BLOCK;
dimBlock.y = 1;
dimBlock.z = 1;
dimGrid.x = ((PIXELS_PER_FRAME - 1)/BLOCK)+1;
dimGrid.y = 1;
dimGrid.z = 1;
}
__global__ void creatingMeanPixelFrame(float *meanFrame, int *allFrames, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
sum += allFrames[pixel + pixelsPerFrame*i];
}
meanFrame[pixel] = sum/(float)frames;
}
}
__global__ void creatingLogNormalFrames(float *meanFrame, int *allFrames, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int id;
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
//Same screen location (pixel) but moving through frames (i).
id = pixel + pixelsPerFrame*i;
allFramesLogNormal[id] = (float)allFrames[id] - meanFrame[pixel];
allFramesLogNormal[id] = abs(allFramesLogNormal[id]);
//Can't take log of zero so to be safe check and move it off zero.
if(allFramesLogNormal[id] == 0.0f)
{
allFramesLogNormal[id] = 0.000001f;
}
allFramesLogNormal[id] = logf(allFramesLogNormal[id]);
//allFramesLogNormal[id] = (float)allFrames[id]; // Remove after debugging.
}
}
}
__global__ void creatingMeanLogNormalFrame(float *meanlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
sum += allFramesLogNormal[pixel + pixelsPerFrame*i];
}
meanlogNormalFrame[pixel] = sum/(float)frames;
}
}
__global__ void creatingMedianLogNormalFrame(float *medianlogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
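// Selection-style median: repeatedly pick the smallest value that has not been used yet
// until the middle element (or the average of the two middle elements) is reached.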
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
int used[FRAMES], index, count;
float median = 0.0;
float small;
if(pixel < pixelsPerFrame)
{
for(int i = 0; i < frames; i++)
{
used[i] = 0;
}
if(frames%2 == 0)
{
int middle2 = frames/2;
int middle1 = middle2 - 1;
index = -1;
count = 0;
while(count <= middle2)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small && used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle1 || count == middle2)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
median /=2.0f;
}
else
{
int middle = frames/2;
index = -1;
count = 0;
while(count <= middle)
{
small = 10000000.0f; //Needs to be a number larger than anything you would get in a log of a pixel.
for(int i = 0; i < frames; i++)
{
if(allFramesLogNormal[pixel + pixelsPerFrame*i] < small)
{
if(used[i] == 0)
{
small = allFramesLogNormal[pixel + pixelsPerFrame*i];
index = i;
}
}
}
if(index == -1) printf("\nError no index found\n");
used[index] = 1;
if(count == middle)
{
median += allFramesLogNormal[pixel + pixelsPerFrame*index];
}
count++;
}
}
medianlogNormalFrame[pixel] = median;
}
}
__global__ void creatingStdvLogNormalFrame(float *stdvLogNormalFrame, float *meanLogNormalFrame, float *allFramesLogNormal, int pixelsPerFrame, int frames)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
float temp;
if(pixel < pixelsPerFrame)
{
double sum = 0.0;
for(int i = 0; i < frames; i++)
{
temp = allFramesLogNormal[pixel + pixelsPerFrame*i] - meanLogNormalFrame[pixel];
sum += temp*temp;
}
stdvLogNormalFrame[pixel] = sqrtf((sum)/(float)(frames-1));
}
}
__global__ void CreateBlackAndWHiteFrame(int *BlackAndWhiteFrame_GPU, int *NewFrame_GPU, float *StdvLogNormalFrame_GPU, float *MeanLogNormalFrame_GPU, int pixelsPerFrame)
{
int pixel = threadIdx.x + blockIdx.x*blockDim.x;
float breakPoint = 2.0f; // ************** not sure what this value should be ??????????
if(pixel < pixelsPerFrame)
{
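// CDF of the fitted per-pixel log-normal model evaluated at the new pixel value,
// compared against breakPoint to decide the binary output.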
float CDF = 0.5f + 0.5f*erff((logf((float)NewFrame_GPU[pixel]) - MeanLogNormalFrame_GPU[pixel])/sqrtf(2.0*StdvLogNormalFrame_GPU[pixel]));
if(CDF < breakPoint)
{
BlackAndWhiteFrame_GPU[pixel] = 0;
}
else
{
BlackAndWhiteFrame_GPU[pixel] = 1; //Can remove this if you do a memset before you use the data.
}
}
}
void errorCheck(const char *message)
{
cudaError_t error;
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error));
exit(0);
}
}
void stats()
{
cudaMemcpyAsync(BlockOfLogNormalFrames_CPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME*FRAMES*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy Mean frame down");
cudaMemcpyAsync(MeanFrame_CPU, MeanFrame_GPU, PIXELS_PER_FRAME*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy Mean frame down");
cudaMemcpyAsync(MeanLogNormalFrame_CPU, MeanLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy MeanLogNormal frame down");
cudaMemcpyAsync(MedianLogNormalFrame_CPU, MedianLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy MedianLogNormal frame down");
cudaMemcpyAsync(StdvLogNormalFrame_CPU, StdvLogNormalFrame_GPU, PIXELS_PER_FRAME*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy StdvLogNormal frame down");
printf("\n\n");
printf("frames");
for(int j = 0; j < FRAMES; j++)
{
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("%d ", BlockOfFrames_CPU[i + j*PIXELS_PER_FRAME]);
}
}
printf("\n\n");
printf("log normal frames");
for(int j = 0; j < FRAMES; j++)
{
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("%f ", BlockOfLogNormalFrames_CPU[i + j*PIXELS_PER_FRAME]);
}
}
printf("\n\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("MeanFrame[%d] = %f MeanLogNormalFrame[%d] = %f MedianLogNormalFrame[%d] = %f StdvLogNormalFrame[%d] = %f \n", i, MeanFrame_CPU[i], i, MeanLogNormalFrame_CPU[i], i, MedianLogNormalFrame_CPU[i], i, StdvLogNormalFrame_CPU[i]);
}
printf("\n");
for(int i = 0; i < PIXELS_PER_FRAME; i++)
{
printf("NewFrame[%d] = %d blackAndWhiteFrame[%d] = %d \n", i, NewFrame_CPU[i], i, BlackAndWhiteFrame_CPU[i]);
}
}
void cleanUp()
{
free(BlockOfFrames_CPU);
free(NewFrame_CPU);
free(BlackAndWhiteFrame_CPU);
cudaFree(BlockOfFrames_GPU);
cudaFree(BlockOfLogNormalFrames_GPU);
cudaFree(MeanFrame_GPU);
cudaFree(MeanLogNormalFrame_GPU);
cudaFree(MedianLogNormalFrame_GPU);
cudaFree(StdvLogNormalFrame_GPU);
cudaFree(NewFrame_GPU);
cudaFree(BlackAndWhiteFrame_GPU);
// These can be removed later. I just used them for debugging.
free(BlockOfLogNormalFrames_CPU);
free(MeanFrame_CPU);
free(MeanLogNormalFrame_CPU);
free(MedianLogNormalFrame_CPU);
free(StdvLogNormalFrame_CPU);
}
int main()
{
AllocateMemory();
SetUpCudaDevices();
loadPixels();
cudaMemcpyAsync(BlockOfFrames_GPU, BlockOfFrames_CPU, PIXELS_PER_FRAME*FRAMES*sizeof(int), cudaMemcpyHostToDevice);
errorCheck("copyFramessUp");
cudaDeviceSynchronize();
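// Build the per-pixel background model: mean frame, then log|frame - mean| frames,
// then the mean, median and standard deviation of those log frames.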
creatingMeanPixelFrame<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMeanPixelFrame");
creatingLogNormalFrames<<<dimGrid,dimBlock>>>(MeanFrame_GPU, BlockOfFrames_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingLogNormalFrames");
creatingMeanLogNormalFrame<<<dimGrid,dimBlock>>>(MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMeanLogNormalFrame");
creatingMedianLogNormalFrame<<<dimGrid,dimBlock>>>(MedianLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingMedianLogNormalFrame");
creatingStdvLogNormalFrame<<<dimGrid,dimBlock>>>(StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, BlockOfLogNormalFrames_GPU, PIXELS_PER_FRAME, FRAMES);
errorCheck("creatingStdvLogNormalFrame");
cudaDeviceSynchronize();
loadNewFrame();
cudaMemcpyAsync(NewFrame_GPU, NewFrame_CPU, PIXELS_PER_FRAME*sizeof(int), cudaMemcpyHostToDevice);
errorCheck("copy New frame up");
CreateBlackAndWHiteFrame<<<dimGrid,dimBlock>>>(BlackAndWhiteFrame_GPU, NewFrame_GPU, StdvLogNormalFrame_GPU, MeanLogNormalFrame_GPU, PIXELS_PER_FRAME);
errorCheck("creatingStdvLogNormalFrame");
cudaMemcpyAsync(BlackAndWhiteFrame_CPU, BlackAndWhiteFrame_GPU, PIXELS_PER_FRAME*sizeof(float), cudaMemcpyDeviceToHost);
errorCheck("copy black and white frame down");
//Do stuff with black and white frame
stats();
cleanUp();
printf("\n DONE \n");
}
|
712155fe5cecef8c804de75c2e3f9debd06616ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
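/* Each generated kernel applies one sweep of the same 9-point weighted stencil
   (weights 7,5,9 / 12,15,12 / 9,5,7, with the weighted sum divided by 118) to the
   interior of an N x M grid; the four copies are identical apart from parameter names. */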
__global__ void __kernel___forma_kernel__0__(double * input, int N, int M, double * __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (M-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (N-2)){
double __temp_0__;
__temp_0__ = (7 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
double __temp_1__;
__temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (7 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (__temp_16__ / 118);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__;
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __var_4__, int N, int M, double * __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_2__ <= (M-2)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_3__ <= (N-2)){
double __temp_18__;
__temp_18__ = (7 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
double __temp_19__;
__temp_19__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
double __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
double __temp_25__;
__temp_25__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
double __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
double __temp_27__;
__temp_27__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
double __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
double __temp_29__;
__temp_29__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
double __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
double __temp_31__;
__temp_31__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
double __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
double __temp_33__;
__temp_33__ = (7 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
double __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
double __temp_35__;
__temp_35__ = (__temp_34__ / 118);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_35__;
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __var_3__, int N, int M, double * __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_4__ <= (M-2)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_5__ <= (N-2)){
double __temp_36__;
__temp_36__ = (7 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
double __temp_37__;
__temp_37__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
double __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
double __temp_39__;
__temp_39__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
double __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
double __temp_41__;
__temp_41__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
double __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
double __temp_43__;
__temp_43__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
double __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
double __temp_45__;
__temp_45__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
double __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
double __temp_47__;
__temp_47__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
double __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
double __temp_49__;
__temp_49__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
double __temp_50__;
__temp_50__ = (__temp_48__ + __temp_49__);
double __temp_51__;
__temp_51__ = (7 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
double __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
double __temp_53__;
__temp_53__ = (__temp_52__ / 118);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_53__;
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __var_2__, int N, int M, double * __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_6__ <= (M-2)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_7__ <= (N-2)){
double __temp_54__;
__temp_54__ = (7 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
double __temp_55__;
__temp_55__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
double __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
double __temp_57__;
__temp_57__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
double __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
double __temp_59__;
__temp_59__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
double __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
double __temp_61__;
__temp_61__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
double __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
double __temp_63__;
__temp_63__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
double __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
double __temp_65__;
__temp_65__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
double __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
double __temp_67__;
__temp_67__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
double __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
double __temp_69__;
__temp_69__ = (7 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
double __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
double __temp_71__;
__temp_71__ = (__temp_70__ / 118);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_71__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
hipMalloc(&input,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input);
}
double * __var_1__;
hipMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
hipMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
hipMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
hipMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
hipOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
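// Power measurement: sample the device power once before and once after running
// 1000 iterations of the four chained smoothing sweeps
// (input -> __var_4__ -> __var_3__ -> __var_2__ -> __var_1__), then print the difference.
// Only __kernel___forma_kernel__0__ is ever launched; kernels 1-3 are generated but unused.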
for (int x=0; x<1000; x++) {
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, __var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__var_2__);
hipFree(__var_3__);
hipFree(__var_4__);
}
/*Host Free End*/
| 712155fe5cecef8c804de75c2e3f9debd06616ad.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#include <nvml.h>
#include <assert.h>
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/* Device code Begin */
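/* Each generated kernel applies one sweep of the same 9-point weighted stencil
   (weights 7,5,9 / 12,15,12 / 9,5,7, with the weighted sum divided by 118) to the
   interior of an N x M grid; the four copies are identical apart from parameter names. */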
__global__ void __kernel___forma_kernel__0__(double * input, int N, int M, double * __var_4__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_0__;
__iter_0__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_0__ <= (M-2)){
int __iter_1__;
__iter_1__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_1__ <= (N-2)){
double __temp_0__;
__temp_0__ = (7 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(-1))]);
double __temp_1__;
__temp_1__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(-1))]);
double __temp_2__;
__temp_2__ = (__temp_0__ + __temp_1__);
double __temp_3__;
__temp_3__ = (9 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(-1))]);
double __temp_4__;
__temp_4__ = (__temp_2__ + __temp_3__);
double __temp_5__;
__temp_5__ = (12 * input[__iter_0__+(-1)+(M-0)*(__iter_1__)]);
double __temp_6__;
__temp_6__ = (__temp_4__ + __temp_5__);
double __temp_7__;
__temp_7__ = (15 * input[__iter_0__+(M-0)*(__iter_1__)]);
double __temp_8__;
__temp_8__ = (__temp_6__ + __temp_7__);
double __temp_9__;
__temp_9__ = (12 * input[__iter_0__+(1)+(M-0)*(__iter_1__)]);
double __temp_10__;
__temp_10__ = (__temp_8__ + __temp_9__);
double __temp_11__;
__temp_11__ = (9 * input[__iter_0__+(-1)+(M-0)*(__iter_1__+(1))]);
double __temp_12__;
__temp_12__ = (__temp_10__ + __temp_11__);
double __temp_13__;
__temp_13__ = (5 * input[__iter_0__+(M-0)*(__iter_1__+(1))]);
double __temp_14__;
__temp_14__ = (__temp_12__ + __temp_13__);
double __temp_15__;
__temp_15__ = (7 * input[__iter_0__+(1)+(M-0)*(__iter_1__+(1))]);
double __temp_16__;
__temp_16__ = (__temp_14__ + __temp_15__);
double __temp_17__;
__temp_17__ = (__temp_16__ / 118);
__var_4__[__iter_0__+(M-0)*(__iter_1__)] = __temp_17__;
}
}
}
__global__ void __kernel___forma_kernel__1__(double * __var_4__, int N, int M, double * __var_3__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_2__;
__iter_2__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_2__ <= (M-2)){
int __iter_3__;
__iter_3__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_3__ <= (N-2)){
double __temp_18__;
__temp_18__ = (7 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(-1))]);
double __temp_19__;
__temp_19__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(-1))]);
double __temp_20__;
__temp_20__ = (__temp_18__ + __temp_19__);
double __temp_21__;
__temp_21__ = (9 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(-1))]);
double __temp_22__;
__temp_22__ = (__temp_20__ + __temp_21__);
double __temp_23__;
__temp_23__ = (12 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__)]);
double __temp_24__;
__temp_24__ = (__temp_22__ + __temp_23__);
double __temp_25__;
__temp_25__ = (15 * __var_4__[__iter_2__+(M-0)*(__iter_3__)]);
double __temp_26__;
__temp_26__ = (__temp_24__ + __temp_25__);
double __temp_27__;
__temp_27__ = (12 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__)]);
double __temp_28__;
__temp_28__ = (__temp_26__ + __temp_27__);
double __temp_29__;
__temp_29__ = (9 * __var_4__[__iter_2__+(-1)+(M-0)*(__iter_3__+(1))]);
double __temp_30__;
__temp_30__ = (__temp_28__ + __temp_29__);
double __temp_31__;
__temp_31__ = (5 * __var_4__[__iter_2__+(M-0)*(__iter_3__+(1))]);
double __temp_32__;
__temp_32__ = (__temp_30__ + __temp_31__);
double __temp_33__;
__temp_33__ = (7 * __var_4__[__iter_2__+(1)+(M-0)*(__iter_3__+(1))]);
double __temp_34__;
__temp_34__ = (__temp_32__ + __temp_33__);
double __temp_35__;
__temp_35__ = (__temp_34__ / 118);
__var_3__[__iter_2__+(M-0)*(__iter_3__)] = __temp_35__;
}
}
}
__global__ void __kernel___forma_kernel__2__(double * __var_3__, int N, int M, double * __var_2__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_4__;
__iter_4__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_4__ <= (M-2)){
int __iter_5__;
__iter_5__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_5__ <= (N-2)){
double __temp_36__;
__temp_36__ = (7 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(-1))]);
double __temp_37__;
__temp_37__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(-1))]);
double __temp_38__;
__temp_38__ = (__temp_36__ + __temp_37__);
double __temp_39__;
__temp_39__ = (9 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(-1))]);
double __temp_40__;
__temp_40__ = (__temp_38__ + __temp_39__);
double __temp_41__;
__temp_41__ = (12 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__)]);
double __temp_42__;
__temp_42__ = (__temp_40__ + __temp_41__);
double __temp_43__;
__temp_43__ = (15 * __var_3__[__iter_4__+(M-0)*(__iter_5__)]);
double __temp_44__;
__temp_44__ = (__temp_42__ + __temp_43__);
double __temp_45__;
__temp_45__ = (12 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__)]);
double __temp_46__;
__temp_46__ = (__temp_44__ + __temp_45__);
double __temp_47__;
__temp_47__ = (9 * __var_3__[__iter_4__+(-1)+(M-0)*(__iter_5__+(1))]);
double __temp_48__;
__temp_48__ = (__temp_46__ + __temp_47__);
double __temp_49__;
__temp_49__ = (5 * __var_3__[__iter_4__+(M-0)*(__iter_5__+(1))]);
double __temp_50__;
__temp_50__ = (__temp_48__ + __temp_49__);
double __temp_51__;
__temp_51__ = (7 * __var_3__[__iter_4__+(1)+(M-0)*(__iter_5__+(1))]);
double __temp_52__;
__temp_52__ = (__temp_50__ + __temp_51__);
double __temp_53__;
__temp_53__ = (__temp_52__ / 118);
__var_2__[__iter_4__+(M-0)*(__iter_5__)] = __temp_53__;
}
}
}
__global__ void __kernel___forma_kernel__3__(double * __var_2__, int N, int M, double * __var_1__){
int FORMA_BLOCKDIM_Y = (int)(blockDim.y);
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int __iter_6__;
__iter_6__ = (int)(blockIdx.x)*(int)(FORMA_BLOCKDIM_X) + (int)(threadIdx.x) + 1;
if(__iter_6__ <= (M-2)){
int __iter_7__;
__iter_7__ = (int)(blockIdx.y)*(int)(FORMA_BLOCKDIM_Y) + (int)(threadIdx.y) + 1;
if(__iter_7__ <= (N-2)){
double __temp_54__;
__temp_54__ = (7 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(-1))]);
double __temp_55__;
__temp_55__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(-1))]);
double __temp_56__;
__temp_56__ = (__temp_54__ + __temp_55__);
double __temp_57__;
__temp_57__ = (9 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(-1))]);
double __temp_58__;
__temp_58__ = (__temp_56__ + __temp_57__);
double __temp_59__;
__temp_59__ = (12 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__)]);
double __temp_60__;
__temp_60__ = (__temp_58__ + __temp_59__);
double __temp_61__;
__temp_61__ = (15 * __var_2__[__iter_6__+(M-0)*(__iter_7__)]);
double __temp_62__;
__temp_62__ = (__temp_60__ + __temp_61__);
double __temp_63__;
__temp_63__ = (12 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__)]);
double __temp_64__;
__temp_64__ = (__temp_62__ + __temp_63__);
double __temp_65__;
__temp_65__ = (9 * __var_2__[__iter_6__+(-1)+(M-0)*(__iter_7__+(1))]);
double __temp_66__;
__temp_66__ = (__temp_64__ + __temp_65__);
double __temp_67__;
__temp_67__ = (5 * __var_2__[__iter_6__+(M-0)*(__iter_7__+(1))]);
double __temp_68__;
__temp_68__ = (__temp_66__ + __temp_67__);
double __temp_69__;
__temp_69__ = (7 * __var_2__[__iter_6__+(1)+(M-0)*(__iter_7__+(1))]);
double __temp_70__;
__temp_70__ = (__temp_68__ + __temp_69__);
double __temp_71__;
__temp_71__ = (__temp_70__ / 118);
__var_1__[__iter_6__+(M-0)*(__iter_7__)] = __temp_71__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(double * h_input, int N, int M, double * __var_0__){
/* Host allocation Begin */
double * input;
cudaMalloc(&input,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(double)*((N-0)*(M-0)), memcpy_kind_h_input);
}
double * __var_1__;
cudaMalloc(&__var_1__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
double * __var_2__;
cudaMalloc(&__var_2__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_2__\n");
double * __var_3__;
cudaMalloc(&__var_3__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_3__\n");
double * __var_4__;
cudaMalloc(&__var_4__,sizeof(double)*((N-0)*(M-0)));
Check_CUDA_Error("Allocation Error!! : __var_4__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-2) - 1 ) + 1;
int __size_1___kernel___forma_kernel__0__ = ((N-2) - 1 ) + 1;
int __max_occupancy_blocksize___kernel___forma_kernel__0__;
int _max_occupancy_gridsize___kernel___forma_kernel__0__;
cudaOccupancyMaxPotentialBlockSize(&_max_occupancy_gridsize___kernel___forma_kernel__0__,&__max_occupancy_blocksize___kernel___forma_kernel__0__,(const void*)__kernel___forma_kernel__0__,0,0);
int __max_occupancy_blocksize___kernel___forma_kernel__0___0 = pow((double)__max_occupancy_blocksize___kernel___forma_kernel__0__, (double)(1.0/(double)2));
__max_occupancy_blocksize___kernel___forma_kernel__0___0 = FORMA_MAX(__max_occupancy_blocksize___kernel___forma_kernel__0___0/32, 1)*32;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__block_0___kernel___forma_kernel__0__);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
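/* Launch configuration: 32 x 32 thread blocks tile the (M-2) x (N-2) interior of
   the grid. The occupancy query above is evaluated, but the generated code then
   fixes the block dimensions to 32 x 32 regardless of its result. */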
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
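/* Benchmark loop: the four smoothing stages are launched 1000 times between the
   two NVML power readings; the difference of the two instantaneous readings
   (power2 - power1, in milliwatts) is printed after the loop. Note that all four
   launches reuse __kernel___forma_kernel__0__; kernels 1-3 defined above have
   identical bodies, so the numerical result is unchanged. */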
for (int x=0; x<1000; x++) {
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __var_4__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_4__, N, M, __var_3__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_3__, N, M, __var_2__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (__var_2__, N, M, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(double)*((N-0)*(M-0)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__var_2__);
cudaFree(__var_3__);
cudaFree(__var_4__);
}
/*Host Free End*/
|
9ef45729694f6098b1bd7e81f1fed2b1d50eb9c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* bfs_main.cu
*
* Created on: Feb 28, 2017
* Author: chao
*
 * The GPU version of the BFS algorithm.
 * Similar to the sequential version, but uses one CUDA thread to process one
 * graph node in the front-wave in every iteration, so the number of CUDA
 * threads launched by the kernel may change from iteration to iteration.
*
* usage:
* Compile with Makefile.
* run as: ./a.out -v -i inputfile -o outputfile
* -v: print time info
* -i: the input graph data file path
* -o: output file path
*
*
*/
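/*
 * The BFS kernels themselves are defined in bfs_kernel.h and are not part of
 * this file. Judging from the launch arguments below, each thread presumably
 * takes one node id from the current front-wave, walks that node's edge range
 * (start_edgeid / num_edges in Node_t), updates shortestPath for unvisited
 * neighbours, and appends newly discovered nodes to the next wave through an
 * atomic increment of nextWaveSize_d. This description is an assumption made
 * for the reader's orientation, not a quote of the kernel source.
 */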
#include <cstdlib>
#include <cstdio>
#include <cstdint>
#include <climits>
#include <iostream>
#include <iomanip>
#include <vector>
#include "../../common/helper_getopt.h"
#include "../../common/helper_timer.h"
#include "../../common/helper_err.h"
#include "bfs_main.h"
#include "bfs_comm_data.h"
#include "bfs_kernel.h"
int main(int argc, char*argv[]){
bool printTime = false;
char* input_path = NULL;
char* output_path=NULL;
/*
* parse arguments
*/
int opt;
extern char* optarg;
extern int optind;
while((opt=getopt(argc, argv, "vi:o:"))!=EOF){
switch(opt){
case 'v':
printTime = true;
break;
case 'i':
input_path = optarg;
break;
case 'o':
output_path = optarg;
break;
case '?':
break;
default:
break;
}
}
if(input_path == NULL){
std::cerr<<"Need input file path with -i !!!"<<std::endl;
return 1;
}
/*
* read input file and initialize the graph data
*/
std::cout<<"Read graph data ..."<<std::endl;
Node_t *graph_nodes;
Edge_t *graph_edges;
int total_graph_nodes;
int total_graph_edges;
int source_nodeid;
initGraphFromFile(input_path, graph_nodes, graph_edges,
total_graph_nodes, total_graph_edges, source_nodeid);
//std::cout<<total_graph_nodes<<" "<<total_graph_edges<<std::endl;
int *shortestPath = new int[total_graph_nodes];
for(int i=0; i<total_graph_nodes; i++){
shortestPath[i] = INT_MAX;
}
shortestPath[source_nodeid] = 0;
double t1, t2;
/*
* create gpu memory
*/
hipSetDevice(0);
Node_t *graph_nodes_d;
Edge_t *graph_edges_d;
int *shortestPath_d;
checkCudaErr(hipMalloc(&graph_nodes_d,
total_graph_nodes*sizeof(Node_t)));
checkCudaErr(hipMalloc(&graph_edges_d,
total_graph_edges*sizeof(Edge_t)));
checkCudaErr(hipMalloc(&shortestPath_d,
total_graph_nodes*sizeof(int)));
/*
* copyin data
*/
t1 = getTime();
checkCudaErr(hipMemcpy(graph_nodes_d,
graph_nodes,
total_graph_nodes*sizeof(Node_t),
hipMemcpyHostToDevice));
checkCudaErr(hipMemcpy(graph_edges_d,
graph_edges,
total_graph_edges*sizeof(Edge_t),
hipMemcpyHostToDevice));
checkCudaErr(hipMemcpy(shortestPath_d,
shortestPath,
total_graph_nodes*sizeof(int),
hipMemcpyHostToDevice));
t2 = getTime();
double copyinTime =0;
copyinTime += t2-t1;
/*
* call kernel to do bfs
*/
int* frontWave_d;
int* nextWave_d;
// allocate the wave arrays, assuming the real wave size will not exceed
// MAX_WAVE_SIZE during the iterations
checkCudaErr(hipMalloc(&frontWave_d,
MAX_WAVE_SIZE*sizeof(int)));
checkCudaErr(hipMalloc(&nextWave_d,
MAX_WAVE_SIZE*sizeof(int)));
int frontWaveSize;
int* nextWaveSize_d;
checkCudaErr(hipMalloc((void**)&nextWaveSize_d, sizeof(int)));
std::cout<<"start bfs processing ..."<<std::endl;
//add source node id to frontwave to start
t1 = getTime();
checkCudaErr(hipMemcpy(frontWave_d, &source_nodeid,
sizeof(int), hipMemcpyHostToDevice));
t2= getTime();
copyinTime += t2-t1;
frontWaveSize = 1;
double kernelTime=0;
double copyoutTime=0;
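/* Frontier-expansion loop: each pass resets the next-wave counter on the device,
   launches a kernel sized to the current frontier (multi-block when the frontier
   exceeds one block, single-block otherwise), copies the new frontier size back
   to the host, and swaps the front/next wave buffers. The loop terminates when
   the frontier becomes empty. */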
while(frontWaveSize >0){
int reset = 0;
t1=getTime();
/*checkCudaErr(hipMemcpyToSymbol(nextWaveSize_d, &reset, sizeof(int),
0, hipMemcpyHostToDevice));*/
checkCudaErr(hipMemcpy(nextWaveSize_d, &reset, sizeof(int), hipMemcpyHostToDevice));
t2 = getTime();
copyinTime += t2-t1;
t1 = getTime();
if(frontWaveSize > MAX_THREAD_PER_BLOCK){
//std::cout<<"go multiblock ..."<<std::endl;
dim3 block(MAX_THREAD_PER_BLOCK, 1 ,1);
dim3 grid((frontWaveSize+MAX_THREAD_PER_BLOCK-1)/MAX_THREAD_PER_BLOCK,1,1);
hipLaunchKernelGGL(( bfs_multiblocks), dim3(grid), dim3(block), 0, 0,
graph_nodes_d,
graph_edges_d,
shortestPath_d,
frontWaveSize,
frontWave_d,
nextWave_d,
nextWaveSize_d);
}
else{
//std::cout<<"go single ..."<<std::endl;
dim3 block(MAX_THREAD_PER_BLOCK,1,1);
dim3 grid(1,1,1);
hipLaunchKernelGGL(( bfs_singleblock), dim3(grid), dim3(block), 0, 0,
graph_nodes_d,
graph_edges_d,
shortestPath_d,
frontWaveSize,
frontWave_d,
nextWave_d,
nextWaveSize_d);
}
checkCudaErr(hipGetLastError());
checkCudaErr(hipDeviceSynchronize());
t2 = getTime();
kernelTime += t2 -t1;
t1= getTime();
/*checkCudaErr(hipMemcpyFromSymbol(&frontWaveSize, nextWaveSize_d,
sizeof(int), 0, hipMemcpyDeviceToHost));*/
checkCudaErr(hipMemcpy(&frontWaveSize, nextWaveSize_d, sizeof(int), hipMemcpyDeviceToHost));
t2 = getTime();
copyoutTime += t2-t1;
//std::cout<<frontWaveSize<<std::endl;
int *tmp = frontWave_d;
frontWave_d = nextWave_d;
nextWave_d = tmp;
}
/*
* copy result back
*/
t1 = getTime();
checkCudaErr(hipMemcpy(shortestPath, shortestPath_d,
total_graph_nodes*sizeof(int),
hipMemcpyDeviceToHost));
t2 = getTime();
copyoutTime += t2-t1;
/*
* write result
*/
if(output_path!=NULL){
std::cout<<"write output ..."<<std::endl;
writeOutput(output_path, shortestPath, total_graph_nodes);
}
// these arrays were allocated with new[], so release them with delete[]
delete[] graph_nodes;
delete[] graph_edges;
delete[] shortestPath;
hipFree(graph_nodes_d);
hipFree(graph_edges_d);
hipFree(shortestPath_d);
hipFree(frontWave_d);
hipFree(nextWave_d);
hipFree(nextWaveSize_d);
std::cout<<"Test complete !!!"<<std::endl;
if(printTime){
std::cout<<"\tgraph info:"<<std::endl;
std::cout<<"\t\tnodes: "<<total_graph_nodes<<std::endl;
std::cout<<"\t\tedges: "<<total_graph_edges<<std::endl;
std::cout<<"\t\tsource node id: "<<source_nodeid<<std::endl;
std::cout<<"\ttime info: "<<std::endl;
std::cout<<"\t\tTotal time: "<<std::fixed<<std::setprecision(4)
<<1000*(kernelTime+copyinTime+copyoutTime)<<"(ms)"<<std::endl;
std::cout<<"\t\tkernel time: "<<std::fixed<<std::setprecision(4)<<1000*kernelTime<<"(ms)"<<std::endl;
std::cout<<"\t\tcopyin time: "<<std::fixed<<std::setprecision(4)<<1000*copyinTime<<"(ms)"<<std::endl;
std::cout<<"\t\tcopyout time: "<<std::fixed<<std::setprecision(4)<<1000*copyoutTime<<"(ms)"<<std::endl;
}
return 0;
}
void initGraphFromFile(char* infile,
Node_t *&nodes, Edge_t *&edges,
int &total_nodes, int &total_edges, int &src_node){
FILE *fp = fopen(infile, "r");
if(!fp){
std::cerr<<"Can't open input file !!!"<<std::endl;
exit(1);
}
int x, y, count;
count = fscanf(fp, "%d", &total_nodes);
nodes = new Node_t[total_nodes];
for(int i=0; i<total_nodes; i++){
count =fscanf(fp, "%d", &x);
count=fscanf(fp, "%d", &y);
nodes[i].start_edgeid = x;
nodes[i].num_edges = y;
}
count =fscanf(fp, "%d", &src_node);
count =fscanf(fp, "%d", &total_edges);
edges = new Edge_t[total_edges];
for(int i=0; i<total_edges; i++){
count=fscanf(fp, "%d", &x);
count=fscanf(fp, "%d", &y);
//edges[i].dst_nodeid = x;
//edges[i].weight = y;
edges[i] = x;
}
fclose(fp);
}
void writeOutput(
char *outfile,
int *spath,
int total_nodes){
FILE *fp = fopen(outfile, "w");
if(!fp){
std::cerr<<"Can't open the output file !!!"<<std::endl;
exit(1);
}
fprintf(fp, "%d\n", total_nodes);
for(int i=0; i<total_nodes; i++){
fprintf(fp, "%d %d\n", i, spath[i]);
}
fclose(fp);
}
| 9ef45729694f6098b1bd7e81f1fed2b1d50eb9c2.cu | /*
* bfs_main.cu
*
* Created on: Feb 28, 2017
* Author: chao
*
 * The GPU version of the BFS algorithm.
 * Similar to the sequential version, but uses one CUDA thread to process one
 * graph node in the front-wave in every iteration, so the number of CUDA
 * threads launched by the kernel may change from iteration to iteration.
*
* usage:
* Compile with Makefile.
* run as: ./a.out -v -i inputfile -o outputfile
* -v: print time info
* -i: the input graph data file path
* -o: output file path
*
*
*/
#include <cstdlib>
#include <cstdio>
#include <cstdint>
#include <climits>
#include <iostream>
#include <iomanip>
#include <vector>
#include "../../common/helper_getopt.h"
#include "../../common/helper_timer.h"
#include "../../common/helper_err.h"
#include "bfs_main.h"
#include "bfs_comm_data.h"
#include "bfs_kernel.h"
int main(int argc, char*argv[]){
bool printTime = false;
char* input_path = NULL;
char* output_path=NULL;
/*
* parse arguments
*/
int opt;
extern char* optarg;
extern int optind;
while((opt=getopt(argc, argv, "vi:o:"))!=EOF){
switch(opt){
case 'v':
printTime = true;
break;
case 'i':
input_path = optarg;
break;
case 'o':
output_path = optarg;
break;
case '?':
break;
default:
break;
}
}
if(input_path == NULL){
std::cerr<<"Need input file path with -i !!!"<<std::endl;
return 1;
}
/*
* read input file and initialize the graph data
*/
std::cout<<"Read graph data ..."<<std::endl;
Node_t *graph_nodes;
Edge_t *graph_edges;
int total_graph_nodes;
int total_graph_edges;
int source_nodeid;
initGraphFromFile(input_path, graph_nodes, graph_edges,
total_graph_nodes, total_graph_edges, source_nodeid);
//std::cout<<total_graph_nodes<<" "<<total_graph_edges<<std::endl;
int *shortestPath = new int[total_graph_nodes];
for(int i=0; i<total_graph_nodes; i++){
shortestPath[i] = INT_MAX;
}
shortestPath[source_nodeid] = 0;
double t1, t2;
/*
* create gpu memory
*/
cudaSetDevice(0);
Node_t *graph_nodes_d;
Edge_t *graph_edges_d;
int *shortestPath_d;
checkCudaErr(cudaMalloc(&graph_nodes_d,
total_graph_nodes*sizeof(Node_t)));
checkCudaErr(cudaMalloc(&graph_edges_d,
total_graph_edges*sizeof(Edge_t)));
checkCudaErr(cudaMalloc(&shortestPath_d,
total_graph_nodes*sizeof(int)));
/*
* copyin data
*/
t1 = getTime();
checkCudaErr(cudaMemcpy(graph_nodes_d,
graph_nodes,
total_graph_nodes*sizeof(Node_t),
cudaMemcpyHostToDevice));
checkCudaErr(cudaMemcpy(graph_edges_d,
graph_edges,
total_graph_edges*sizeof(Edge_t),
cudaMemcpyHostToDevice));
checkCudaErr(cudaMemcpy(shortestPath_d,
shortestPath,
total_graph_nodes*sizeof(int),
cudaMemcpyHostToDevice));
t2 = getTime();
double copyinTime =0;
copyinTime += t2-t1;
/*
* call kernel to do bfs
*/
int* frontWave_d;
int* nextWave_d;
// allocate the wave arrays, assuming the real wave size will not exceed
// MAX_WAVE_SIZE during the iterations
checkCudaErr(cudaMalloc(&frontWave_d,
MAX_WAVE_SIZE*sizeof(int)));
checkCudaErr(cudaMalloc(&nextWave_d,
MAX_WAVE_SIZE*sizeof(int)));
int frontWaveSize;
int* nextWaveSize_d;
checkCudaErr(cudaMalloc((void**)&nextWaveSize_d, sizeof(int)));
std::cout<<"start bfs processing ..."<<std::endl;
//add source node id to frontwave to start
t1 = getTime();
checkCudaErr(cudaMemcpy(frontWave_d, &source_nodeid,
sizeof(int), cudaMemcpyHostToDevice));
t2= getTime();
copyinTime += t2-t1;
frontWaveSize = 1;
double kernelTime=0;
double copyoutTime=0;
while(frontWaveSize >0){
int reset = 0;
t1=getTime();
/*checkCudaErr(cudaMemcpyToSymbol(nextWaveSize_d, &reset, sizeof(int),
0, cudaMemcpyHostToDevice));*/
checkCudaErr(cudaMemcpy(nextWaveSize_d, &reset, sizeof(int), cudaMemcpyHostToDevice));
t2 = getTime();
copyinTime += t2-t1;
t1 = getTime();
if(frontWaveSize > MAX_THREAD_PER_BLOCK){
//std::cout<<"go multiblock ..."<<std::endl;
dim3 block(MAX_THREAD_PER_BLOCK, 1 ,1);
dim3 grid((frontWaveSize+MAX_THREAD_PER_BLOCK-1)/MAX_THREAD_PER_BLOCK,1,1);
bfs_multiblocks<<<grid, block>>>(
graph_nodes_d,
graph_edges_d,
shortestPath_d,
frontWaveSize,
frontWave_d,
nextWave_d,
nextWaveSize_d);
}
else{
//std::cout<<"go single ..."<<std::endl;
dim3 block(MAX_THREAD_PER_BLOCK,1,1);
dim3 grid(1,1,1);
bfs_singleblock<<<grid, block>>>(
graph_nodes_d,
graph_edges_d,
shortestPath_d,
frontWaveSize,
frontWave_d,
nextWave_d,
nextWaveSize_d);
}
checkCudaErr(cudaGetLastError());
checkCudaErr(cudaDeviceSynchronize());
t2 = getTime();
kernelTime += t2 -t1;
t1= getTime();
/*checkCudaErr(cudaMemcpyFromSymbol(&frontWaveSize, nextWaveSize_d,
sizeof(int), 0, cudaMemcpyDeviceToHost));*/
checkCudaErr(cudaMemcpy(&frontWaveSize, nextWaveSize_d, sizeof(int), cudaMemcpyDeviceToHost));
t2 = getTime();
copyoutTime += t2-t1;
//std::cout<<frontWaveSize<<std::endl;
int *tmp = frontWave_d;
frontWave_d = nextWave_d;
nextWave_d = tmp;
}
/*
* copy result back
*/
t1 = getTime();
checkCudaErr(cudaMemcpy(shortestPath, shortestPath_d,
total_graph_nodes*sizeof(int),
cudaMemcpyDeviceToHost));
t2 = getTime();
copyoutTime += t2-t1;
/*
* write result
*/
if(output_path!=NULL){
std::cout<<"write output ..."<<std::endl;
writeOutput(output_path, shortestPath, total_graph_nodes);
}
// these arrays were allocated with new[], so release them with delete[]
delete[] graph_nodes;
delete[] graph_edges;
delete[] shortestPath;
cudaFree(graph_nodes_d);
cudaFree(graph_edges_d);
cudaFree(shortestPath_d);
cudaFree(frontWave_d);
cudaFree(nextWave_d);
cudaFree(nextWaveSize_d);
std::cout<<"Test complete !!!"<<std::endl;
if(printTime){
std::cout<<"\tgraph info:"<<std::endl;
std::cout<<"\t\tnodes: "<<total_graph_nodes<<std::endl;
std::cout<<"\t\tedges: "<<total_graph_edges<<std::endl;
std::cout<<"\t\tsource node id: "<<source_nodeid<<std::endl;
std::cout<<"\ttime info: "<<std::endl;
std::cout<<"\t\tTotal time: "<<std::fixed<<std::setprecision(4)
<<1000*(kernelTime+copyinTime+copyoutTime)<<"(ms)"<<std::endl;
std::cout<<"\t\tkernel time: "<<std::fixed<<std::setprecision(4)<<1000*kernelTime<<"(ms)"<<std::endl;
std::cout<<"\t\tcopyin time: "<<std::fixed<<std::setprecision(4)<<1000*copyinTime<<"(ms)"<<std::endl;
std::cout<<"\t\tcopyout time: "<<std::fixed<<std::setprecision(4)<<1000*copyoutTime<<"(ms)"<<std::endl;
}
return 0;
}
void initGraphFromFile(char* infile,
Node_t *&nodes, Edge_t *&edges,
int &total_nodes, int &total_edges, int &src_node){
FILE *fp = fopen(infile, "r");
if(!fp){
std::cerr<<"Can't open input file !!!"<<std::endl;
exit(1);
}
int x, y, count;
count = fscanf(fp, "%d", &total_nodes);
nodes = new Node_t[total_nodes];
for(int i=0; i<total_nodes; i++){
count =fscanf(fp, "%d", &x);
count=fscanf(fp, "%d", &y);
nodes[i].start_edgeid = x;
nodes[i].num_edges = y;
}
count =fscanf(fp, "%d", &src_node);
count =fscanf(fp, "%d", &total_edges);
edges = new Edge_t[total_edges];
for(int i=0; i<total_edges; i++){
count=fscanf(fp, "%d", &x);
count=fscanf(fp, "%d", &y);
//edges[i].dst_nodeid = x;
//edges[i].weight = y;
edges[i] = x;
}
fclose(fp);
}
void writeOutput(
char *outfile,
int *spath,
int total_nodes){
FILE *fp = fopen(outfile, "w");
if(!fp){
std::cerr<<"Can't open the output file !!!"<<std::endl;
exit(1);
}
fprintf(fp, "%d\n", total_nodes);
for(int i=0; i<total_nodes; i++){
fprintf(fp, "%d %d\n", i, spath[i]);
}
fclose(fp);
}
|
7ca3e63f28b0c470c4048dc436ea6fe6749704d2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define SIZE 1000
#define NUM_BIN 16
__global__ void histogram_without_atomic(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = d_a[tid];
if (tid < SIZE)
{
d_b[item]++;
}
}
__global__ void histogram_atomic(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = d_a[tid];
if (tid < SIZE)
{
atomicAdd(&(d_b[item]), 1);
}
}
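// The two kernels differ only in how a bin is updated: histogram_without_atomic
// performs a plain d_b[item]++, so concurrent threads hitting the same bin lose
// updates (a data race), while histogram_atomic serializes the increments with
// atomicAdd and produces correct counts. With SIZE = 1000 and NUM_BIN = 16 the
// correct histogram is 63 for bins 0-7 and 62 for bins 8-15; main() below
// launches the racy kernel (the atomic launch is commented out), so the printed
// counts are expected to fall short of these values.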
int main()
{
int h_a[SIZE];
for (int i = 0; i < SIZE; i++) {
h_a[i] = i % NUM_BIN;
}
int h_b[NUM_BIN];
for (int i = 0; i < NUM_BIN; i++) {
h_b[i] = 0;
}
// declare GPU memory pointers
int * d_a;
int * d_b;
// allocate GPU memory
hipMalloc((void **)&d_a, SIZE * sizeof(int));
hipMalloc((void **)&d_b, NUM_BIN * sizeof(int));
// transfer the arrays to the GPU
hipMemcpy(d_a, h_a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, NUM_BIN * sizeof(int), hipMemcpyHostToDevice);
// launch the kernel
histogram_without_atomic << <((SIZE+NUM_BIN-1) / NUM_BIN), NUM_BIN >> >(d_b, d_a);
//histogram_atomic << <((SIZE+NUM_BIN-1) / NUM_BIN), NUM_BIN >> >(d_b, d_a);
// copy back the sum from GPU
hipMemcpy(h_b, d_b, NUM_BIN * sizeof(int), hipMemcpyDeviceToHost);
printf("Histogram using 16 bins without shared memory is: \n");
for (int i = 0; i < NUM_BIN; i++) {
printf("bin %d: count %d\n", i, h_b[i]);
}
// free GPU memory allocation
hipFree(d_a);
hipFree(d_b);
return 0;
}
| 7ca3e63f28b0c470c4048dc436ea6fe6749704d2.cu | #include <stdio.h>
#include <cuda_runtime.h>
#define SIZE 1000
#define NUM_BIN 16
__global__ void histogram_without_atomic(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = d_a[tid];
if (tid < SIZE)
{
d_b[item]++;
}
}
__global__ void histogram_atomic(int *d_b, int *d_a)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int item = d_a[tid];
if (tid < SIZE)
{
atomicAdd(&(d_b[item]), 1);
}
}
int main()
{
int h_a[SIZE];
for (int i = 0; i < SIZE; i++) {
h_a[i] = i % NUM_BIN;
}
int h_b[NUM_BIN];
for (int i = 0; i < NUM_BIN; i++) {
h_b[i] = 0;
}
// declare GPU memory pointers
int * d_a;
int * d_b;
// allocate GPU memory
cudaMalloc((void **)&d_a, SIZE * sizeof(int));
cudaMalloc((void **)&d_b, NUM_BIN * sizeof(int));
// transfer the arrays to the GPU
cudaMemcpy(d_a, h_a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice);
// launch the kernel
histogram_without_atomic << <((SIZE+NUM_BIN-1) / NUM_BIN), NUM_BIN >> >(d_b, d_a);
//histogram_atomic << <((SIZE+NUM_BIN-1) / NUM_BIN), NUM_BIN >> >(d_b, d_a);
// copy back the sum from GPU
cudaMemcpy(h_b, d_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost);
printf("Histogram using 16 bins without shared memory is: \n");
for (int i = 0; i < NUM_BIN; i++) {
printf("bin %d: count %d\n", i, h_b[i]);
}
// free GPU memory allocation
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
aa26d4b3a0bfe86ecfda4b5dd3814f3ed10453bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef GET_QUANTILE_CU
#define GET_QUANTILE_CU
__global__ void
quanMap_kernel(Record *d_R, int interval, int rLen,int *d_output)
{
extern __shared__ Record tempBuf[];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
if(resultID<rLen)
tempBuf[tid]=d_R[resultID];
__syncthreads();
if(tid==0)
{
Record value;
value.x=tempBuf[0].y;
int numElement=0;
if(resultID+interval>rLen)
numElement=rLen-resultID;
else
numElement=interval;
value.y=tempBuf[numElement-1].y+1;
int curQuanPos=(resultID/interval)<<1;
d_output[curQuanPos]=value.x;
d_output[curQuanPos+1]=value.y;
}
}
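/* Each thread block loads one 'interval'-sized chunk of d_R into shared memory;
   thread 0 then writes two ints for that chunk into d_output -- the y value of
   the chunk's first record and the y value of its last record plus one -- at
   offset 2 * (chunk index). If d_R is sorted by y, these two ints describe the
   half-open value range covered by the chunk. */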
void getQuantile(Record *d_R, int rLen, int interval, int* d_output, int numQuantile)
{
int numThreadsPerBlock_x=interval;
int numThreadsPerBlock_y=1;
int numBlock_X=divRoundUp(rLen, interval);
int numBlock_Y=1;
if(numBlock_X>NLJ_MAX_NUM_BLOCK_PER_DIM)
{
numBlock_Y=numBlock_X/NLJ_MAX_NUM_BLOCK_PER_DIM;
if(numBlock_X%NLJ_MAX_NUM_BLOCK_PER_DIM!=0)
numBlock_Y++;
numBlock_X=NLJ_MAX_NUM_BLOCK_PER_DIM;
}
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_X, numBlock_Y , 1);
hipLaunchKernelGGL(( quanMap_kernel), dim3(grid),dim3(thread), interval*sizeof(Record), 0, d_R, interval,rLen, d_output);
}
#endif
| aa26d4b3a0bfe86ecfda4b5dd3814f3ed10453bd.cu | #ifndef GET_QUANTILE_CU
#define GET_QUANTILE_CU
__global__ void
quanMap_kernel(Record *d_R, int interval, int rLen,int *d_output)
{
extern __shared__ Record tempBuf[];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
if(resultID<rLen)
tempBuf[tid]=d_R[resultID];
__syncthreads();
if(tid==0)
{
Record value;
value.x=tempBuf[0].y;
int numElement=0;
if(resultID+interval>rLen)
numElement=rLen-resultID;
else
numElement=interval;
value.y=tempBuf[numElement-1].y+1;
int curQuanPos=(resultID/interval)<<1;
d_output[curQuanPos]=value.x;
d_output[curQuanPos+1]=value.y;
}
}
void getQuantile(Record *d_R, int rLen, int interval, int* d_output, int numQuantile)
{
int numThreadsPerBlock_x=interval;
int numThreadsPerBlock_y=1;
int numBlock_X=divRoundUp(rLen, interval);
int numBlock_Y=1;
if(numBlock_X>NLJ_MAX_NUM_BLOCK_PER_DIM)
{
numBlock_Y=numBlock_X/NLJ_MAX_NUM_BLOCK_PER_DIM;
if(numBlock_X%NLJ_MAX_NUM_BLOCK_PER_DIM!=0)
numBlock_Y++;
numBlock_X=NLJ_MAX_NUM_BLOCK_PER_DIM;
}
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_X, numBlock_Y , 1);
quanMap_kernel<<<grid,thread, interval*sizeof(Record)>>>(d_R, interval,rLen, d_output);
}
#endif
|
21874fa2e63fed027da1c7d2d76aff7a5edec60e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* bf-knn (Brute-Force k-Nearest Neighbors Search on the GPU) is the proprietary
* property of The Regents of the University of California ("The Regents.")
*
 * Copyright © 2015 The Regents of the University of California, Davis campus.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted by nonprofit, research institutions for research
* use only, provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of The Regents may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* The end-user understands that the program was developed for research purposes
* and is advised not to rely exclusively on the program for any reason.
*
* THE SOFTWARE PROVIDED IS ON AN "AS IS" BASIS, AND THE REGENTS HAVE NO
* OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS. THE REGENTS SPECIFICALLY DISCLAIM ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
* EVENT SHALL THE REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
* INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES, INCLUDING BUT NOT LIMITED TO
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA OR PROFITS, OR
* BUSINESS INTERRUPTION, HOWEVER CAUSED AND UNDER ANY THEORY OF LIABILITY
* WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* If you do not agree to these terms, do not download or use the software. This
* license may be modified only in a writing signed by authorized signatory of
* both parties.
*
* For commercial license information please contact [email protected].
******************************************************************************/
#include "bf_knn_host.h"
#include "util.h"
#include "bf_knn_device.cuh"
const bool kPrintTime = true;
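// Pipeline overview: ComputeDistances fills a padded_num_query x
// padded_num_reference matrix of 64-bit candidates (presumably a distance and a
// reference index packed into one long long; the packing is defined in
// bf_knn_device.cuh, not in this file). SortCandidateGroups sorts fixed-size
// groups of candidates for each query, MergeCandidateGroups repeatedly merges
// pairs of sorted groups until a single group remains, and RetrieveResults
// unpacks the first num_nearest_neighbor candidates into separate index and
// distance arrays.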
void ComputeDistances(const int padded_num_dimension,
const int padded_num_query,
const int padded_num_reference,
const float* const d_padded_query,
const float* const d_padded_reference,
long long* const d_candidate) {
assert(padded_num_dimension % CHUNK_SIZE == 0);
assert(padded_num_query % TILE_SIZE == 0);
assert(padded_num_reference % TILE_SIZE == 0);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(padded_num_reference / TILE_SIZE, padded_num_query / TILE_SIZE);
CREATE_AND_START_TIMER;
hipLaunchKernelGGL(( kComputeDistances), dim3(grid), dim3(block), 0, 0,
padded_num_dimension, padded_num_query, padded_num_reference,
d_padded_query, d_padded_reference, d_candidate);
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("ComputeDistances %.3f ms\n", gpu_timer.elapsed());
}
void SortCandidateGroups(const int num_query, const int num_reference,
const int padded_num_reference,
const int num_nearest_neighbor,
long long* const d_candidate) {
dim3 block, grid;
if (num_nearest_neighbor == 500) {
block = dim3(K500_NT);
grid = dim3(DivideAndCeil(num_reference, K500_NV), num_query);
} else if (num_nearest_neighbor == 1000) {
block = dim3(K1000_NT);
grid = dim3(DivideAndCeil(num_reference, K1000_NV), num_query);
} else if (num_nearest_neighbor == 2000) {
block = dim3(K2000_NT);
grid = dim3(DivideAndCeil(num_reference, K2000_NV), num_query);
} else if (num_nearest_neighbor == 3000) {
block = dim3(K3000_NT);
grid = dim3(DivideAndCeil(num_reference, K3000_NV), num_query);
}
CREATE_AND_START_TIMER;
if (num_nearest_neighbor == 500) {
hipLaunchKernelGGL(( kSortCandidateGroups<K500_NT, K500_VT, K500_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 1000) {
hipLaunchKernelGGL(( kSortCandidateGroups<K1000_NT, K1000_VT, K1000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 2000) {
hipLaunchKernelGGL(( kSortCandidateGroups<K2000_NT, K2000_VT, K2000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 3000) {
hipLaunchKernelGGL(( kSortCandidateGroups<K3000_NT, K3000_VT, K3000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, d_candidate);
}
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("SortCandidateGroups %.3f ms\n", gpu_timer.elapsed());
}
void MergeCandidateGroups(const int num_query, const int num_reference,
const int padded_num_reference,
const int num_nearest_neighbor,
long long* const d_candidate) {
dim3 block;
int remaining;
if (num_nearest_neighbor == 500) {
block = dim3(K500_NT);
remaining = DivideAndCeil(num_reference, K500_NV);
} else if (num_nearest_neighbor == 1000) {
block = dim3(K1000_NT);
remaining = DivideAndCeil(num_reference, K1000_NV);
} else if (num_nearest_neighbor == 2000) {
block = dim3(K2000_NT);
remaining = DivideAndCeil(num_reference, K2000_NV);
} else if (num_nearest_neighbor == 3000) {
block = dim3(K3000_NT);
remaining = DivideAndCeil(num_reference, K3000_NV);
}
float total_elapsed = 0.0f;
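// Halving merge: with 'remaining' sorted candidate groups, each pass launches
// batch = floor(remaining / 2) merge blocks with span = ceil(remaining / 2),
// which presumably merge group i with group i + span, so the number of groups
// roughly halves per pass until a single sorted group is left.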
while (remaining > 1) {
int batch = DivideAndFloor(remaining, 2);
int span = DivideAndCeil(remaining, 2);
dim3 grid(batch, num_query);
CREATE_AND_START_TIMER;
if (num_nearest_neighbor == 500) {
hipLaunchKernelGGL(( kMergeCandidateGroups<K500_NT, K500_VT, K500_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 1000) {
hipLaunchKernelGGL(( kMergeCandidateGroups<K1000_NT, K1000_VT, K1000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 2000) {
hipLaunchKernelGGL(( kMergeCandidateGroups<K2000_NT, K2000_VT, K2000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 3000) {
hipLaunchKernelGGL(( kMergeCandidateGroups<K3000_NT, K3000_VT, K3000_NV>), dim3(grid), dim3(block), 0, 0,
num_reference, padded_num_reference, span, d_candidate);
}
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) total_elapsed += gpu_timer.elapsed();
remaining = span;
}
if (kPrintTime) printf("MergeCandidateGroups %.3f ms\n", total_elapsed);
}
void RetrieveResults(const int num_query, const int padded_num_reference,
const int num_nearest_neighbor,
const long long* const d_candidate, int* const d_knn_index,
float* const d_knn_distance) {
dim3 block(min(num_nearest_neighbor, 1024));
dim3 grid(DivideAndCeil(num_nearest_neighbor, 1024), num_query);
CREATE_AND_START_TIMER;
hipLaunchKernelGGL(( kRetrieveResults), dim3(grid), dim3(block), 0, 0,
padded_num_reference, num_nearest_neighbor, d_candidate, d_knn_index,
d_knn_distance);
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("RetrieveResults %.3f ms\n", gpu_timer.elapsed());
}
void BruteForceKnnSearch(const int num_dimension, const int num_query,
const int num_reference,
const int num_nearest_neighbor,
const float* const query, const float* const reference,
int* const knn_index, float* const knn_distance) {
assert(num_dimension > 0);
assert(num_query > 0);
assert(num_reference > 0);
assert(num_nearest_neighbor == 500 || num_nearest_neighbor == 1000 ||
num_nearest_neighbor == 2000 || num_nearest_neighbor == 3000);
assert(num_reference >= num_nearest_neighbor);
assert(query != NULL);
assert(reference != NULL);
assert(knn_index != NULL);
assert(knn_distance != NULL);
// The reason that 'query' and 'reference' are padded is that kComputeDistances
// only works on complete 96 x 96 tiles of 'candidate' and processes a chunk
// of 16 dimensions in each iteration.
const int padded_num_query = CeilToMultiple(num_query, TILE_SIZE);
const int padded_num_reference = CeilToMultiple(num_reference, TILE_SIZE);
const int padded_num_dimension = CeilToMultiple(num_dimension, CHUNK_SIZE);
float* h_padded_query = new float[padded_num_dimension * padded_num_query];
float* h_padded_reference =
new float[padded_num_dimension * padded_num_reference];
float* d_padded_query = NULL;
CHECK_ERROR(
hipMalloc((void**)&d_padded_query,
sizeof(float) * padded_num_dimension * padded_num_query));
float* d_padded_reference = NULL;
CHECK_ERROR(
hipMalloc((void**)&d_padded_reference,
sizeof(float) * padded_num_dimension * padded_num_reference));
long long* d_candidate = NULL;
CHECK_ERROR(
hipMalloc((void**)&d_candidate,
sizeof(long long) * padded_num_query * padded_num_reference));
int* d_knn_index = NULL;
CHECK_ERROR(hipMalloc((void**)&d_knn_index,
sizeof(int) * num_query * num_nearest_neighbor));
float* d_knn_distance = NULL;
CHECK_ERROR(hipMalloc((void**)&d_knn_distance,
sizeof(float) * num_query * num_nearest_neighbor));
memset((void*)h_padded_query, 0,
sizeof(float) * padded_num_dimension * padded_num_query);
for (int i = 0; i < num_dimension; ++i)
memcpy(h_padded_query + padded_num_query * i, query + num_query * i,
sizeof(float) * num_query);
memset((void*)h_padded_reference, 0,
sizeof(float) * padded_num_dimension * padded_num_reference);
for (int i = 0; i < num_dimension; ++i)
memcpy(h_padded_reference + padded_num_reference * i,
reference + num_reference * i, sizeof(float) * num_reference);
CHECK_ERROR(
hipMemcpy(d_padded_query, h_padded_query,
sizeof(float) * padded_num_dimension * padded_num_query,
hipMemcpyHostToDevice));
CHECK_ERROR(
hipMemcpy(d_padded_reference, h_padded_reference,
sizeof(float) * padded_num_dimension * padded_num_reference,
hipMemcpyHostToDevice));
ComputeDistances(padded_num_dimension, padded_num_query, padded_num_reference,
d_padded_query, d_padded_reference, d_candidate);
SortCandidateGroups(num_query, num_reference, padded_num_reference,
num_nearest_neighbor, d_candidate);
MergeCandidateGroups(num_query, num_reference, padded_num_reference,
num_nearest_neighbor, d_candidate);
RetrieveResults(num_query, padded_num_reference, num_nearest_neighbor,
d_candidate, d_knn_index, d_knn_distance);
CHECK_ERROR(hipMemcpy(knn_index, d_knn_index,
sizeof(int) * num_query * num_nearest_neighbor,
hipMemcpyDeviceToHost));
CHECK_ERROR(hipMemcpy(knn_distance, d_knn_distance,
sizeof(float) * num_query * num_nearest_neighbor,
hipMemcpyDeviceToHost));
delete[] h_padded_query;
delete[] h_padded_reference;
CHECK_ERROR(hipFree(d_padded_query));
CHECK_ERROR(hipFree(d_padded_reference));
CHECK_ERROR(hipFree(d_candidate));
CHECK_ERROR(hipFree(d_knn_index));
CHECK_ERROR(hipFree(d_knn_distance));
}
| 21874fa2e63fed027da1c7d2d76aff7a5edec60e.cu | /*******************************************************************************
* bf-knn (Brute-Force k-Nearest Neighbors Search on the GPU) is the proprietary
* property of The Regents of the University of California ("The Regents.")
*
* Copyright © 2015 The Regents of the University of California, Davis campus.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted by nonprofit, research institutions for research
* use only, provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The name of The Regents may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* The end-user understands that the program was developed for research purposes
* and is advised not to rely exclusively on the program for any reason.
*
* THE SOFTWARE PROVIDED IS ON AN "AS IS" BASIS, AND THE REGENTS HAVE NO
* OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS. THE REGENTS SPECIFICALLY DISCLAIM ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO
* EVENT SHALL THE REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
* INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES, INCLUDING BUT NOT LIMITED TO
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, LOSS OF USE, DATA OR PROFITS, OR
* BUSINESS INTERRUPTION, HOWEVER CAUSED AND UNDER ANY THEORY OF LIABILITY
* WHETHER IN CONTRACT, STRICT LIABILITY OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND ITS
* DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* If you do not agree to these terms, do not download or use the software. This
* license may be modified only in a writing signed by authorized signatory of
* both parties.
*
* For commercial license information please contact [email protected].
******************************************************************************/
#include "bf_knn_host.h"
#include "util.h"
#include "bf_knn_device.cuh"
const bool kPrintTime = true;
void ComputeDistances(const int padded_num_dimension,
const int padded_num_query,
const int padded_num_reference,
const float* const d_padded_query,
const float* const d_padded_reference,
long long* const d_candidate) {
assert(padded_num_dimension % CHUNK_SIZE == 0);
assert(padded_num_query % TILE_SIZE == 0);
assert(padded_num_reference % TILE_SIZE == 0);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(padded_num_reference / TILE_SIZE, padded_num_query / TILE_SIZE);
CREATE_AND_START_TIMER;
kComputeDistances<<<grid, block>>>
(padded_num_dimension, padded_num_query, padded_num_reference,
d_padded_query, d_padded_reference, d_candidate);
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("ComputeDistances %.3f ms\n", gpu_timer.elapsed());
}
void SortCandidateGroups(const int num_query, const int num_reference,
const int padded_num_reference,
const int num_nearest_neighbor,
long long* const d_candidate) {
dim3 block, grid;
if (num_nearest_neighbor == 500) {
block = dim3(K500_NT);
grid = dim3(DivideAndCeil(num_reference, K500_NV), num_query);
} else if (num_nearest_neighbor == 1000) {
block = dim3(K1000_NT);
grid = dim3(DivideAndCeil(num_reference, K1000_NV), num_query);
} else if (num_nearest_neighbor == 2000) {
block = dim3(K2000_NT);
grid = dim3(DivideAndCeil(num_reference, K2000_NV), num_query);
} else if (num_nearest_neighbor == 3000) {
block = dim3(K3000_NT);
grid = dim3(DivideAndCeil(num_reference, K3000_NV), num_query);
}
CREATE_AND_START_TIMER;
if (num_nearest_neighbor == 500) {
kSortCandidateGroups<K500_NT, K500_VT, K500_NV><<<grid, block>>>
(num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 1000) {
kSortCandidateGroups<K1000_NT, K1000_VT, K1000_NV><<<grid, block>>>
(num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 2000) {
kSortCandidateGroups<K2000_NT, K2000_VT, K2000_NV><<<grid, block>>>
(num_reference, padded_num_reference, d_candidate);
} else if (num_nearest_neighbor == 3000) {
kSortCandidateGroups<K3000_NT, K3000_VT, K3000_NV><<<grid, block>>>
(num_reference, padded_num_reference, d_candidate);
}
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("SortCandidateGroups %.3f ms\n", gpu_timer.elapsed());
}
void MergeCandidateGroups(const int num_query, const int num_reference,
const int padded_num_reference,
const int num_nearest_neighbor,
long long* const d_candidate) {
dim3 block;
int remaining;
if (num_nearest_neighbor == 500) {
block = dim3(K500_NT);
remaining = DivideAndCeil(num_reference, K500_NV);
} else if (num_nearest_neighbor == 1000) {
block = dim3(K1000_NT);
remaining = DivideAndCeil(num_reference, K1000_NV);
} else if (num_nearest_neighbor == 2000) {
block = dim3(K2000_NT);
remaining = DivideAndCeil(num_reference, K2000_NV);
} else if (num_nearest_neighbor == 3000) {
block = dim3(K3000_NT);
remaining = DivideAndCeil(num_reference, K3000_NV);
}
float total_elapsed = 0.0f;
while (remaining > 1) {
int batch = DivideAndFloor(remaining, 2);
int span = DivideAndCeil(remaining, 2);
dim3 grid(batch, num_query);
CREATE_AND_START_TIMER;
if (num_nearest_neighbor == 500) {
kMergeCandidateGroups<K500_NT, K500_VT, K500_NV><<<grid, block>>>
(num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 1000) {
kMergeCandidateGroups<K1000_NT, K1000_VT, K1000_NV><<<grid, block>>>
(num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 2000) {
kMergeCandidateGroups<K2000_NT, K2000_VT, K2000_NV><<<grid, block>>>
(num_reference, padded_num_reference, span, d_candidate);
} else if (num_nearest_neighbor == 3000) {
kMergeCandidateGroups<K3000_NT, K3000_VT, K3000_NV><<<grid, block>>>
(num_reference, padded_num_reference, span, d_candidate);
}
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) total_elapsed += gpu_timer.elapsed();
remaining = span;
}
if (kPrintTime) printf("MergeCandidateGroups %.3f ms\n", total_elapsed);
}
void RetrieveResults(const int num_query, const int padded_num_reference,
const int num_nearest_neighbor,
const long long* const d_candidate, int* const d_knn_index,
float* const d_knn_distance) {
dim3 block(min(num_nearest_neighbor, 1024));
dim3 grid(DivideAndCeil(num_nearest_neighbor, 1024), num_query);
CREATE_AND_START_TIMER;
kRetrieveResults<<<grid, block>>>
(padded_num_reference, num_nearest_neighbor, d_candidate, d_knn_index,
d_knn_distance);
SYNC_AND_CHECK_ERROR;
STOP_TIMER_AND_CALCULATE_ELAPSED;
if (kPrintTime) printf("RetrieveResults %.3f ms\n", gpu_timer.elapsed());
}
void BruteForceKnnSearch(const int num_dimension, const int num_query,
const int num_reference,
const int num_nearest_neighbor,
const float* const query, const float* const reference,
int* const knn_index, float* const knn_distance) {
assert(num_dimension > 0);
assert(num_query > 0);
assert(num_reference > 0);
assert(num_nearest_neighbor == 500 || num_nearest_neighbor == 1000 ||
num_nearest_neighbor == 2000 || num_nearest_neighbor == 3000);
assert(num_reference >= num_nearest_neighbor);
assert(query != NULL);
assert(reference != NULL);
assert(knn_index != NULL);
assert(knn_distance != NULL);
// The reason that 'query' and 'reference' are padded is that kComputeDistances
// only works on complete 96 x 96 tiles of 'candidate' and processes a chunk
// of 16 dimensions in each iteration.
const int padded_num_query = CeilToMultiple(num_query, TILE_SIZE);
const int padded_num_reference = CeilToMultiple(num_reference, TILE_SIZE);
const int padded_num_dimension = CeilToMultiple(num_dimension, CHUNK_SIZE);
float* h_padded_query = new float[padded_num_dimension * padded_num_query];
float* h_padded_reference =
new float[padded_num_dimension * padded_num_reference];
float* d_padded_query = NULL;
CHECK_ERROR(
cudaMalloc((void**)&d_padded_query,
sizeof(float) * padded_num_dimension * padded_num_query));
float* d_padded_reference = NULL;
CHECK_ERROR(
cudaMalloc((void**)&d_padded_reference,
sizeof(float) * padded_num_dimension * padded_num_reference));
long long* d_candidate = NULL;
CHECK_ERROR(
cudaMalloc((void**)&d_candidate,
sizeof(long long) * padded_num_query * padded_num_reference));
int* d_knn_index = NULL;
CHECK_ERROR(cudaMalloc((void**)&d_knn_index,
sizeof(int) * num_query * num_nearest_neighbor));
float* d_knn_distance = NULL;
CHECK_ERROR(cudaMalloc((void**)&d_knn_distance,
sizeof(float) * num_query * num_nearest_neighbor));
memset((void*)h_padded_query, 0,
sizeof(float) * padded_num_dimension * padded_num_query);
for (int i = 0; i < num_dimension; ++i)
memcpy(h_padded_query + padded_num_query * i, query + num_query * i,
sizeof(float) * num_query);
memset((void*)h_padded_reference, 0,
sizeof(float) * padded_num_dimension * padded_num_reference);
for (int i = 0; i < num_dimension; ++i)
memcpy(h_padded_reference + padded_num_reference * i,
reference + num_reference * i, sizeof(float) * num_reference);
CHECK_ERROR(
cudaMemcpy(d_padded_query, h_padded_query,
sizeof(float) * padded_num_dimension * padded_num_query,
cudaMemcpyHostToDevice));
CHECK_ERROR(
cudaMemcpy(d_padded_reference, h_padded_reference,
sizeof(float) * padded_num_dimension * padded_num_reference,
cudaMemcpyHostToDevice));
ComputeDistances(padded_num_dimension, padded_num_query, padded_num_reference,
d_padded_query, d_padded_reference, d_candidate);
SortCandidateGroups(num_query, num_reference, padded_num_reference,
num_nearest_neighbor, d_candidate);
MergeCandidateGroups(num_query, num_reference, padded_num_reference,
num_nearest_neighbor, d_candidate);
RetrieveResults(num_query, padded_num_reference, num_nearest_neighbor,
d_candidate, d_knn_index, d_knn_distance);
CHECK_ERROR(cudaMemcpy(knn_index, d_knn_index,
sizeof(int) * num_query * num_nearest_neighbor,
cudaMemcpyDeviceToHost));
CHECK_ERROR(cudaMemcpy(knn_distance, d_knn_distance,
sizeof(float) * num_query * num_nearest_neighbor,
cudaMemcpyDeviceToHost));
delete[] h_padded_query;
delete[] h_padded_reference;
CHECK_ERROR(cudaFree(d_padded_query));
CHECK_ERROR(cudaFree(d_padded_reference));
CHECK_ERROR(cudaFree(d_candidate));
CHECK_ERROR(cudaFree(d_knn_index));
CHECK_ERROR(cudaFree(d_knn_distance));
}
|
af10510ab3c9108c5145845ed23c9ab8a4971fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file generate_phase_factors.cu
*
* \brief CUDA kernel to generate plane-wave atomic phase factors.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
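/* For each (G-vector, atom) pair the kernel below evaluates the phase factor
   exp(i * 2*pi * G.r) = cos(p) + i*sin(p) with p = 2*pi*(gx*ax + gy*ay + gz*az),
   where (ax, ay, az) is the atom position and (gx, gy, gz) the integer G-vector.
   One thread handles one local G-vector and blockIdx.y selects the atom. */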
__global__ void generate_phase_factors_gpu_kernel
(
int num_gvec_loc,
int num_atoms,
double const* atom_pos,
int const* gvec,
acc_complex_double_t* phase_factors
)
{
int ia = blockIdx.y;
int igloc = blockIdx.x * blockDim.x + threadIdx.x;
if (igloc < num_gvec_loc) {
int gvx = gvec[array2D_offset(igloc, 0, num_gvec_loc)];
int gvy = gvec[array2D_offset(igloc, 1, num_gvec_loc)];
int gvz = gvec[array2D_offset(igloc, 2, num_gvec_loc)];
double ax = atom_pos[array2D_offset(ia, 0, num_atoms)];
double ay = atom_pos[array2D_offset(ia, 1, num_atoms)];
double az = atom_pos[array2D_offset(ia, 2, num_atoms)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
double sinp = sin(p);
double cosp = cos(p);
phase_factors[array2D_offset(igloc, ia, num_gvec_loc)] = make_accDoubleComplex(cosp, sinp);
}
}
extern "C" void generate_phase_factors_gpu(int num_gvec_loc__,
int num_atoms__,
int const* gvec__,
double const* atom_pos__,
acc_complex_double_t* phase_factors__)
{
dim3 grid_t(32);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
accLaunchKernel((generate_phase_factors_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_gvec_loc__,
num_atoms__,
atom_pos__,
gvec__,
phase_factors__
);
}
| af10510ab3c9108c5145845ed23c9ab8a4971fc6.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file generate_phase_factors.cu
*
* \brief CUDA kernel to generate plane-wave atomic phase factors.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
__global__ void generate_phase_factors_gpu_kernel
(
int num_gvec_loc,
int num_atoms,
double const* atom_pos,
int const* gvec,
acc_complex_double_t* phase_factors
)
{
int ia = blockIdx.y;
int igloc = blockIdx.x * blockDim.x + threadIdx.x;
if (igloc < num_gvec_loc) {
int gvx = gvec[array2D_offset(igloc, 0, num_gvec_loc)];
int gvy = gvec[array2D_offset(igloc, 1, num_gvec_loc)];
int gvz = gvec[array2D_offset(igloc, 2, num_gvec_loc)];
double ax = atom_pos[array2D_offset(ia, 0, num_atoms)];
double ay = atom_pos[array2D_offset(ia, 1, num_atoms)];
double az = atom_pos[array2D_offset(ia, 2, num_atoms)];
double p = twopi * (ax * gvx + ay * gvy + az * gvz);
double sinp = sin(p);
double cosp = cos(p);
phase_factors[array2D_offset(igloc, ia, num_gvec_loc)] = make_accDoubleComplex(cosp, sinp);
}
}
extern "C" void generate_phase_factors_gpu(int num_gvec_loc__,
int num_atoms__,
int const* gvec__,
double const* atom_pos__,
acc_complex_double_t* phase_factors__)
{
dim3 grid_t(32);
dim3 grid_b(num_blocks(num_gvec_loc__, grid_t.x), num_atoms__);
accLaunchKernel((generate_phase_factors_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
num_gvec_loc__,
num_atoms__,
atom_pos__,
gvec__,
phase_factors__
);
}
|
9d22119e3c85b4b648d0b0d3a6c93dd9f3254dbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THHUNN/THHUNN.h>
#include <THH/THHTensor.hpp>
#include <THHUNN/common.h>
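// MaxUnpoolForward scatters each input element to the output position recorded in bottom_mask
// (the argmax saved by the matching max-pooling pass); only the masked positions are written.
// MaxUnpoolBackward gathers gradients back from those same positions. Mask entries are shifted
// by TH_INDEX_BASE before being used as offsets.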
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int64_t* bottom_mask,
const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) { // index here indexes the input pixels
int c = (index / iwidth / iheight) % channels;
int n = index / iwidth / iheight / channels;
top_data += (n*channels + c)*oheight*owidth;
int maxind = bottom_mask[index] - TH_INDEX_BASE;
top_data[maxind] = bottom_data[index];
}
}
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const int64_t* bottom_mask,
const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = (index / iwidth / iheight) % channels;
int n = index / iwidth / iheight / channels;
top_diff += (n*channels + c)*oheight*owidth;
int maxind = bottom_mask[index] - TH_INDEX_BASE;
bottom_diff[index] = top_diff[maxind];
}
}
#include <THHUNN/generic/SpatialMaxUnpooling.hip>
#include <THH/THHGenerateFloatTypes.h>
| 9d22119e3c85b4b648d0b0d3a6c93dd9f3254dbf.cu | #include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/common.h>
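// MaxUnpoolForward scatters each input element to the output position recorded in bottom_mask
// (the argmax saved by the matching max-pooling pass); only the masked positions are written.
// MaxUnpoolBackward gathers gradients back from those same positions. Mask entries are shifted
// by TH_INDEX_BASE before being used as offsets.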
template <typename Dtype>
__global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int64_t* bottom_mask,
const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) { // index here indexes the input pixels
int c = (index / iwidth / iheight) % channels;
int n = index / iwidth / iheight / channels;
top_data += (n*channels + c)*oheight*owidth;
int maxind = bottom_mask[index] - TH_INDEX_BASE;
top_data[maxind] = bottom_data[index];
}
}
template <typename Dtype>
__global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const int64_t* bottom_mask,
const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
int c = (index / iwidth / iheight) % channels;
int n = index / iwidth / iheight / channels;
top_diff += (n*channels + c)*oheight*owidth;
int maxind = bottom_mask[index] - TH_INDEX_BASE;
bottom_diff[index] = top_diff[maxind];
}
}
#include <THCUNN/generic/SpatialMaxUnpooling.cu>
#include <THC/THCGenerateFloatTypes.h>
|
af0ed5ad3d334d2c2b51e448338ca61a80412fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#define BLOCK_SIZE 256
#define NUM_NEIGHBORS 4
#include "mp2.h"
#include "mp2-part3-util.h"
#include "mp2-util.h"
event_pair timer;
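// Pipeline: particles are first hashed into a uniform grid of bins (host_binning /
// device_binning); the nearest-neighbour search for each particle then only visits its own bin
// and the 26 adjacent bins (host_binned_knn / device_binned_knn). main() runs both the device
// path and the serial host reference and cross-checks the results.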
//------------------------------------------------------------------------------
void host_knn_particle(
float3 *particles, int *bins, int *part_knn, int id, int bin_id, int bx,
int by, int bz, int3 binning_dim, int bin_size
) {
// for each particle
// loop over all the neighbor bins in x,y and z,
// as well as the bin it is in itself
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
float3 pos = particles[id];
for(int x_offset=-1;x_offset<2;x_offset++)
{
int nx = bx + x_offset;
if(nx > -1 && nx < binning_dim.x)
{
for(int y_offset=-1;y_offset<2;y_offset++)
{
int ny = by + y_offset;
if(ny > -1 && ny < binning_dim.y)
{
for(int z_offset=-1;z_offset<2;z_offset++)
{
int nz = bz + z_offset;
if(nz > -1 && nz < binning_dim.z)
{
int neigh_bin_id = nx + binning_dim.x*(ny + binning_dim.y*nz);
// loop over all the particles in those bins
for(int bin_offset=0;bin_offset<bin_size;bin_offset++)
{
int neigh_particle_id = bins[neigh_bin_id*bin_size + bin_offset];
// skip empty bin entries and don't interact with yourself
if(neigh_particle_id != -1 && neigh_particle_id != id)
{
float rsq = dist2(pos,particles[neigh_particle_id]);
insert_list(
&neigh_dist[0], &neigh_ids[0], rsq,
neigh_particle_id
);
}
}
}
}
}
}
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
part_knn[j] = neigh_ids[j];
}
}
//------------------------------------------------------------------------------
__device__
void device_knn_particle(
float3 *particles, int *bins, int *part_knn, int id, int bin_id, int bx,
int by, int bz, int3 binning_dim, int bin_size
) {
// for each particle
// loop over all the neighbor bins in x,y and z,
// as well as the bin it is in itself
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
float3 pos = particles[id];
for(int x_offset=-1;x_offset<2;x_offset++)
{
int nx = bx + x_offset;
if(nx > -1 && nx < binning_dim.x)
{
for(int y_offset=-1;y_offset<2;y_offset++)
{
int ny = by + y_offset;
if(ny > -1 && ny < binning_dim.y)
{
for(int z_offset=-1;z_offset<2;z_offset++)
{
int nz = bz + z_offset;
if(nz > -1 && nz < binning_dim.z)
{
int neigh_bin_id = nx + binning_dim.x*(ny + binning_dim.y*nz);
// loop over all the particles in those bins
for(int bin_offset=0;bin_offset<bin_size;bin_offset++)
{
int neigh_particle_id = bins[neigh_bin_id*bin_size + bin_offset];
// skip empty bin entries and don't interact with yourself
__syncthreads();
if(neigh_particle_id != -1 && neigh_particle_id != id)
{
__syncthreads();
float rsq = dist2(pos,particles[neigh_particle_id]);
insert_list(
&neigh_dist[0], &neigh_ids[0], rsq,
neigh_particle_id
);
__syncthreads();
}
__syncthreads();
}
}
}
}
}
}
}
__syncthreads();
for(int j=0;j<NUM_NEIGHBORS;j++)
{
__syncthreads();
part_knn[j] = neigh_ids[j];
}
__syncthreads();
}
//------------------------------------------------------------------------------
void host_binned_knn(
float3 *particles, int *bins, int *knn, int3 binning_dim, int bin_size
) {
// loop over all bins
for(int bx=0;bx<binning_dim.x;bx++)
{
for(int by=0;by<binning_dim.y;by++)
{
for(int bz=0;bz<binning_dim.z;bz++)
{
int bin_id = bx + binning_dim.x*(by + binning_dim.y*bz);
for(int j=0;j<bin_size;j++)
{
int id = bins[bin_id*bin_size + j];
if(id != -1)
{
host_knn_particle(
particles, bins, &knn[id*NUM_NEIGHBORS],id, bin_id, bx, by, bz,
binning_dim, bin_size
);
}
}
}
}
}
}
//------------------------------------------------------------------------------
__global__
void device_binned_knn(
float3 *particles, int *bins, int *knn, int3 binning_dim, int bin_size
) {
int bx = blockIdx.x * blockDim.x + threadIdx.x;
int by = blockIdx.y * blockDim.y + threadIdx.y;
int bz = blockIdx.z * blockDim.z + threadIdx.z;
if (bx < binning_dim.x && by < binning_dim.y && bz < binning_dim.z){
int bin_id = bx + binning_dim.x*(by + binning_dim.y*bz);
for(int j=0; j<bin_size; j++)
{
int id = bins[bin_id * bin_size + j];
if(id != -1)
{
device_knn_particle(
particles, bins, &knn[id * NUM_NEIGHBORS], id, bin_id, bx, by, bz,
binning_dim, bin_size
);
}
}
}
}
//------------------------------------------------------------------------------
void host_binning(
float3 *particles, int *bins, int *bin_counters, int3 gridding, int bin_size,
int num_particles
) {
for (int i=0; i<num_particles; i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
unsigned int offset = bin_counters[bin];
bin_counters[bin]++;
bins[bin * bin_size + offset] = i;
}
}
}
//------------------------------------------------------------------------------
__global__
void device_binning(
float3 * d_particles, int * d_bins, int * d_bin_counters, int3 gridding,
int bin_size, int num_particles
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < num_particles)
{
unsigned int bin = bin_index(d_particles[i], gridding);
if(d_bin_counters[bin] < bin_size)
{
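// atomicAdd returns the previous counter value, giving this thread a unique slot in the bin
// even when many threads land in the same bin concurrently (the serial host version above can
// simply post-increment the counter).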
int offset = atomicAdd(&d_bin_counters[bin], 1);
d_bins[bin * bin_size + offset] = i;
}
}
}
//------------------------------------------------------------------------------
void allocate_host_memory(
int num_particles, int num_bins, int bin_size, float3 *&h_particles,
float3 *&h_particles_checker, int *&h_bins, int *&h_bins_checker,
int *&h_bin_counters, int *&h_bin_counters_checker, int *&h_knn,
int *&h_knn_checker
) {
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
h_knn = (int*)malloc(num_particles * NUM_NEIGHBORS * sizeof(int));
h_knn_checker = (int*)malloc(num_particles * NUM_NEIGHBORS * sizeof(int));
}
//------------------------------------------------------------------------------
void allocate_device_memory(
int num_particles, int num_bins, int bin_size, float3 *&d_particles,
int *&d_bins, int *&d_knn, int *&d_bin_counters
) {
hipMalloc((void**)&d_particles, num_particles * sizeof(float3));
hipMalloc((void**)&d_bins, num_bins * bin_size * sizeof(int));
hipMalloc((void**)&d_knn, NUM_NEIGHBORS * num_particles * sizeof(int));
hipMalloc((void**)&d_bin_counters, num_bins * sizeof(int));
}
//------------------------------------------------------------------------------
void deallocate_host_memory(
float3 *h_particles, int *h_bins, int *h_knn, int *h_bin_counters,
float3 *h_particles_checker, int *h_bins_checker, int *h_knn_checker,
int *h_bin_counters_checker
) {
// deallocate memory
free(h_particles);
free(h_bins);
free(h_knn);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_knn_checker);
free(h_bin_counters_checker);
}
//------------------------------------------------------------------------------
void deallocate_device_memory(
float3 *d_particles, int *d_bins, int *d_knn, int *d_bin_counters
) {
hipFree(d_particles);
hipFree(d_bins);
hipFree(d_knn);
hipFree(d_bin_counters);
}
//------------------------------------------------------------------------------
int main(void)
{
// Hyperparameters
int num_particles = 64*1024;
int log_bpd = 4;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim * bins_per_dim * bins_per_dim;
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd, log_bpd, log_bpd);
int3 binning_dim = make_int3(bins_per_dim,bins_per_dim,bins_per_dim);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_knn = 0;
int *h_knn_checker = 0;
float3 *d_particles = 0;
int *d_bins = 0;
int *d_knn = 0;
int *d_bin_counters = 0;
allocate_host_memory(
num_particles, num_bins, bin_size, h_particles,
h_particles_checker, h_bins, h_bins_checker,
h_bin_counters, h_bin_counters_checker, h_knn,
h_knn_checker
);
allocate_device_memory(
num_particles, num_bins, bin_size, d_particles, d_bins, d_knn,
d_bin_counters
);
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3(
(float)rand()/(float)RAND_MAX,
(float)rand()/(float)RAND_MAX,
(float)rand()/(float)RAND_MAX
);
}
for(int i=0;i<num_bins;i++)
{
h_bin_counters[i] = 0; h_bin_counters_checker[i] = 0;
}
for(int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = -1;
h_bins_checker[i] = -1;
}
for(int i=0;i<num_particles*NUM_NEIGHBORS;i++)
{
h_knn[i] = -1;
h_knn_checker[i] = -1;
}
hipMemcpy(
d_particles, h_particles, num_particles * sizeof(float3),
hipMemcpyHostToDevice
);
check_cuda_error("Memcpy error");
hipMemset(d_bins, -1, num_bins * bin_size * sizeof(int));
hipMemset(d_knn, -1, NUM_NEIGHBORS * num_particles * sizeof(int));
hipMemset(d_bin_counters, 0, num_bins * sizeof(int));
check_cuda_error("Memset error");
start_timer(&timer);
hipLaunchKernelGGL(( device_binning), dim3(num_particles / 256), dim3(256), 0, 0,
d_particles, d_bins, d_bin_counters, gridding, bin_size, num_particles
);
check_cuda_error("Binning error");
stop_timer(&timer,"Device binning completed");
const dim3 blockSize(
4,
16,
16
);
start_timer(&timer);
hipLaunchKernelGGL(( device_binned_knn), dim3(num_bins/1024), dim3(blockSize), 0, 0,
d_particles, d_bins, d_knn, binning_dim, bin_size
);
check_cuda_error("Binned knn error");
stop_timer(&timer,"Device binned knn completed");
hipMemcpy(
h_bin_counters, d_bin_counters, num_bins * sizeof(int),
hipMemcpyDeviceToHost
);
hipMemcpy(
h_knn, d_knn, NUM_NEIGHBORS * num_particles * sizeof(int),
hipMemcpyDeviceToHost
);
// generate reference output
start_timer(&timer);
host_binning(
h_particles_checker, h_bins_checker, h_bin_counters_checker, gridding,
bin_size, num_particles
);
stop_timer(&timer,"Host binning completed");
start_timer(&timer);
host_binned_knn(
h_particles_checker, h_bins_checker, h_knn_checker, binning_dim, bin_size
);
stop_timer(&timer,"Host binned knn completed");
// check CUDA output versus reference output
cross_check_results(
num_particles, num_bins, bin_size, h_bin_counters,
h_bin_counters_checker, h_knn, h_knn_checker
);
deallocate_host_memory(
h_particles, h_bins, h_knn, h_bin_counters, h_particles_checker,
h_bins_checker, h_knn_checker, h_bin_counters_checker
);
deallocate_device_memory(d_particles, d_bins, d_knn, d_bin_counters);
return 0;
}
| af0ed5ad3d334d2c2b51e448338ca61a80412fc6.cu | #include <cassert>
#define BLOCK_SIZE 256
#define NUM_NEIGHBORS 4
#include "mp2.h"
#include "mp2-part3-util.h"
#include "mp2-util.h"
event_pair timer;
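// Pipeline: particles are first hashed into a uniform grid of bins (host_binning /
// device_binning); the nearest-neighbour search for each particle then only visits its own bin
// and the 26 adjacent bins (host_binned_knn / device_binned_knn). main() runs both the device
// path and the serial host reference and cross-checks the results.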
//------------------------------------------------------------------------------
void host_knn_particle(
float3 *particles, int *bins, int *part_knn, int id, int bin_id, int bx,
int by, int bz, int3 binning_dim, int bin_size
) {
// for each particle
// loop over all the neighbor bins in x,y and z,
// as well as the bin it is in itself
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
float3 pos = particles[id];
for(int x_offset=-1;x_offset<2;x_offset++)
{
int nx = bx + x_offset;
if(nx > -1 && nx < binning_dim.x)
{
for(int y_offset=-1;y_offset<2;y_offset++)
{
int ny = by + y_offset;
if(ny > -1 && ny < binning_dim.y)
{
for(int z_offset=-1;z_offset<2;z_offset++)
{
int nz = bz + z_offset;
if(nz > -1 && nz < binning_dim.z)
{
int neigh_bin_id = nx + binning_dim.x*(ny + binning_dim.y*nz);
// loop over all the particles in those bins
for(int bin_offset=0;bin_offset<bin_size;bin_offset++)
{
int neigh_particle_id = bins[neigh_bin_id*bin_size + bin_offset];
// skip empty bin entries and don't interact with yourself
if(neigh_particle_id != -1 && neigh_particle_id != id)
{
float rsq = dist2(pos,particles[neigh_particle_id]);
insert_list(
&neigh_dist[0], &neigh_ids[0], rsq,
neigh_particle_id
);
}
}
}
}
}
}
}
}
for(int j=0;j<NUM_NEIGHBORS;j++)
{
part_knn[j] = neigh_ids[j];
}
}
//------------------------------------------------------------------------------
__device__
void device_knn_particle(
float3 *particles, int *bins, int *part_knn, int id, int bin_id, int bx,
int by, int bz, int3 binning_dim, int bin_size
) {
// for each particle
// loop over all the neighbor bins in x,y and z,
// as well as the bin it is in itself
float neigh_dist[NUM_NEIGHBORS];
int neigh_ids[NUM_NEIGHBORS];
init_list(&neigh_dist[0],NUM_NEIGHBORS,2.0f);
init_list(&neigh_ids[0],NUM_NEIGHBORS,-1);
float3 pos = particles[id];
for(int x_offset=-1;x_offset<2;x_offset++)
{
int nx = bx + x_offset;
if(nx > -1 && nx < binning_dim.x)
{
for(int y_offset=-1;y_offset<2;y_offset++)
{
int ny = by + y_offset;
if(ny > -1 && ny < binning_dim.y)
{
for(int z_offset=-1;z_offset<2;z_offset++)
{
int nz = bz + z_offset;
if(nz > -1 && nz < binning_dim.z)
{
int neigh_bin_id = nx + binning_dim.x*(ny + binning_dim.y*nz);
// loop over all the particles in those bins
for(int bin_offset=0;bin_offset<bin_size;bin_offset++)
{
int neigh_particle_id = bins[neigh_bin_id*bin_size + bin_offset];
// skip empty bin entries and don't interact with yourself
__syncthreads();
if(neigh_particle_id != -1 && neigh_particle_id != id)
{
__syncthreads();
float rsq = dist2(pos,particles[neigh_particle_id]);
insert_list(
&neigh_dist[0], &neigh_ids[0], rsq,
neigh_particle_id
);
__syncthreads();
}
__syncthreads();
}
}
}
}
}
}
}
__syncthreads();
for(int j=0;j<NUM_NEIGHBORS;j++)
{
__syncthreads();
part_knn[j] = neigh_ids[j];
}
__syncthreads();
}
//------------------------------------------------------------------------------
void host_binned_knn(
float3 *particles, int *bins, int *knn, int3 binning_dim, int bin_size
) {
// loop over all bins
for(int bx=0;bx<binning_dim.x;bx++)
{
for(int by=0;by<binning_dim.y;by++)
{
for(int bz=0;bz<binning_dim.z;bz++)
{
int bin_id = bx + binning_dim.x*(by + binning_dim.y*bz);
for(int j=0;j<bin_size;j++)
{
int id = bins[bin_id*bin_size + j];
if(id != -1)
{
host_knn_particle(
particles, bins, &knn[id*NUM_NEIGHBORS],id, bin_id, bx, by, bz,
binning_dim, bin_size
);
}
}
}
}
}
}
//------------------------------------------------------------------------------
__global__
void device_binned_knn(
float3 *particles, int *bins, int *knn, int3 binning_dim, int bin_size
) {
int bx = blockIdx.x * blockDim.x + threadIdx.x;
int by = blockIdx.y * blockDim.y + threadIdx.y;
int bz = blockIdx.z * blockDim.z + threadIdx.z;
if (bx < binning_dim.x && by < binning_dim.y && bz < binning_dim.z){
int bin_id = bx + binning_dim.x*(by + binning_dim.y*bz);
for(int j=0; j<bin_size; j++)
{
int id = bins[bin_id * bin_size + j];
if(id != -1)
{
device_knn_particle(
particles, bins, &knn[id * NUM_NEIGHBORS], id, bin_id, bx, by, bz,
binning_dim, bin_size
);
}
}
}
}
//------------------------------------------------------------------------------
void host_binning(
float3 *particles, int *bins, int *bin_counters, int3 gridding, int bin_size,
int num_particles
) {
for (int i=0; i<num_particles; i++)
{
unsigned int bin = bin_index(particles[i],gridding);
if(bin_counters[bin] < bin_size)
{
unsigned int offset = bin_counters[bin];
bin_counters[bin]++;
bins[bin * bin_size + offset] = i;
}
}
}
//------------------------------------------------------------------------------
__global__
void device_binning(
float3 * d_particles, int * d_bins, int * d_bin_counters, int3 gridding,
int bin_size, int num_particles
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < num_particles)
{
unsigned int bin = bin_index(d_particles[i], gridding);
if(d_bin_counters[bin] < bin_size)
{
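// atomicAdd returns the previous counter value, giving this thread a unique slot in the bin
// even when many threads land in the same bin concurrently (the serial host version above can
// simply post-increment the counter).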
int offset = atomicAdd(&d_bin_counters[bin], 1);
d_bins[bin * bin_size + offset] = i;
}
}
}
//------------------------------------------------------------------------------
void allocate_host_memory(
int num_particles, int num_bins, int bin_size, float3 *&h_particles,
float3 *&h_particles_checker, int *&h_bins, int *&h_bins_checker,
int *&h_bin_counters, int *&h_bin_counters_checker, int *&h_knn,
int *&h_knn_checker
) {
h_particles = (float3*)malloc(num_particles * sizeof(float3));
h_particles_checker = (float3*)malloc(num_particles * sizeof(float3));
h_bins = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bins_checker = (int*)malloc(num_bins * bin_size * sizeof(int));
h_bin_counters = (int*)malloc(num_bins * sizeof(int));
h_bin_counters_checker = (int*)malloc(num_bins * sizeof(int));
h_knn = (int*)malloc(num_particles * NUM_NEIGHBORS * sizeof(int));
h_knn_checker = (int*)malloc(num_particles * NUM_NEIGHBORS * sizeof(int));
}
//------------------------------------------------------------------------------
void allocate_device_memory(
int num_particles, int num_bins, int bin_size, float3 *&d_particles,
int *&d_bins, int *&d_knn, int *&d_bin_counters
) {
cudaMalloc((void**)&d_particles, num_particles * sizeof(float3));
cudaMalloc((void**)&d_bins, num_bins * bin_size * sizeof(int));
cudaMalloc((void**)&d_knn, NUM_NEIGHBORS * num_particles * sizeof(int));
cudaMalloc((void**)&d_bin_counters, num_bins * sizeof(int));
}
//------------------------------------------------------------------------------
void deallocate_host_memory(
float3 *h_particles, int *h_bins, int *h_knn, int *h_bin_counters,
float3 *h_particles_checker, int *h_bins_checker, int *h_knn_checker,
int *h_bin_counters_checker
) {
// deallocate memory
free(h_particles);
free(h_bins);
free(h_knn);
free(h_bin_counters);
free(h_particles_checker);
free(h_bins_checker);
free(h_knn_checker);
free(h_bin_counters_checker);
}
//------------------------------------------------------------------------------
void deallocate_device_memory(
float3 *d_particles, int *d_bins, int *d_knn, int *d_bin_counters
) {
cudaFree(d_particles);
cudaFree(d_bins);
cudaFree(d_knn);
cudaFree(d_bin_counters);
}
//------------------------------------------------------------------------------
int main(void)
{
// Hyperparameters
int num_particles = 64*1024;
int log_bpd = 4;
int bins_per_dim = 1 << log_bpd;
unsigned int num_bins = bins_per_dim * bins_per_dim * bins_per_dim;
int bin_size = num_particles/num_bins * 3;
int3 gridding = make_int3(log_bpd, log_bpd, log_bpd);
int3 binning_dim = make_int3(bins_per_dim,bins_per_dim,bins_per_dim);
float3 *h_particles = 0;
int *h_bins = 0;
int *h_bin_counters = 0;
int *h_bins_checker = 0;
float3 *h_particles_checker = 0;
int *h_bin_counters_checker = 0;
int *h_knn = 0;
int *h_knn_checker = 0;
float3 *d_particles = 0;
int *d_bins = 0;
int *d_knn = 0;
int *d_bin_counters = 0;
allocate_host_memory(
num_particles, num_bins, bin_size, h_particles,
h_particles_checker, h_bins, h_bins_checker,
h_bin_counters, h_bin_counters_checker, h_knn,
h_knn_checker
);
allocate_device_memory(
num_particles, num_bins, bin_size, d_particles, d_bins, d_knn,
d_bin_counters
);
// generate random input
// initialize
srand(13);
for(int i=0;i< num_particles;i++)
{
h_particles[i] = h_particles_checker[i] = make_float3(
(float)rand()/(float)RAND_MAX,
(float)rand()/(float)RAND_MAX,
(float)rand()/(float)RAND_MAX
);
}
for(int i=0;i<num_bins;i++)
{
h_bin_counters[i] = 0; h_bin_counters_checker[i] = 0;
}
for(int i=0;i<num_bins*bin_size;i++)
{
h_bins[i] = -1;
h_bins_checker[i] = -1;
}
for(int i=0;i<num_particles*NUM_NEIGHBORS;i++)
{
h_knn[i] = -1;
h_knn_checker[i] = -1;
}
cudaMemcpy(
d_particles, h_particles, num_particles * sizeof(float3),
cudaMemcpyHostToDevice
);
check_cuda_error("Memcpy error");
cudaMemset(d_bins, -1, num_bins * bin_size * sizeof(int));
cudaMemset(d_knn, -1, NUM_NEIGHBORS * num_particles * sizeof(int));
cudaMemset(d_bin_counters, 0, num_bins * sizeof(int));
check_cuda_error("Memset error");
start_timer(&timer);
device_binning<<<num_particles / 256, 256>>>(
d_particles, d_bins, d_bin_counters, gridding, bin_size, num_particles
);
check_cuda_error("Binning error");
stop_timer(&timer,"Device binning completed");
const dim3 blockSize(
4,
16,
16
);
start_timer(&timer);
device_binned_knn<<<num_bins/1024, blockSize>>>(
d_particles, d_bins, d_knn, binning_dim, bin_size
);
check_cuda_error("Binned knn error");
stop_timer(&timer,"Device binned knn completed");
cudaMemcpy(
h_bin_counters, d_bin_counters, num_bins * sizeof(int),
cudaMemcpyDeviceToHost
);
cudaMemcpy(
h_knn, d_knn, NUM_NEIGHBORS * num_particles * sizeof(int),
cudaMemcpyDeviceToHost
);
// generate reference output
start_timer(&timer);
host_binning(
h_particles_checker, h_bins_checker, h_bin_counters_checker, gridding,
bin_size, num_particles
);
stop_timer(&timer,"Host binning completed");
start_timer(&timer);
host_binned_knn(
h_particles_checker, h_bins_checker, h_knn_checker, binning_dim, bin_size
);
stop_timer(&timer,"Host binned knn completed");
// check CUDA output versus reference output
cross_check_results(
num_particles, num_bins, bin_size, h_bin_counters,
h_bin_counters_checker, h_knn, h_knn_checker
);
deallocate_host_memory(
h_particles, h_bins, h_knn, h_bin_counters, h_particles_checker,
h_bins_checker, h_knn_checker, h_bin_counters_checker
);
deallocate_device_memory(d_particles, d_bins, d_knn, d_bin_counters);
return 0;
}
|
b18cf7961940898ebc4d169e1c32c65d9c2140b4.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/complex.h>
using namespace std;
#define STB_IMAGE_IMPLEMENTATION
#include "../stbi_headers/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../stbi_headers/stb_image_write.h"
#include "../stbi_headers/stb_image_resize.h"
#include <time.h>
#include <hip/hip_runtime.h>
#include "header_hip.cuh"
#include <cstring>
#include <chrono>
using namespace std::chrono;
int main(int argc, char* argv[]){
int width,height,channels;
float sigma = 70.00;
float sigma_r;
float kappa = 2.0;
FILE *arq;
int line_count;
char imagefile[20];
char filelocation[100] = "../images/";
//cuda window parameters
int window_h = 128, window_w = 24;
//Read parameters
arq = fopen("../parameters.txt","r");
if((fscanf(arq,"image %s\n",imagefile)!=1) ||(fscanf(arq,"sigma %f\n",&sigma)!=1) ||
(fscanf(arq,"sigmar %f\n",&sigma_r) !=1)||(fscanf(arq,"kappa %f\n",&kappa)!=1)|| (fscanf(arq,"blocks_per_line %i\n",&line_count) !=1)){
printf("error while reading parameters\n");
return -1;
}
fclose(arq);
strcat(filelocation,imagefile);
unsigned char *img_vector = stbi_load(filelocation,&width,&height,&channels,0); // load the JPG using stbi
uchar4 *i_image;
if(img_vector==NULL){ //could not read image
printf("erro\n");
return -1;
}
unsigned char *outputimage = (unsigned char*)malloc(width*height*channels*sizeof(unsigned char));
i_image = convert_uimg_to_uchar4( width, height, channels, img_vector);
free(img_vector);
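// image_filter2d applies the GPU filter to i_image using the sigma, sigma_r and kappa values
// read from parameters.txt and the CUDA window parameters declared above; the result is copied
// back into outputimage and written out as ../output/resultC.jpg.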
image_filter2d(sigma,sigma_r , i_image, width, height, channels, window_w, window_h, kappa, line_count);
//printf("loaded image with w = %i h = %i and c = % i channels \n",width,height,channels);
transfer_uchar4_uint(width,height,channels,i_image,outputimage);
stbi_write_jpg("../output/resultC.jpg",width,height,channels,&outputimage[0],100);
free(outputimage);
hipFree(i_image);
return 0;
}
| b18cf7961940898ebc4d169e1c32c65d9c2140b4.cu | #include <iostream>
#include <thrust/complex.h>
using namespace std;
#define STB_IMAGE_IMPLEMENTATION
#include "../stbi_headers/stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "../stbi_headers/stb_image_write.h"
#include "../stbi_headers/stb_image_resize.h"
#include <time.h>
#include <cuda_runtime.h>
#include "header.cuh"
#include <cstring>
#include <chrono>
using namespace std::chrono;
int main(int argc, char* argv[]){
int width,height,channels;
float sigma = 70.00;
float sigma_r;
float kappa = 2.0;
FILE *arq;
int line_count;
char imagefile[20];
char filelocation[100] = "../images/";
//cuda window parameters
int window_h = 128, window_w = 24;
//Read parameters
arq = fopen("../parameters.txt","r");
if((fscanf(arq,"image %s\n",imagefile)!=1) ||(fscanf(arq,"sigma %f\n",&sigma)!=1) ||
(fscanf(arq,"sigmar %f\n",&sigma_r) !=1)||(fscanf(arq,"kappa %f\n",&kappa)!=1)|| (fscanf(arq,"blocks_per_line %i\n",&line_count) !=1)){
printf("error while reading parameters\n");
return -1;
}
fclose(arq);
strcat(filelocation,imagefile);
unsigned char *img_vector = stbi_load(filelocation,&width,&height,&channels,0); // load the JPG using stbi
uchar4 *i_image;
if(img_vector==NULL){ //could not read image
printf("erro\n");
return -1;
}
unsigned char *outputimage = (unsigned char*)malloc(width*height*channels*sizeof(unsigned char));
i_image = convert_uimg_to_uchar4( width, height, channels, img_vector);
free(img_vector);
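// image_filter2d applies the GPU filter to i_image using the sigma, sigma_r and kappa values
// read from parameters.txt and the CUDA window parameters declared above; the result is copied
// back into outputimage and written out as ../output/resultC.jpg.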
image_filter2d(sigma,sigma_r , i_image, width, height, channels, window_w, window_h, kappa, line_count);
//printf("loaded image with w = %i h = %i and c = % i channels \n",width,height,channels);
transfer_uchar4_uint(width,height,channels,i_image,outputimage);
stbi_write_jpg("../output/resultC.jpg",width,height,channels,&outputimage[0],100);
free(outputimage);
cudaFree(i_image);
return 0;
}
|
9e6b1ad64b6f87e2ba9fc82fbeb6f72558ee2244.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kern_ResetSinkBuffer.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
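// Sweep driver: argv[1] selects how many of the matrix sizes above are tested. For every
// (matrix size, block shape) pair, the kernel is launched once after the initial hipFree(0)
// call, warmed up for 10 iterations, and then 1000 launches are timed with steady_clock; the
// elapsed microseconds are printed together with the block and matrix shape.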
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sink = NULL;
hipMalloc(&sink, XSIZE*YSIZE);
float *source = NULL;
hipMalloc(&source, XSIZE*YSIZE);
float *div = NULL;
hipMalloc(&div, XSIZE*YSIZE);
float *label = NULL;
hipMalloc(&label, XSIZE*YSIZE);
float ik = 1;
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kern_ResetSinkBuffer), dim3(gridBlock), dim3(threadBlock), 0, 0, sink, source, div, label, ik, iCC, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kern_ResetSinkBuffer), dim3(gridBlock), dim3(threadBlock), 0, 0, sink, source, div, label, ik, iCC, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kern_ResetSinkBuffer), dim3(gridBlock), dim3(threadBlock), 0, 0, sink, source, div, label, ik, iCC, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9e6b1ad64b6f87e2ba9fc82fbeb6f72558ee2244.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kern_ResetSinkBuffer.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
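// Sweep driver: argv[1] selects how many of the matrix sizes above are tested. For every
// (matrix size, block shape) pair, the kernel is launched once after the initial cudaFree(0)
// call, warmed up for 10 iterations, and then 1000 launches are timed with steady_clock; the
// elapsed microseconds are printed together with the block and matrix shape.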
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *sink = NULL;
cudaMalloc(&sink, XSIZE*YSIZE);
float *source = NULL;
cudaMalloc(&source, XSIZE*YSIZE);
float *div = NULL;
cudaMalloc(&div, XSIZE*YSIZE);
float *label = NULL;
cudaMalloc(&label, XSIZE*YSIZE);
float ik = 1;
float iCC = 1;
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kern_ResetSinkBuffer<<<gridBlock,threadBlock>>>(sink,source,div,label,ik,iCC,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kern_ResetSinkBuffer<<<gridBlock,threadBlock>>>(sink,source,div,label,ik,iCC,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kern_ResetSinkBuffer<<<gridBlock,threadBlock>>>(sink,source,div,label,ik,iCC,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
e18320e62444c26678f89640c2f7845728f2796e.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {3, 13};
int const H_indices[] = {5, 17};
int const W_indices[] = {7, 19};
int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int Cx : C_indices) {
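// Scale by the vector length so the reduced extent C is always a whole multiple of kV.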
int C = Cx * kV;
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
TensorReduction reduction(src_tensor.extent(), 3);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(hipDeviceSynchronize(), hipSuccess);
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
//
// Reference check
//
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int w = 0; w < src_tensor.extent().w(); ++w) {
ElementCompute c_accum = reduction_identity;
for (int c = 0; c < src_tensor.extent().c(); ++c) {
c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
bool equal = (c_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
std::cerr
<< " expected: " << c_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " FInal: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
} //w
} // h
} // n
//
// Next problem
//
} // C
} // W
} // H
} // N
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
| e18320e62444c26678f89640c2f7845728f2796e.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Tests for TensorReduce family of device-wide operators
*/
#include <iostream>
#include "../../common/cutlass_unit_test.h"
#include "cutlass/cutlass.h"
#include "cutlass/complex.h"
#include "cutlass/reduction/thread/reduction_operators.h"
#include "cutlass/reduction/device/tensor_reduce.h"
#include "cutlass/functional.h"
#include "cutlass/layout/tensor.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/host/tensor_norm.h"
#include "cutlass/util/tensor_view_io.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// This reduces the C dimension, transforming an NHWC tensor into NHWC with C=1.
template <typename TensorReduction, typename ElementCompute = typename TensorReduction::ElementCompute>
bool TestAllReduction_NHWC_reduce_c(ElementCompute reduction_identity = ElementCompute()) {
using Layout = typename TensorReduction::Layout;
using ElementOutput = typename TensorReduction::ElementOutput;
using ElementSource = typename TensorReduction::ElementSource;
int const kV = TensorReduction::kVectorLength;
int const N_indices[] = {3, 13};
int const H_indices[] = {5, 17};
int const W_indices[] = {7, 19};
int const C_indices[] = {2049, 2048, 2047, 384, 64, 48, 32, 24, 16, 12, 8, 6, 4, 3, 2, 1};
for (int N : N_indices) {
for (int H : H_indices) {
for (int W : W_indices) {
for (int Cx : C_indices) {
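// Scale by the vector length so the reduced extent C is always a whole multiple of kV.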
int C = Cx * kV;
cutlass::HostTensor<ElementSource, Layout> src_tensor({N, H, W, C});
cutlass::HostTensor<ElementOutput, Layout> dst_tensor({N, H, W, 1});
cutlass::reference::host::TensorFillRandomUniform(
src_tensor.host_view(), 17, 10, -10, 0);
dst_tensor.sync_device();
src_tensor.sync_device();
// Execute a tensor reduction over rank 3 (the 'C' dimension is reduced; NHWC => NHW)
TensorReduction reduction(src_tensor.extent(), 3);
cutlass::DeviceAllocation<uint8_t> device_workspace(reduction.workspace_size());
cutlass::Status status = reduction.reduce(
dst_tensor.device_ref(),
src_tensor.device_ref(),
device_workspace.get(),
reduction_identity
);
EXPECT_EQ(status, cutlass::Status::kSuccess);
EXPECT_EQ(cudaDeviceSynchronize(), cudaSuccess);
dst_tensor.sync_host();
typename TensorReduction::ReductionOp reduction_op;
//
// Reference check
//
for (int n = 0; n < src_tensor.extent().n(); ++n) {
for (int h = 0; h < src_tensor.extent().h(); ++h) {
for (int w = 0; w < src_tensor.extent().w(); ++w) {
ElementCompute c_accum = reduction_identity;
for (int c = 0; c < src_tensor.extent().c(); ++c) {
c_accum = reduction_op(c_accum, ElementCompute(src_tensor.at({n, h, w, c})));
}
ElementCompute got = ElementCompute(dst_tensor.at({n, h, w, 0}));
bool equal = (c_accum == got);
EXPECT_TRUE(equal);
if (!equal) {
std::cerr
<< "Error at location (" << n << ", " << h << ", " << w << ", 0)" << std::endl;
std::cerr
<< " expected: " << c_accum << std::endl
<< " got: " << got << std::endl;
std::cerr
<< "Problem: " << src_tensor.extent() << " -> "
<< dst_tensor.extent() << std::endl;
std::cerr
<< " Grid: " << reduction.reduction_strided.grid_shape
<< "\n Block: " << reduction.reduction_strided.threadblock_shape << std::endl
<< " FInal: " << reduction.reduction_strided.grid_final
<< "\n Block: " << reduction.reduction_strided.threadblock_final << "\n";
return false;
}
} //w
} // h
} // n
//
// Next problem
//
} // C
} // W
} // H
} // N
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x1_f16x1) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x2_f16x2) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 2;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_reduce_c_f32x4_f16x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = cutlass::half_t;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::plus<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>());
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_maximum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::maximum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( -std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_minimum_c_f32x4) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 4;
// Define the functor
using Functor = cutlass::minimum<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( std::numeric_limits<float>::max() ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_s32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = int;
using ElementSource = int;
using ElementCompute = int;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ANY_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_or<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(0) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Test tensor reduction from NHWC to NHW
TEST(Reduction_TensorReduce, nhwc_ALL_c_f32) {
using Layout = cutlass::layout::TensorNHWC;
using ElementOutput = float;
using ElementSource = float;
using ElementCompute = float;
int const kV = 1;
// Define the functor
using Functor = cutlass::logical_and<ElementCompute>;
using TensorReduction = cutlass::reduction::device::TensorReduction<
ElementOutput,
ElementSource,
Layout,
Functor,
kV,
ElementCompute
>;
EXPECT_TRUE(TestAllReduction_NHWC_reduce_c<TensorReduction>( ElementCompute(1) ));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
|
ef99aee0f276a434209daaf69d6c01e757163db9.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LstmCell.h"
#include "CudnnDescriptors.h"
namespace cytonLib
{
void LstmCell::init(string tag, int batchSize_, int inputSize_, int hiddenSize_,
int numLayers_, int maxSeqLen_, Precision dropout,
Variable* hx_, Variable* cx_)
{
this->batchSize=batchSize_;
this->inputSize=inputSize_;
this->hiddenSize=hiddenSize_;
this->numLayers=numLayers_;
this->hx=hx_;
this->cx=cx_;
assert(hx->n==numLayers && hx->c==batchSize && hx->h==hiddenSize);
assert(cx->n==numLayers && cx->c==batchSize && cx->h==hiddenSize);
// -------------------------
// Set up the dropout descriptor (needed for the RNN descriptor)
// -------------------------
checkError(cudnnCreateDropoutDescriptor(&dropoutDesc));
// How much memory does dropout need for states?
// These states are used to generate random numbers internally
// and should not be freed until the RNN descriptor is no longer used
size_t stateSize;
void *states;
checkError(cudnnDropoutGetStatesSize(global.cudnnHandle, &stateSize));
checkError(hipMalloc(&states, stateSize));
Precision tDropOut=dropout;
checkError(cudnnSetDropoutDescriptor(dropoutDesc,
global.cudnnHandle,
tDropOut,
states,
stateSize,
global.rnnDropoutSeed++));
checkError(cudnnCreateRNNDescriptor(&rnnDesc));
// if (mode == 0) RNNMode = miopenRNNRELU;
// else if (mode == 1) RNNMode = miopenRNNTANH;
// else if (mode == 2) RNNMode = miopenLSTM;
// else if (mode == 3) RNNMode = miopenGRU;
RNNMode = miopenLSTM;
checkError(cudnnSetRNNDescriptor(global.cudnnHandle, rnnDesc,
hiddenSize_,
numLayers_,
dropoutDesc,
CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
CUDNN_UNIDIRECTIONAL,
RNNMode,
CUDNN_RNN_ALGO_STANDARD,
// CUDNN_RNN_ALGO_PERSIST_STATIC,
cudnnDataType));
{
xDescs.init(maxSeqLen_, batchSize_, inputSize_);
w.init(tag, rnnDesc, xDescs.descs[0] ,numLayers_,false, hiddenSize_);
size_t workSize;
checkError(cudnnGetRNNWorkspaceSize(global.cudnnHandle, rnnDesc, maxSeqLen_, xDescs.descs, &workSize));
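// The workspace is queried for maxSeqLen_, so the buffer resized below should be
// large enough to reuse for any sequence up to that length.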
workspace.resize(workSize, 1);
}
checkError(hipDeviceSynchronize());
}
} /* namespace cytonLib */
| ef99aee0f276a434209daaf69d6c01e757163db9.cu | /*
Copyright 2018 XIAOLIN WANG ([email protected]; [email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LstmCell.h"
#include "CudnnDescriptors.h"
namespace cytonLib
{
void LstmCell::init(string tag, int batchSize_, int inputSize_, int hiddenSize_,
int numLayers_, int maxSeqLen_, Precision dropout,
Variable* hx_, Variable* cx_)
{
this->batchSize=batchSize_;
this->inputSize=inputSize_;
this->hiddenSize=hiddenSize_;
this->numLayers=numLayers_;
this->hx=hx_;
this->cx=cx_;
assert(hx->n==numLayers && hx->c==batchSize && hx->h==hiddenSize);
assert(cx->n==numLayers && cx->c==batchSize && cx->h==hiddenSize);
// -------------------------
// Set up the dropout descriptor (needed for the RNN descriptor)
// -------------------------
checkError(cudnnCreateDropoutDescriptor(&dropoutDesc));
// How much memory does dropout need for states?
// These states are used to generate random numbers internally
// and should not be freed until the RNN descriptor is no longer used
size_t stateSize;
void *states;
checkError(cudnnDropoutGetStatesSize(global.cudnnHandle, &stateSize));
checkError(cudaMalloc(&states, stateSize));
Precision tDropOut=dropout;
checkError(cudnnSetDropoutDescriptor(dropoutDesc,
global.cudnnHandle,
tDropOut,
states,
stateSize,
global.rnnDropoutSeed++));
checkError(cudnnCreateRNNDescriptor(&rnnDesc));
// if (mode == 0) RNNMode = CUDNN_RNN_RELU;
// else if (mode == 1) RNNMode = CUDNN_RNN_TANH;
// else if (mode == 2) RNNMode = CUDNN_LSTM;
// else if (mode == 3) RNNMode = CUDNN_GRU;
RNNMode = CUDNN_LSTM;
checkError(cudnnSetRNNDescriptor(global.cudnnHandle, rnnDesc,
hiddenSize_,
numLayers_,
dropoutDesc,
CUDNN_LINEAR_INPUT, // We can also skip the input matrix transformation
CUDNN_UNIDIRECTIONAL,
RNNMode,
CUDNN_RNN_ALGO_STANDARD,
// CUDNN_RNN_ALGO_PERSIST_STATIC,
cudnnDataType));
{
xDescs.init(maxSeqLen_, batchSize_, inputSize_);
w.init(tag, rnnDesc, xDescs.descs[0] ,numLayers_,false, hiddenSize_);
size_t workSize;
checkError(cudnnGetRNNWorkspaceSize(global.cudnnHandle, rnnDesc, maxSeqLen_, xDescs.descs, &workSize));
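// The workspace is queried for maxSeqLen_, so the buffer resized below should be
// large enough to reuse for any sequence up to that length.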
workspace.resize(workSize, 1);
}
checkError(cudaDeviceSynchronize());
}
} /* namespace cytonLib */
|
f2ae7defc6e28b4d0b19303e5333f840dba29723.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>
#include "../utils.h"
#include "gpu_expInt_double.h"
__device__ double calcExp_simple_double(int n, double x, int maxIters){
double eulerConstant=0.5772156649015329;
double epsilon=1.E-30;
double bigdouble = 3.40282E38;
double a,b,c,d,del,fact,h,psi,ans=0.0;
int i,ii;
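// Exponential integral E_n(x): for x > 1 a continued fraction is evaluated,
// otherwise a power series is summed (the structure follows the classic
// Numerical Recipes expint routine). Note that expf/logf/fabsf are the
// single-precision intrinsics, so the result is only float-accurate despite
// the double declarations (exp/log/fabs would keep full precision).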
if(n==0){
ans=expf(-x)/x;
} else {
if(x>1.0){
b=(double)n+x;
c=bigdouble;
d=1.0/b;
h=d;
for(i=1;i<=maxIters;i++){
a=(double)(-i)*(n-1+i);
b+=2.0;
d=1.0/(a*d+b);
c=b+a/c;
del=c*d;
h*=del;
if(fabsf(del-1.0)<=epsilon){
ans=h*expf(-x);
return ans;
}
}
ans=h*expf(-x);
return ans;
} else { // Evaluate series
ans=( (n-1) !=0 ? 1.0/(double)(n-1) : -logf(x)-eulerConstant); // First term
fact=1.0;
for(i=1;i<=maxIters;i++){
fact *= -x/(double)i;
if(i != (n-1)){
del = -fact/(double)(i-n+1);
} else {
psi = -eulerConstant;
for(ii=1;ii<=(n-1);ii++){
psi += 1.0/(double)ii;
}
del=fact*(-logf(x)+psi);
}
ans+=del;
if(fabsf(del)<fabsf(ans)*epsilon) return ans;
}
return ans;
}
}
return ans;
}
__device__ double calcExp_shared_double(double *consts, int n, double x){
double a,b,c,d,del,fact,h,psi,ans=0.0;
int i,ii;
if(n==0){
ans=expf(-x)/x;
} else {
if(x>1.0){
b=(double)n+x;
c=consts[2];
d=1.0/b;
h=d;
for(i=1;i<=consts[3];i++){
a=(double)(-i)*(n-1+i);
b+=2.0;
d=1.0/(a*d+b);
c=b+a/c;
del=c*d;
h*=del;
if(fabsf(del-1.0)<=consts[1]){
ans=h*expf(-x);
return ans;
}
}
ans=h*expf(-x);
return ans;
} else { // Evaluate series
ans=( (n-1) !=0 ? 1.0/(double)(n-1) : -logf(x)-consts[0]); // First term
fact=1.0;
for(i=1;i<=consts[3];i++){
fact *= -x/(double)i;
if(i != (n-1)){
del = -fact/(double)(i-n+1);
} else {
psi = -consts[0];
for(ii=1;ii<=(n-1);ii++){
psi += 1.0/(double)ii;
}
del=fact*(-logf(x)+psi);
}
ans+=del;
if(fabsf(del)<fabsf(ans)*consts[1]) return ans;
}
return ans;
}
}
return ans;
}
__global__ void calcExpIntegral_simple_double(double *res_glob, int n, int numSamples, int a, double division, int maxIters){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
double x = a + (idy+1)*division;
if(idx<n && idy<numSamples){
res_glob[idy + idx*numSamples] = calcExp_simple_double(idx+1, x, maxIters);
}
}
__global__ void calcExpIntegral_shared_double(double *res_glob, int n, int numSamples, int a, double division, int maxIters){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
extern __shared__ double consts[];
consts[0] = 0.5772156649015329;
consts[1] = 1.E-30;
consts[2] = 3.40282E38;
consts[3] = maxIters;
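// Every thread stores the same four values, so the concurrent writes are benign;
// a __syncthreads() before first use would make the shared-memory initialisation explicit.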
double x = a + (idy+1)*division;
if(idx<n && idy < numSamples){
res_glob[idy + idx*numSamples] = calcExp_shared_double(consts, idx+1, x);
}
}
extern void GPUexponentialIntegralDouble(double *results, int block_size_X, int block_size_Y, Tau *tau){
double *res_glob, *dynam_glob;
hipStream_t stream[numStreams];
int tmp;
double division = (b-a)/numSamples;
hipEvent_t start, finish;
hipEventCreate(&start);
hipEventCreate(&finish);
hipEventRecord(start);
hipMalloc( (void**)&res_glob, n*numSamples*sizeof(double));
hipMalloc( (void**)&dynam_glob, n*numSamples*sizeof(double));
findBestDevice();
if(numStreams){
tmp = n - (numStreams-1)*n/numStreams;
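// The last stream takes the remainder rows when n does not divide evenly by numStreams.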
for(int i=0;i<numStreams;i++)
hipStreamCreate(&stream[i]);
}
dim3 dimBlock(block_size_X, block_size_Y);
dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1),
(numSamples/dimBlock.y)+(!(numSamples%dimBlock.y)?0:1));
/////////////////////// APPLYING KERNEL ///////////////////////////////
if(shared){
if(cache) hipDeviceSetCacheConfig(hipFuncCachePreferL1);
hipLaunchKernelGGL(( calcExpIntegral_shared_double), dim3(dimGrid), dim3(dimBlock), 4*sizeof(double), 0,
res_glob, n, numSamples, a, division, maxIters);
} else if(dynamic) {
hipLaunchKernelGGL(( calcExpIntegral_dynamic_double), dim3(dimGrid),dim3(dimBlock), 4*sizeof(double), 0,
res_glob, dynam_glob, n, numSamples, a, division, maxIters);
} else if(numStreams) {
dim3 dimGrid((tmp/dimBlock.x)+(!(tmp%dimBlock.x)?0:1),
(numSamples/dimBlock.y)+(!(numSamples%dimBlock.y)?0:1));
for(int i=0;i<numStreams;i++){
int val = i == (numStreams - 1) ? tmp : n/numStreams;
hipLaunchKernelGGL(( calcExpIntegral_portion_double), dim3(dimGrid),dim3(dimBlock),4*sizeof(double),stream[i],
&res_glob[numSamples*i*n/numStreams], i*n/numStreams,
val, numSamples, a, division, maxIters);
}
} else {
hipLaunchKernelGGL(( calcExpIntegral_simple_double), dim3(dimGrid), dim3(dimBlock), 0, 0,
res_glob, n, numSamples, a, division, maxIters);
}
////////////////////////////////////////////////////////////////////
///////////////// TRANSFERRING RESULTS TO HOST /////////////////////
if(numStreams){
for(int i=0; i<numStreams;i++){
int val = i == (numStreams - 1) ? tmp : n/numStreams;
hipMemcpyAsync(&results[numSamples*i*n/numStreams],
&res_glob[numSamples*i*n/numStreams], numSamples*val*sizeof(double),
hipMemcpyDeviceToHost, stream[i]);
}
for(int i=0;i<numStreams;i++)
hipStreamDestroy(stream[i]);
} else {
hipMemcpy(results, res_glob, n*numSamples*sizeof(double), hipMemcpyDeviceToHost);
}
//////////////////////////////////////////////////////////////////////
hipEventRecord(finish);
hipEventSynchronize(finish);
float elapsedMs = 0.0f;
hipEventElapsedTime(&elapsedMs, start, finish); // elapsed time is reported in milliseconds as a float
tau->double_CPU = elapsedMs;
hipFree(res_glob);
hipFree(dynam_glob);
}
| f2ae7defc6e28b4d0b19303e5333f840dba29723.cu | #include <cstdio>
#include <vector>
#include "../utils.h"
#include "gpu_expInt_double.h"
__device__ double calcExp_simple_double(int n, double x, int maxIters){
double eulerConstant=0.5772156649015329;
double epsilon=1.E-30;
double bigdouble = 3.40282E38;
double a,b,c,d,del,fact,h,psi,ans=0.0;
int i,ii;
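// Exponential integral E_n(x): for x > 1 a continued fraction is evaluated,
// otherwise a power series is summed (the structure follows the classic
// Numerical Recipes expint routine). Note that expf/logf/fabsf are the
// single-precision intrinsics, so the result is only float-accurate despite
// the double declarations (exp/log/fabs would keep full precision).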
if(n==0){
ans=expf(-x)/x;
} else {
if(x>1.0){
b=(double)n+x;
c=bigdouble;
d=1.0/b;
h=d;
for(i=1;i<=maxIters;i++){
a=(double)(-i)*(n-1+i);
b+=2.0;
d=1.0/(a*d+b);
c=b+a/c;
del=c*d;
h*=del;
if(fabsf(del-1.0)<=epsilon){
ans=h*expf(-x);
return ans;
}
}
ans=h*expf(-x);
return ans;
} else { // Evaluate series
ans=( (n-1) !=0 ? 1.0/(double)(n-1) : -logf(x)-eulerConstant); // First term
fact=1.0;
for(i=1;i<=maxIters;i++){
fact *= -x/(double)i;
if(i != (n-1)){
del = -fact/(double)(i-n+1);
} else {
psi = -eulerConstant;
for(ii=1;ii<=(n-1);ii++){
psi += 1.0/(double)ii;
}
del=fact*(-logf(x)+psi);
}
ans+=del;
if(fabsf(del)<fabsf(ans)*epsilon) return ans;
}
return ans;
}
}
return ans;
}
__device__ double calcExp_shared_double(double *consts, int n, double x){
double a,b,c,d,del,fact,h,psi,ans=0.0;
int i,ii;
if(n==0){
ans=expf(-x)/x;
} else {
if(x>1.0){
b=(double)n+x;
c=consts[2];
d=1.0/b;
h=d;
for(i=1;i<=consts[3];i++){
a=(double)(-i)*(n-1+i);
b+=2.0;
d=1.0/(a*d+b);
c=b+a/c;
del=c*d;
h*=del;
if(fabsf(del-1.0)<=consts[1]){
ans=h*expf(-x);
return ans;
}
}
ans=h*expf(-x);
return ans;
} else { // Evaluate series
ans=( (n-1) !=0 ? 1.0/(double)(n-1) : -logf(x)-consts[0]); // First term
fact=1.0;
for(i=1;i<=consts[3];i++){
fact *= -x/(double)i;
if(i != (n-1)){
del = -fact/(double)(i-n+1);
} else {
psi = -consts[0];
for(ii=1;ii<=(n-1);ii++){
psi += 1.0/(double)ii;
}
del=fact*(-logf(x)+psi);
}
ans+=del;
if(fabsf(del)<fabsf(ans)*consts[1]) return ans;
}
return ans;
}
}
return ans;
}
__global__ void calcExpIntegral_simple_double(double *res_glob, int n, int numSamples, int a, double division, int maxIters){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
double x = a + (idy+1)*division;
if(idx<n && idy<numSamples){
res_glob[idy + idx*numSamples] = calcExp_simple_double(idx+1, x, maxIters);
}
}
__global__ void calcExpIntegral_shared_double(double *res_glob, int n, int numSamples, int a, double division, int maxIters){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
extern __shared__ double consts[];
consts[0] = 0.5772156649015329;
consts[1] = 1.E-30;
consts[2] = 3.40282E38;
consts[3] = maxIters;
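// Every thread stores the same four values, so the concurrent writes are benign;
// a __syncthreads() before first use would make the shared-memory initialisation explicit.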
double x = a + (idy+1)*division;
if(idx<n && idy < numSamples){
res_glob[idy + idx*numSamples] = calcExp_shared_double(consts, idx+1, x);
}
}
extern void GPUexponentialIntegralDouble(double *results, int block_size_X, int block_size_Y, Tau *tau){
double *res_glob, *dynam_glob;
cudaStream_t stream[numStreams];
int tmp;
double division = (b-a)/numSamples;
cudaEvent_t start, finish;
cudaEventCreate(&start);
cudaEventCreate(&finish);
cudaEventRecord(start);
cudaMalloc( (void**)&res_glob, n*numSamples*sizeof(double));
cudaMalloc( (void**)&dynam_glob, n*numSamples*sizeof(double));
findBestDevice();
if(numStreams){
tmp = n - (numStreams-1)*n/numStreams;
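// The last stream takes the remainder rows when n does not divide evenly by numStreams.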
for(int i=0;i<numStreams;i++)
cudaStreamCreate(&stream[i]);
}
dim3 dimBlock(block_size_X, block_size_Y);
dim3 dimGrid((n/dimBlock.x)+(!(n%dimBlock.x)?0:1),
(numSamples/dimBlock.y)+(!(numSamples%dimBlock.y)?0:1));
/////////////////////// APPLYING KERNEL ///////////////////////////////
if(shared){
if(cache) cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
calcExpIntegral_shared_double<<<dimGrid, dimBlock, 4*sizeof(double)>>>
(res_glob, n, numSamples, a, division, maxIters);
} else if(dynamic) {
calcExpIntegral_dynamic_double<<<dimGrid,dimBlock, 4*sizeof(double)>>>
(res_glob, dynam_glob, n, numSamples, a, division, maxIters);
} else if(numStreams) {
dim3 dimGrid((tmp/dimBlock.x)+(!(tmp%dimBlock.x)?0:1),
(numSamples/dimBlock.y)+(!(numSamples%dimBlock.y)?0:1));
for(int i=0;i<numStreams;i++){
int val = i == (numStreams - 1) ? tmp : n/numStreams;
calcExpIntegral_portion_double<<<dimGrid,dimBlock,4*sizeof(double),stream[i]>>>
(&res_glob[numSamples*i*n/numStreams], i*n/numStreams,
val, numSamples, a, division, maxIters);
}
} else {
calcExpIntegral_simple_double<<<dimGrid, dimBlock>>>
(res_glob, n, numSamples, a, division, maxIters);
}
////////////////////////////////////////////////////////////////////
///////////////// TRANSFERRING RESULTS TO HOST /////////////////////
if(numStreams){
for(int i=0; i<numStreams;i++){
int val = i == (numStreams - 1) ? tmp : n/numStreams;
cudaMemcpyAsync(&results[numSamples*i*n/numStreams],
&res_glob[numSamples*i*n/numStreams], numSamples*val*sizeof(double),
cudaMemcpyDeviceToHost, stream[i]);
}
for(int i=0;i<numStreams;i++)
cudaStreamDestroy(stream[i]);
} else {
cudaMemcpy(results, res_glob, n*numSamples*sizeof(double), cudaMemcpyDeviceToHost);
}
//////////////////////////////////////////////////////////////////////
cudaEventRecord(finish);
cudaEventSynchronize(finish);
float elapsedMs = 0.0f;
cudaEventElapsedTime(&elapsedMs, start, finish); // elapsed time is reported in milliseconds as a float
tau->double_CPU = elapsedMs;
cudaFree(res_glob);
cudaFree(dynam_glob);
}
|
a7efa316ee8995cb5d8555be3a9b7df183dd091c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=1 --blockDim=[9,9]
#include "common.h"
__global__ void
addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch)
{
__requires(dx == 512);
__requires(dy == 512);
__requires(spx == 1);
__requires(spy == 1);
__requires(r == 4);
__requires(pitch == 4096);
int tx = threadIdx.x;
int ty = threadIdx.y;
cData *fj = (cData *)((char *)v + (ty + spy) * pitch) + tx + spx;
cData vterm = *fj;
tx -= r;
ty -= r;
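// tx, ty are now offsets from the centre of the force splat; s falls off with the
// fourth power of distance, so the injected force is strongest at the centre.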
float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty);
vterm.x += s * fx;
vterm.y += s * fy;
*fj = vterm;
}
| a7efa316ee8995cb5d8555be3a9b7df183dd091c.cu | //pass
//--gridDim=1 --blockDim=[9,9]
#include "common.h"
__global__ void
addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch)
{
__requires(dx == 512);
__requires(dy == 512);
__requires(spx == 1);
__requires(spy == 1);
__requires(r == 4);
__requires(pitch == 4096);
int tx = threadIdx.x;
int ty = threadIdx.y;
cData *fj = (cData *)((char *)v + (ty + spy) * pitch) + tx + spx;
cData vterm = *fj;
tx -= r;
ty -= r;
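// tx, ty are now offsets from the centre of the force splat; s falls off with the
// fourth power of distance, so the injected force is strongest at the centre.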
float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty);
vterm.x += s * fx;
vterm.y += s * fy;
*fj = vterm;
}
|
92efb9dcd850a0b56823140e12e43b2ceab7c599.hip | // !!! This is a file automatically generated by hipify!!!
#include "matrix.h"
#include "gpuinfo.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
/**
* @brief cuda kernel -- matrix mul C = alpha * A * B + beta * C, single precision
* @param alpha coefficient parameter
* @param beta coefficient parameter
* @param C result
* @param A matrix A
* @param B matrix B
*/
__global__ void kernel_matrix_mul_sp(float alpha, float beta,
float* C, float* A, float* B,
unsigned int widthC, unsigned int widthA, unsigned int widthB)
{
const int row = threadIdx.y + blockIdx.y * blockDim.y;
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int indexC = row * widthC + col;
const int indexA = row * widthA;
// accumulate the dot product in a register, then apply alpha and beta once: C = alpha*A*B + beta*C
float sum = 0.0f;
for (int i = 0; i < widthA; i++)
sum += A[indexA + i] * B[i * widthB + col];
C[indexC] = alpha * sum + beta * C[indexC];
}
/**
* @brief cuda -- matrix mul, single precision
* @param alpha coefficient parameter
* @param beta coefficient parameter
* @param C result
* @param A matrix A
* @param B matrix B
*/
void cuda_matrix_mul_sp(const float& alpha, const float& beta,
gemm::MatrixSP& C, gemm::MatrixSP& A, gemm::MatrixSP& B);
void cuda_matrix_mul_sp(const float& alpha, const float& beta,
gemm::MatrixSP& C, gemm::MatrixSP& A, gemm::MatrixSP& B)
{
if (A.empty() || B.empty() || C.empty())
{
std::fprintf(stderr, "cuda matrix mul sp error, matrix is empty!\r\n");
return;
}
if (A.width() != B.height() || A.height() != C.height() || B.width() != C.width())
{
std::fprintf(stderr, "cuda matrix mul sp error, matrix dimensions do not match!\r\n");
return;
}
/* initialize gpu preference */
hipDeviceProp_t devProp;
const char* visibleDevices = std::getenv("CUDA_VISIBLE_DEVICES"); // may be unset, so guard before atoi
int gpu_id = visibleDevices ? std::atoi(visibleDevices) : 0;
int gpucount;
hipGetDeviceCount(&gpucount);
if (gpu_id < 0 || gpu_id >= gpucount)
{
std::fprintf(stderr, "cuda matrix mul sp error, gpu %d does not exist!\r\n", gpu_id);
return;
}
hipGetDeviceProperties(&devProp, gpu_id);
const int blockSize = devProp.maxThreadsPerBlock;
const int blockLen = (int)::floor(std::sqrt((double)blockSize));
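// blockLen*blockLen is the largest square thread block that fits within maxThreadsPerBlock.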
dim3 cudaBlockSize(blockLen, blockLen);
// grid x covers the columns (width) and grid y the rows (height) to match the kernel's indexing
dim3 cudaGridSize((C.width() + cudaBlockSize.x - 1) / cudaBlockSize.x,
(C.height() + cudaBlockSize.y - 1) / cudaBlockSize.y);
/* allocate memory on gpu */
float *cuC, *cuA, *cuB;
hipMalloc((void**)&cuC, C.height() * C.width() * sizeof(float));
hipMalloc((void**)&cuA, A.height() * A.width() * sizeof(float));
hipMalloc((void**)&cuB, B.height() * B.width() * sizeof(float));
/* copy data (C is also uploaded because the kernel reads it through the beta term) */
hipMemcpy(cuA, (A._element), A.height() * A.width() * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cuB, (B._element), B.height() * B.width() * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(cuC, (C._element), C.height() * C.width() * sizeof(float), hipMemcpyHostToDevice);
/* execute kernel */
hipLaunchKernelGGL(( kernel_matrix_mul_sp), dim3(cudaGridSize), dim3(cudaBlockSize), 0, 0, alpha, beta, cuC, cuA, cuB, C.width(), A.width(), B.width());
/* copy data */
hipMemcpy(C._element, cuC, C.height() * C.width() * sizeof(float), hipMemcpyDeviceToHost);
/* free memory on gpu */
hipFree(cuC);
hipFree(cuA);
hipFree(cuB);
}
| 92efb9dcd850a0b56823140e12e43b2ceab7c599.cu | #include "matrix.h"
#include "gpuinfo.h"
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/**
* @brief cuda kernal -- matrix mul C = alpha * A * B + beta * C, single precision
* @param alpha coefficient parameter
* @param beta coefficient parameter
* @param C result
* @param A matrix A
* @param B matrix B
*/
__global__ void kernel_matrix_mul_sp(float alpha, float beta,
float* C, float* A, float* B,
unsigned int widthC, unsigned int widthA, unsigned int widthB)
{
const int row = threadIdx.y + blockIdx.y * blockDim.y;
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int indexC = row * widthC + col;
const int indexA = row * widthA;
// accumulate the dot product in a register, then apply alpha and beta once: C = alpha*A*B + beta*C
float sum = 0.0f;
for (int i = 0; i < widthA; i++)
sum += A[indexA + i] * B[i * widthB + col];
C[indexC] = alpha * sum + beta * C[indexC];
}
/**
* @brief cuda -- matrix mul, single precision
* @param alpha coefficient parameter
* @param beta coefficient parameter
* @param C result
* @param A matrix A
* @param B matrix B
*/
void cuda_matrix_mul_sp(const float& alpha, const float& beta,
gemm::MatrixSP& C, gemm::MatrixSP& A, gemm::MatrixSP& B);
void cuda_matrix_mul_sp(const float& alpha, const float& beta,
gemm::MatrixSP& C, gemm::MatrixSP& A, gemm::MatrixSP& B)
{
if (A.empty() || B.empty() || C.empty())
{
std::fprintf(stderr, "cuda matrix mul sp error, matrix is empty!\r\n");
return;
}
if (A.width() != B.height() || A.height() != C.height() || B.width() != C.width())
{
std::fprintf(stderr, "cuda matrix mul sp error, matrix dimensions do not match!\r\n");
return;
}
/* initialize gpu preference */
cudaDeviceProp devProp;
const char* visibleDevices = std::getenv("CUDA_VISIBLE_DEVICES"); // may be unset, so guard before atoi
int gpu_id = visibleDevices ? std::atoi(visibleDevices) : 0;
int gpucount;
cudaGetDeviceCount(&gpucount);
if (gpu_id < 0 || gpu_id >= gpucount)
{
std::fprintf(stderr, "cuda matrix mul sp error, gpu %d does not exist!\r\n", gpu_id);
return;
}
cudaGetDeviceProperties(&devProp, gpu_id);
const int blockSize = devProp.maxThreadsPerBlock;
const int blockLen = (int)std::floor(std::sqrt((double)blockSize));
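// blockLen*blockLen is the largest square thread block that fits within maxThreadsPerBlock.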
dim3 cudaBlockSize(blockLen, blockLen);
// grid x covers the columns (width) and grid y the rows (height) to match the kernel's indexing
dim3 cudaGridSize((C.width() + cudaBlockSize.x - 1) / cudaBlockSize.x,
(C.height() + cudaBlockSize.y - 1) / cudaBlockSize.y);
/* allocate memory on gpu */
float *cuC, *cuA, *cuB;
cudaMalloc((void**)&cuC, C.height() * C.width() * sizeof(float));
cudaMalloc((void**)&cuA, A.height() * A.width() * sizeof(float));
cudaMalloc((void**)&cuB, B.height() * B.width() * sizeof(float));
/* copy data (C is also uploaded because the kernel reads it through the beta term) */
cudaMemcpy(cuA, (A._element), A.height() * A.width() * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cuB, (B._element), B.height() * B.width() * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(cuC, (C._element), C.height() * C.width() * sizeof(float), cudaMemcpyHostToDevice);
/* execute kernel */
kernel_matrix_mul_sp<<<cudaGridSize, cudaBlockSize>>>(alpha, beta, cuC, cuA, cuB, C.width(), A.width(), B.width());
/* copy data */
cudaMemcpy(C._element, cuC, C.height() * C.width() * sizeof(float), cudaMemcpyDeviceToHost);
/* free memory on gpu */
cudaFree(cuC);
cudaFree(cuA);
cudaFree(cuB);
}
|
5fe50de79b46ffb7c08c4f03d8e2675d2038e2db.hip | // !!! This is a file automatically generated by hipify!!!
// ref_confusion_matrix should be copied from gpu 0 to others
hipStream_t stream[NGPU];
for (int i = 0; i < NGPU; ++i)
hipStreamCreate (&stream[i]);
for (int i = 0; i < NGPU; ++i) {
hipSetDevice (i);
CUDAKERNELSTREAMSYNC (MonteCarlo_Init_d, dim_grid, dim_block, 0, stream[i], rep_begin[i], rep_end[i]);
}
for (int i = 0; i < NGPU; ++i)
hipStreamSynchronize (stream[i]);
int filesequence = 0;
for (int s1 = 0; s1 < mcpara->steps_total; s1 += mcpara->steps_per_dump) {
double t0 = host_time_now ();
printf ("\t%d / %d \r", s1, mcpara->steps_total);
fflush (stdout);
for (int s2 = 0; s2 < mcpara->steps_per_dump; s2 += mcpara->steps_per_exchange) {
// monte carlo
for (int i = 0; i < NGPU; ++i) {
hipSetDevice (i);
CUDAKERNELSTREAM (MonteCarlo_d, dim_grid, dim_block, 0, stream[i], rep_begin[i], rep_end[i], s1, s2);
}
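// One stream per GPU: all devices advance their replicas concurrently between exchange points.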
/*
// exchange
// gather to GPU0, and then scatter from GPU0
for (int i = 1; i < NGPU; ++i)
hipMemcpyPeerAsync(etotal_d[0], 0, etotal_d[i], i, etotal_sz, stream[0]);
// may duplicate computation and eliminate the following transformation
hipSetDevice (0);
ExchangeReplicas_d <<<dim_grid, dim_block, 0, stream[0] >>> ();
for (int i = 1; i < NGPU; ++i)
hipMemcpyPeerAsync(replica_d[i], i, replica_d[0], 0, replica_sz, stream[i]);
for (int i = 0; i < NGPU; ++i)
hipStreamSynchronize (stream[i]);
*/
}
// accumulate for compute time
mclog->t0 += host_time_now () - t0;
// dump: GPU -> CPU -> disk
/*
#if IS_OUTPUT == 1
// copy ligand record from GPU to CPU memory
for (int i = 0; i < NGPU; ++i) {
hipSetDevice (i);
CUDAMEMCPY (&ligrecord[rep_begin[i]], ligrecord_d[i], ligrecord_sz_per_gpu[i], hipMemcpyDeviceToHost);
}
// dump ligand record from CPU memory to disk
char myoutputfile[MAXSTRINGLENG];
sprintf(myoutputfile, "%s/%s_%04d.h5", mcpara->outputdir, mcpara->outputfile, filesequence++);
DumpLigRecord (ligrecord, n_rep, myoutputfile);
#endif
*/
// accumulate for wall time (compute time plus I/O time)
mclog->t1 += host_time_now () - t0;
}
for (int i = 0; i < NGPU; ++i)
hipStreamDestroy (stream[i]);
| 5fe50de79b46ffb7c08c4f03d8e2675d2038e2db.cu | // ref_confusion_matrix should be copied from gpu 0 to others
cudaStream_t stream[NGPU];
for (int i = 0; i < NGPU; ++i)
cudaStreamCreate (&stream[i]);
for (int i = 0; i < NGPU; ++i) {
cudaSetDevice (i);
CUDAKERNELSTREAMSYNC (MonteCarlo_Init_d, dim_grid, dim_block, 0, stream[i], rep_begin[i], rep_end[i]);
}
for (int i = 0; i < NGPU; ++i)
cudaStreamSynchronize (stream[i]);
int filesequence = 0;
for (int s1 = 0; s1 < mcpara->steps_total; s1 += mcpara->steps_per_dump) {
double t0 = host_time_now ();
printf ("\t%d / %d \r", s1, mcpara->steps_total);
fflush (stdout);
for (int s2 = 0; s2 < mcpara->steps_per_dump; s2 += mcpara->steps_per_exchange) {
// monte carlo
for (int i = 0; i < NGPU; ++i) {
cudaSetDevice (i);
CUDAKERNELSTREAM (MonteCarlo_d, dim_grid, dim_block, 0, stream[i], rep_begin[i], rep_end[i], s1, s2);
}
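// One stream per GPU: all devices advance their replicas concurrently between exchange points.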
/*
// exchange
// gather to GPU0, and then scatter from GPU0
for (int i = 1; i < NGPU; ++i)
cudaMemcpyPeerAsync(etotal_d[0], 0, etotal_d[i], i, etotal_sz, stream[0]);
// may duplicate computation and eliminate the following transformation
cudaSetDevice (0);
ExchangeReplicas_d <<<dim_grid, dim_block, 0, stream[0] >>> ();
for (int i = 1; i < NGPU; ++i)
cudaMemcpyPeerAsync(replica_d[i], i, replica_d[0], 0, replica_sz, stream[i]);
for (int i = 0; i < NGPU; ++i)
cudaStreamSynchronize (stream[i]);
*/
}
// accumulate for compute time
mclog->t0 += host_time_now () - t0;
// dump: GPU -> CPU -> disk
/*
#if IS_OUTPUT == 1
// copy ligand record from GPU to CPU memory
for (int i = 0; i < NGPU; ++i) {
cudaSetDevice (i);
CUDAMEMCPY (&ligrecord[rep_begin[i]], ligrecord_d[i], ligrecord_sz_per_gpu[i], cudaMemcpyDeviceToHost);
}
// dump ligand record from CPU memory to disk
char myoutputfile[MAXSTRINGLENG];
sprintf(myoutputfile, "%s/%s_%04d.h5", mcpara->outputdir, mcpara->outputfile, filesequence++);
DumpLigRecord (ligrecord, n_rep, myoutputfile);
#endif
*/
// accumulate for wall time (compute time plus I/O time)
mclog->t1 += host_time_now () - t0;
}
for (int i = 0; i < NGPU; ++i)
cudaStreamDestroy (stream[i]);
|
89c11f09617dbcbc7bd08770b20a63e0881ce906.hip | // !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/zmath.cuh>
namespace at { namespace native {
// We manually overload ceil because std::ceil does not work with std::complex types.
template <typename scalar_t>
__host__ __device__ static inline scalar_t ceil_wrapper(scalar_t a) {
return ::ceil(a);
}
template<typename T>
__host__ __device__ static inline std::complex<T> ceil_wrapper(std::complex<T> v) {
return std::complex<T>(::ceil(v.real()), ::ceil(v.imag()));
}
void ceil_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "ceil_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ceil_wrapper(a);
});
});
}
void frac_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "frac_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return a - ::trunc(a);
});
});
}
// We manually overload floor because std::floor does not work with std::complex types.
template <typename scalar_t>
__host__ __device__ static inline scalar_t floor_wrapper(scalar_t a) {
return ::floor(a);
}
template<typename T>
__host__ __device__ static inline std::complex<T> floor_wrapper(std::complex<T> v) {
return std::complex<T>(::floor(v.real()), ::floor(v.imag()));
}
void floor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "floor_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return floor_wrapper(a);
});
});
}
template <typename scalar_t>
__host__ __device__ static inline scalar_t reciprocal_wrapper(scalar_t a) {
return static_cast<scalar_t>(1)/a;
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> reciprocal_wrapper(thrust::complex<T> v) {
// Handle extreme cases for numpy compatibility
auto both_inf = [](T real, T imag) {
return (::isinf(real) && ::isinf(imag));
};
auto either_inf = [](T real, T imag) {
return ::isinf(real) || ::isinf(imag);
};
auto either_nan = [](T real, T imag) {
return ::isnan(real) || ::isnan(imag);
};
if (either_nan(v.real(), v.imag()) || both_inf(v.real(), v.imag())) {
// If either is Nan or both are infinite, return {nan, nan}
return {std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()};
} else if (either_inf(v.real(), v.imag())) {
// If either is Inf, return {0, 0}
return {0, 0};
}
const thrust::complex<T> one = thrust::complex<T>(1.0, 0);
return one/v;
}
void reciprocal_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "reciprocal_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "reciprocal_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
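// ztype_cuda resolves scalar_t to a thrust-compatible representation (thrust::complex for
// complex types), so reciprocal_wrapper's thrust overload is selected inside the device lambda.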
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return reciprocal_wrapper(a);
});
});
});
}
// We manually overload nearbyint because std::nearbyint does not work with std::complex types and ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
return ::nearbyint(a);
}
__host__ __device__ static inline c10::complex<float> nearbyint_wrapper(c10::complex<float> a) {
return c10::complex<float>(::nearbyintf(static_cast<float>(a.real())), ::nearbyintf(static_cast<float>(a.imag())));
}
__host__ __device__ static inline c10::complex<double> nearbyint_wrapper(c10::complex<double> a) {
return c10::complex<double>(::nearbyint(static_cast<double>(a.real())), ::nearbyint(static_cast<double>(a.imag())));
}
void round_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "round_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// We do not use std::round because we would like to round midway numbers to the nearest even integer.
return nearbyint_wrapper(a);
});
});
}
// We manually overload trunc because std::trunc does not work with std::complex types and ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
return ::trunc(a);
}
__host__ __device__ static inline c10::complex<float> trunc_wrapper(c10::complex<float> a) {
return c10::complex<float>(::truncf(static_cast<float>(a.real())), ::truncf(static_cast<float>(a.imag())));
}
__host__ __device__ static inline c10::complex<double> trunc_wrapper(c10::complex<double> a) {
return c10::complex<double>(::trunc(static_cast<double>(a.real())), ::trunc(static_cast<double>(a.imag())));
}
void trunc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "trunc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return trunc_wrapper(a);
});
});
}
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(frac_stub, &frac_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(reciprocal_stub, &reciprocal_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
}} // namespace at::native
| 89c11f09617dbcbc7bd08770b20a63e0881ce906.cu | #include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/zmath.cuh>
namespace at { namespace native {
// We manually overload ceil because std::ceil does not work with std::complex types.
template <typename scalar_t>
__host__ __device__ static inline scalar_t ceil_wrapper(scalar_t a) {
return std::ceil(a);
}
template<typename T>
__host__ __device__ static inline std::complex<T> ceil_wrapper(std::complex<T> v) {
return std::complex<T>(std::ceil(v.real()), std::ceil(v.imag()));
}
void ceil_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "ceil_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ceil_wrapper(a);
});
});
}
void frac_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "frac_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return a - ::trunc(a);
});
});
}
// We manually overload floor because std::floor does not work with std::complex types.
template <typename scalar_t>
__host__ __device__ static inline scalar_t floor_wrapper(scalar_t a) {
return std::floor(a);
}
template<typename T>
__host__ __device__ static inline std::complex<T> floor_wrapper(std::complex<T> v) {
return std::complex<T>(std::floor(v.real()), std::floor(v.imag()));
}
void floor_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "floor_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return floor_wrapper(a);
});
});
}
template <typename scalar_t>
__host__ __device__ static inline scalar_t reciprocal_wrapper(scalar_t a) {
return static_cast<scalar_t>(1)/a;
}
template<typename T>
__host__ __device__ static inline thrust::complex<T> reciprocal_wrapper(thrust::complex<T> v) {
// Handle extreme cases for numpy compatibility
auto both_inf = [](T real, T imag) {
return (::isinf(real) && ::isinf(imag));
};
auto either_inf = [](T real, T imag) {
return ::isinf(real) || ::isinf(imag);
};
auto either_nan = [](T real, T imag) {
return ::isnan(real) || ::isnan(imag);
};
if (either_nan(v.real(), v.imag()) || both_inf(v.real(), v.imag())) {
// If either is Nan or both are infinite, return {nan, nan}
return {std::numeric_limits<T>::quiet_NaN(), std::numeric_limits<T>::quiet_NaN()};
} else if (either_inf(v.real(), v.imag())) {
// If either is Inf, return {0, 0}
return {0, 0};
}
const thrust::complex<T> one = thrust::complex<T>(1.0, 0);
return one/v;
}
void reciprocal_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "reciprocal_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "reciprocal_cuda", [&] {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
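// ztype_cuda resolves scalar_t to a thrust-compatible representation (thrust::complex for
// complex types), so reciprocal_wrapper's thrust overload is selected inside the device lambda.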
gpu_kernel(iter, []GPU_LAMBDA(thrust_t a) -> thrust_t {
return reciprocal_wrapper(a);
});
});
});
}
// We manually overload nearbyint because std::nearbyint does not work with std::complex types and ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t nearbyint_wrapper(scalar_t a) {
return static_cast<scalar_t>(::nearbyintf(static_cast<float>(a)));
}
__host__ __device__ static inline double nearbyint_wrapper(double a) {
return ::nearbyint(a);
}
__host__ __device__ static inline c10::complex<float> nearbyint_wrapper(c10::complex<float> a) {
return c10::complex<float>(::nearbyintf(static_cast<float>(a.real())), ::nearbyintf(static_cast<float>(a.imag())));
}
__host__ __device__ static inline c10::complex<double> nearbyint_wrapper(c10::complex<double> a) {
return c10::complex<double>(::nearbyint(static_cast<double>(a.real())), ::nearbyint(static_cast<double>(a.imag())));
}
void round_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, iter.dtype(), "round_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
// We do not use std::round because we would like to round midway numbers to the nearest even integer.
return nearbyint_wrapper(a);
});
});
}
// We manually overload trunc because std::trunc does not work with std::complex types and ROCm.
template <typename scalar_t>
__host__ __device__ static inline scalar_t trunc_wrapper(scalar_t a) {
return static_cast<scalar_t>(::truncf(static_cast<float>(a)));
}
__host__ __device__ static inline double trunc_wrapper(double a) {
return ::trunc(a);
}
__host__ __device__ static inline c10::complex<float> trunc_wrapper(c10::complex<float> a) {
return c10::complex<float>(::truncf(static_cast<float>(a.real())), ::truncf(static_cast<float>(a.imag())));
}
__host__ __device__ static inline c10::complex<double> trunc_wrapper(c10::complex<double> a) {
return c10::complex<double>(::trunc(static_cast<double>(a.real())), ::trunc(static_cast<double>(a.imag())));
}
void trunc_kernel_cuda(TensorIterator& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(ScalarType::Half, iter.dtype(), "trunc_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return trunc_wrapper(a);
});
});
}
REGISTER_DISPATCH(ceil_stub, &ceil_kernel_cuda);
REGISTER_DISPATCH(frac_stub, &frac_kernel_cuda);
REGISTER_DISPATCH(floor_stub, &floor_kernel_cuda);
REGISTER_DISPATCH(reciprocal_stub, &reciprocal_kernel_cuda);
REGISTER_DISPATCH(round_stub, &round_kernel_cuda);
REGISTER_DISPATCH(trunc_stub, &trunc_kernel_cuda);
}} // namespace at::native
|
ace930cf526e5f70bd24351364aab71437bfa4c5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/legacy/binaryop.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
#include "cudf/column/column_view.hpp"
#include "cudf/types.hpp"
#include "tests/utilities/column_utilities.hpp"
#include "tests/utilities/column_wrapper.hpp"
#include <gmock/gmock.h>
template <typename T>
struct TimestampColumnTest : public cudf::test::BaseFixture {
hipStream_t stream() { return hipStream_t(0); }
cudf::size_type size() { return cudf::size_type(100); }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
template <typename Timestamp>
struct compare_timestamp_elements_to_primitive_representation {
cudf::column_device_view primitives;
cudf::column_device_view timestamps;
compare_timestamp_elements_to_primitive_representation(
cudf::column_device_view& _primitives,
cudf::column_device_view& _timestamps)
: primitives(_primitives), timestamps(_timestamps) {}
__host__ __device__ bool operator()(const int32_t element_index) {
using Primitive = typename Timestamp::rep;
auto primitive = primitives.element<Primitive>(element_index);
auto timestamp = timestamps.element<Timestamp>(element_index);
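// A timestamp stores a duration since the UNIX epoch, so comparing count() with the raw
// primitive verifies that the underlying representation is unchanged.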
return primitive == timestamp.time_since_epoch().count();
}
};
TYPED_TEST_CASE(TimestampColumnTest, cudf::test::TimestampTypes);
TYPED_TEST(TimestampColumnTest,
TimestampDurationsMatchPrimitiveRepresentation) {
using T = TypeParam;
using Rep = typename T::rep;
using namespace cudf::test;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamp_col = generate_timestamps<T>(
this->size(), time_point_ms(start), time_point_ms(stop_));
// round-trip through the host to copy `timestamp_col` values
// to a new fixed_width_column_wrapper `primitive_col`
std::vector<Rep> timestamp_col_data;
std::vector<cudf::bitmask_type> timestamp_col_mask;
std::tie(timestamp_col_data, timestamp_col_mask) = to_host<Rep>(timestamp_col);
auto primitive_col = fixed_width_column_wrapper<Rep>(
timestamp_col_data.begin(), timestamp_col_data.end());
thrust::device_vector<int32_t> indices(this->size());
thrust::sequence(indices.begin(), indices.end());
EXPECT_TRUE(
thrust::all_of(indices.begin(), indices.end(),
compare_timestamp_elements_to_primitive_representation<T>{
*cudf::column_device_view::create(primitive_col),
*cudf::column_device_view::create(timestamp_col)}));
}
template <typename Timestamp>
struct compare_timestamp_elements {
gdf_binary_operator comp;
cudf::column_device_view lhs;
cudf::column_device_view rhs;
compare_timestamp_elements(gdf_binary_operator _comp,
cudf::column_device_view& _lhs,
cudf::column_device_view& _rhs)
: comp(_comp), lhs(_lhs), rhs(_rhs) {}
__host__ __device__ bool operator()(const int32_t element_index) {
auto lhs_elt = lhs.element<Timestamp>(element_index);
auto rhs_elt = rhs.element<Timestamp>(element_index);
switch (comp) {
case GDF_LESS:
return lhs_elt < rhs_elt;
case GDF_GREATER:
return lhs_elt > rhs_elt;
case GDF_LESS_EQUAL:
return lhs_elt <= rhs_elt;
case GDF_GREATER_EQUAL:
return lhs_elt >= rhs_elt;
default:
return false;
}
}
};
TYPED_TEST(TimestampColumnTest, TimestampsCanBeComparedInDeviceCode) {
using T = TypeParam;
using namespace cudf::test;
using namespace simt::std::chrono;
auto start_lhs = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto start_rhs = milliseconds(-2400000000000); // Tue, 12 Dec 1893 05:20:00 GMT
auto stop_lhs_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto stop_rhs_ = milliseconds(2600000000000); // Wed, 22 May 2052 14:13:20 GMT
auto timestamp_lhs_col = generate_timestamps<T>(
this->size(), time_point_ms(start_lhs), time_point_ms(stop_lhs_));
auto timestamp_rhs_col = generate_timestamps<T>(
this->size(), time_point_ms(start_rhs), time_point_ms(stop_rhs_));
thrust::device_vector<int32_t> indices(this->size());
thrust::sequence(indices.begin(), indices.end());
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_LESS,
*cudf::column_device_view::create(timestamp_lhs_col),
*cudf::column_device_view::create(timestamp_rhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_GREATER,
*cudf::column_device_view::create(timestamp_rhs_col),
*cudf::column_device_view::create(timestamp_lhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_LESS_EQUAL,
*cudf::column_device_view::create(timestamp_lhs_col),
*cudf::column_device_view::create(timestamp_lhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_GREATER_EQUAL,
*cudf::column_device_view::create(timestamp_rhs_col),
*cudf::column_device_view::create(timestamp_rhs_col)}));
}
TYPED_TEST(TimestampColumnTest, TimestampFactoryNullMaskAsParm) {
rmm::device_buffer null_mask{
create_null_mask(this->size(), cudf::mask_state::ALL_NULL)};
auto column = cudf::make_timestamp_column(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
this->size(), null_mask, this->size(), this->stream(), this->mr());
EXPECT_EQ(column->type(),
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()});
EXPECT_EQ(column->size(), this->size());
EXPECT_EQ(this->size(), column->null_count());
EXPECT_TRUE(column->nullable());
EXPECT_TRUE(column->has_nulls());
EXPECT_EQ(0, column->num_children());
}
TYPED_TEST(TimestampColumnTest, TimestampFactoryNullMaskAsEmptyParm) {
rmm::device_buffer null_mask{};
auto column = cudf::make_timestamp_column(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
this->size(), null_mask, 0, this->stream(), this->mr());
EXPECT_EQ(column->type(),
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()});
EXPECT_EQ(column->size(), this->size());
EXPECT_EQ(0, column->null_count());
EXPECT_FALSE(column->nullable());
EXPECT_FALSE(column->has_nulls());
EXPECT_EQ(0, column->num_children());
}
| ace930cf526e5f70bd24351364aab71437bfa4c5.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/legacy/binaryop.hpp>
#include <cudf/wrappers/timestamps.hpp>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/timestamp_utilities.cuh>
#include <tests/utilities/type_lists.hpp>
#include "cudf/column/column_view.hpp"
#include "cudf/types.hpp"
#include "tests/utilities/column_utilities.hpp"
#include "tests/utilities/column_wrapper.hpp"
#include <gmock/gmock.h>
template <typename T>
struct TimestampColumnTest : public cudf::test::BaseFixture {
cudaStream_t stream() { return cudaStream_t(0); }
cudf::size_type size() { return cudf::size_type(100); }
cudf::data_type type() {
return cudf::data_type{cudf::experimental::type_to_id<T>()};
}
};
template <typename Timestamp>
struct compare_timestamp_elements_to_primitive_representation {
cudf::column_device_view primitives;
cudf::column_device_view timestamps;
compare_timestamp_elements_to_primitive_representation(
cudf::column_device_view& _primitives,
cudf::column_device_view& _timestamps)
: primitives(_primitives), timestamps(_timestamps) {}
__host__ __device__ bool operator()(const int32_t element_index) {
using Primitive = typename Timestamp::rep;
auto primitive = primitives.element<Primitive>(element_index);
auto timestamp = timestamps.element<Timestamp>(element_index);
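// A timestamp stores a duration since the UNIX epoch, so comparing count() with the raw
// primitive verifies that the underlying representation is unchanged.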
return primitive == timestamp.time_since_epoch().count();
}
};
TYPED_TEST_CASE(TimestampColumnTest, cudf::test::TimestampTypes);
TYPED_TEST(TimestampColumnTest,
TimestampDurationsMatchPrimitiveRepresentation) {
using T = TypeParam;
using Rep = typename T::rep;
using namespace cudf::test;
using namespace simt::std::chrono;
auto start = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto stop_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto timestamp_col = generate_timestamps<T>(
this->size(), time_point_ms(start), time_point_ms(stop_));
// round-trip through the host to copy `timestamp_col` values
// to a new fixed_width_column_wrapper `primitive_col`
std::vector<Rep> timestamp_col_data;
std::vector<cudf::bitmask_type> timestamp_col_mask;
std::tie(timestamp_col_data, timestamp_col_mask) = to_host<Rep>(timestamp_col);
auto primitive_col = fixed_width_column_wrapper<Rep>(
timestamp_col_data.begin(), timestamp_col_data.end());
thrust::device_vector<int32_t> indices(this->size());
thrust::sequence(indices.begin(), indices.end());
EXPECT_TRUE(
thrust::all_of(indices.begin(), indices.end(),
compare_timestamp_elements_to_primitive_representation<T>{
*cudf::column_device_view::create(primitive_col),
*cudf::column_device_view::create(timestamp_col)}));
}
template <typename Timestamp>
struct compare_timestamp_elements {
gdf_binary_operator comp;
cudf::column_device_view lhs;
cudf::column_device_view rhs;
compare_timestamp_elements(gdf_binary_operator _comp,
cudf::column_device_view& _lhs,
cudf::column_device_view& _rhs)
: comp(_comp), lhs(_lhs), rhs(_rhs) {}
__host__ __device__ bool operator()(const int32_t element_index) {
auto lhs_elt = lhs.element<Timestamp>(element_index);
auto rhs_elt = rhs.element<Timestamp>(element_index);
switch (comp) {
case GDF_LESS:
return lhs_elt < rhs_elt;
case GDF_GREATER:
return lhs_elt > rhs_elt;
case GDF_LESS_EQUAL:
return lhs_elt <= rhs_elt;
case GDF_GREATER_EQUAL:
return lhs_elt >= rhs_elt;
default:
return false;
}
}
};
TYPED_TEST(TimestampColumnTest, TimestampsCanBeComparedInDeviceCode) {
using T = TypeParam;
using namespace cudf::test;
using namespace simt::std::chrono;
auto start_lhs = milliseconds(-2500000000000); // Sat, 11 Oct 1890 19:33:20 GMT
auto start_rhs = milliseconds(-2400000000000); // Tue, 12 Dec 1893 05:20:00 GMT
auto stop_lhs_ = milliseconds(2500000000000); // Mon, 22 Mar 2049 04:26:40 GMT
auto stop_rhs_ = milliseconds(2600000000000); // Wed, 22 May 2052 14:13:20 GMT
auto timestamp_lhs_col = generate_timestamps<T>(
this->size(), time_point_ms(start_lhs), time_point_ms(stop_lhs_));
auto timestamp_rhs_col = generate_timestamps<T>(
this->size(), time_point_ms(start_rhs), time_point_ms(stop_rhs_));
thrust::device_vector<int32_t> indices(this->size());
thrust::sequence(indices.begin(), indices.end());
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_LESS,
*cudf::column_device_view::create(timestamp_lhs_col),
*cudf::column_device_view::create(timestamp_rhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_GREATER,
*cudf::column_device_view::create(timestamp_rhs_col),
*cudf::column_device_view::create(timestamp_lhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_LESS_EQUAL,
*cudf::column_device_view::create(timestamp_lhs_col),
*cudf::column_device_view::create(timestamp_lhs_col)}));
EXPECT_TRUE(thrust::all_of(
indices.begin(), indices.end(),
compare_timestamp_elements<TypeParam>{
GDF_GREATER_EQUAL,
*cudf::column_device_view::create(timestamp_rhs_col),
*cudf::column_device_view::create(timestamp_rhs_col)}));
}
TYPED_TEST(TimestampColumnTest, TimestampFactoryNullMaskAsParm) {
rmm::device_buffer null_mask{
create_null_mask(this->size(), cudf::mask_state::ALL_NULL)};
auto column = cudf::make_timestamp_column(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
this->size(), null_mask, this->size(), this->stream(), this->mr());
EXPECT_EQ(column->type(),
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()});
EXPECT_EQ(column->size(), this->size());
EXPECT_EQ(this->size(), column->null_count());
EXPECT_TRUE(column->nullable());
EXPECT_TRUE(column->has_nulls());
EXPECT_EQ(0, column->num_children());
}
TYPED_TEST(TimestampColumnTest, TimestampFactoryNullMaskAsEmptyParm) {
rmm::device_buffer null_mask{};
auto column = cudf::make_timestamp_column(
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()},
this->size(), null_mask, 0, this->stream(), this->mr());
EXPECT_EQ(column->type(),
cudf::data_type{cudf::experimental::type_to_id<TypeParam>()});
EXPECT_EQ(column->size(), this->size());
EXPECT_EQ(0, column->null_count());
EXPECT_FALSE(column->nullable());
EXPECT_FALSE(column->has_nulls());
EXPECT_EQ(0, column->num_children());
}
|
ef8adbba787ce25f6330bb2e1ad9c41e9e061a35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#define NUM_ELEMENTS 512
// **===----------------- MP3 - Modify this function ---------------------===**
//! @param g_data input data in global memory
// result is expected in index 0 of g_data
//! @param n input number of elements to scan from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, int n)
{
__shared__ float s_data[NUM_ELEMENTS];
int tid = threadIdx.x;
int myIndex = threadIdx.x + blockIdx.x*blockDim.x;
//s_data[tid] = 0.0;
s_data[tid] = g_data[myIndex];
__syncthreads();
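// Tree-based reduction in shared memory: each iteration halves the number of active
// threads and adds the upper half of the array onto the lower half; thread 0 then
// writes this block's partial sum back to g_data[blockIdx.x].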
for(int s = blockDim.x / 2; s > 0; s >>=1)
{
if(tid < s)
{
s_data[tid] += s_data[tid + s];
}
__syncthreads();
}
if (tid == 0)
{
g_data[blockIdx.x] = s_data[tid];
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
| ef8adbba787ce25f6330bb2e1ad9c41e9e061a35.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#define NUM_ELEMENTS 512
// **===----------------- MP3 - Modify this function ---------------------===**
//! @param g_data input data in global memory
// result is expected in index 0 of g_data
//! @param n input number of elements to scan from input data
// **===------------------------------------------------------------------===**
__global__ void reduction(float *g_data, int n)
{
__shared__ float s_data[NUM_ELEMENTS];
int tid = threadIdx.x;
int myIndex = threadIdx.x + blockIdx.x*blockDim.x;
//s_data[tid] = 0.0;
s_data[tid] = g_data[myIndex];
__syncthreads();
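// Tree-based reduction in shared memory: each iteration halves the number of active
// threads and adds the upper half of the array onto the lower half; thread 0 then
// writes this block's partial sum back to g_data[blockIdx.x].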
for(int s = blockDim.x / 2; s > 0; s >>=1)
{
if(tid < s)
{
s_data[tid] += s_data[tid + s];
}
__syncthreads();
}
if (tid == 0)
{
g_data[blockIdx.x] = s_data[tid];
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
|
87e90361c8c34c3a71a1282e2e3d7aaae41e3237.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GPU_Car_Drive.h"
#include <despot/GPUcore/thread_globals.h>
#include <ped_pomdp.h>
#include <despot/util/coord.h>
#include <hip/driver_types.h>
#include <stddef.h>
#include "despot/GPUutil/GPUmemorypool.h"
#include "despot/GPUutil/GPUrandom.h"
#include "GPU_CarUpperBound.h"
#define THREADDIM 128
using namespace std;
using namespace despot;
using namespace Globals;
static GPU_MemoryPool<Dvc_PomdpState>* gpu_mainstate_pool_=NULL;
static GPU_MemoryPool<Dvc_PedStruct>* gpu_ped_pool_=NULL;
static Dvc_PedStruct **Dvc_tempPeds=NULL;
static Dvc_PedStruct **Hst_tempPeds=NULL;
static float** Dvc_temp_weight=NULL;
static int** Hst_temp_IDs=NULL;
static float** Hst_temp_weight=NULL;
static Dvc_PomdpState** Hst_temp_mainstates=NULL;
static Dvc_PomdpState* Managed_rootnode_particles=NULL;
DEVICE Dvc_Path* path=NULL;
DEVICE Dvc_COORD* goals=NULL;
DEVICE double freq=0;
DEVICE double in_front_angle_cos=0;
using namespace despot;
/* ==============================================================================
* Dvc_PomdpState class
* ==============================================================================*/
DEVICE Dvc_PomdpState::Dvc_PomdpState():num(0), peds(NULL)
{
}
DEVICE Dvc_PomdpState::Dvc_PomdpState(const Dvc_PomdpState& src)
{
*this=src;
}
/**
* CopyPeds_to_Particles kernel:
* Copy pedestrian states from a combined source list (in contiguous memory) into the destination particles
* This is used when copying host particles to the GPU
*/
__global__ void CopyPeds_to_Particles(Dvc_PomdpState* dvc_particles, Dvc_PedStruct* src)
{
int scenarioID=blockIdx.x;
int ped_id=threadIdx.x;
Dvc_PomdpState* Dvc_i=dvc_particles+scenarioID;
Dvc_PedStruct* Src_i=src+scenarioID*Dvc_ModelParams::N_PED_IN;
if(ped_id<Dvc_i->num)
Dvc_i->peds[ped_id]=Src_i[ped_id];
}
/**
* CopyPeds_to_list kernel:
* Copy pedestrian states in particles into a combined contiguous memory list
* This is for copying back to CPU
*/
__global__ void CopyPeds_to_list(const Dvc_PomdpState* particles, Dvc_PedStruct* peds_list)
{
int scenarioID=blockIdx.x;
int ped_id=threadIdx.x;
const Dvc_PomdpState* Dvc_i = particles + scenarioID;
Dvc_PedStruct* Des_i = peds_list + scenarioID * Dvc_ModelParams::N_PED_IN;
if(ped_id<Dvc_i->num)
Des_i[ped_id]=Dvc_i->peds[ped_id];
}
HOST void Dvc_PomdpState::CopyMainStateToGPU(Dvc_PomdpState* dvc_particles, int scenarioID, const PomdpState* hst_particle)
{
dvc_particles[scenarioID].car.dist_travelled=hst_particle->car.dist_travelled;
dvc_particles[scenarioID].car.pos=hst_particle->car.pos;
dvc_particles[scenarioID].car.vel=hst_particle->car.vel;
dvc_particles[scenarioID].num=hst_particle->num;
dvc_particles[scenarioID].weight=hst_particle->weight;
dvc_particles[scenarioID].state_id=hst_particle->state_id;
dvc_particles[scenarioID].scenario_id=hst_particle->scenario_id;
int Data_block_size=ModelParams::N_PED_IN;
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
memcpy((void*)(Hst_tempPeds[GetCurrentStream()]+Data_block_size*scenarioID),
(const void*)hst_particle->peds,
Data_block_size*sizeof(Dvc_PedStruct));
}
else
{
memcpy((void*)(Hst_tempPeds[0]+Data_block_size*scenarioID),
(const void*)hst_particle->peds,
Data_block_size*sizeof(Dvc_PedStruct));
}
}
HOST void Dvc_PomdpState::CopyPedsToGPU(Dvc_PomdpState* dvc_particles, int NumParticles, bool deep_copy)
{
if(deep_copy)
{
int Data_size=NumParticles*ModelParams::N_PED_IN;
dim3 grid1(NumParticles,1);dim3 threads1(ModelParams::N_PED_IN,1);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
HANDLE_ERROR(hipMemcpyAsync((void*)Dvc_tempPeds[GetCurrentStream()],
(const void*)Hst_tempPeds[GetCurrentStream()],
Data_size*sizeof(Dvc_PedStruct),
hipMemcpyHostToDevice,((hipStream_t*)StreamManager::MANAGER.cuda_streams)[GetCurrentStream()]));
logd << "dvc_particles=" << dvc_particles<< ",Dvc_tempPeds[i]=" << Dvc_tempPeds[GetCurrentStream()] <<", GetCurrentStream()="<< GetCurrentStream()<< endl;
hipLaunchKernelGGL(( CopyPeds_to_Particles), dim3(grid1), dim3(threads1), 0, ((hipStream_t*)StreamManager::MANAGER.cuda_streams)[GetCurrentStream()],
dvc_particles,Dvc_tempPeds[GetCurrentStream()]);
}
else
{
HANDLE_ERROR(hipMemcpy((void*)Dvc_tempPeds[0],
(const void*)Hst_tempPeds[0],
Data_size*sizeof(Dvc_PedStruct),
hipMemcpyHostToDevice));
logd << "dvc_particles=" << dvc_particles<< ",Dvc_tempPeds[0]=" << Dvc_tempPeds[0]<< endl;
hipLaunchKernelGGL(( CopyPeds_to_Particles), dim3(grid1), dim3(threads1), 0, 0, dvc_particles,Dvc_tempPeds[0]);
}
//HANDLE_ERROR( hipDeviceSynchronize());
}
}
HOST void Dvc_PomdpState::ReadMainStateBackToCPU(const Dvc_PomdpState* dvc_particles, PomdpState* hst_particle)
{
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
HANDLE_ERROR(hipMemcpy((void*)Hst_temp_mainstates[ThreadID], (const void*)dvc_particles, sizeof(Dvc_PomdpState), hipMemcpyDeviceToHost));
hst_particle->car.dist_travelled=Hst_temp_mainstates[ThreadID]->car.dist_travelled;
hst_particle->car.pos=Hst_temp_mainstates[ThreadID]->car.pos;
hst_particle->car.vel=Hst_temp_mainstates[ThreadID]->car.vel;
hst_particle->num=Hst_temp_mainstates[ThreadID]->num;
hst_particle->weight=Hst_temp_mainstates[ThreadID]->weight;
hst_particle->state_id=Hst_temp_mainstates[ThreadID]->state_id;
hst_particle->scenario_id=Hst_temp_mainstates[ThreadID]->scenario_id;
}
HOST void Dvc_PomdpState::ReadPedsBackToCPU(const Dvc_PomdpState* dvc_particles,
std::vector<State*> hst_particles, bool deep_copy)
{
if(deep_copy)
{
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
int NumParticles=hst_particles.size();
int Data_size=NumParticles*ModelParams::N_PED_IN;
dim3 grid1(NumParticles,1);dim3 threads1(ModelParams::N_PED_IN,1);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
hipLaunchKernelGGL(( CopyPeds_to_list), dim3(grid1), dim3(threads1), 0, ((hipStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID],
dvc_particles,Dvc_tempPeds[ThreadID]);
HANDLE_ERROR(hipMemcpyAsync((void*)Hst_tempPeds[ThreadID],
(const void*)Dvc_tempPeds[ThreadID],
Data_size*sizeof(Dvc_PedStruct),
hipMemcpyDeviceToHost,((hipStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID]));
hipStreamSynchronize(((hipStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID]);
}
else
{
hipLaunchKernelGGL(( CopyPeds_to_list), dim3(grid1), dim3(threads1), 0, 0, dvc_particles,Dvc_tempPeds[0]);
HANDLE_ERROR(hipMemcpy((void*)Hst_tempPeds[0],
(const void*)Dvc_tempPeds[0],
Data_size*sizeof(Dvc_PedStruct),
hipMemcpyDeviceToHost));
}
int Data_block_size=ModelParams::N_PED_IN;
for(int i=0;i<NumParticles;i++)
{
PomdpState* car_state=static_cast<PomdpState*>(hst_particles[i]);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
memcpy((void*)car_state->peds,
(const void*)(Hst_tempPeds[ThreadID]+Data_block_size*i),
Data_block_size*sizeof(Dvc_PedStruct));
}
else
{
memcpy((void*)car_state->peds,
(const void*)(Hst_tempPeds[0]+Data_block_size*i),
Data_block_size*sizeof(Dvc_PedStruct));
}
}
}
}
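/**
* CopyParticles kernel:
* Compact the particles selected by particle_IDs from the full source array into des,
* reset the accumulated weight, set the random-stream position when streams is given,
* and, unless alpha-vector tracking is enabled, atomically accumulate the copied
* particles' weights into *weight.
*/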
__global__ void CopyParticles(Dvc_PomdpState* des,Dvc_PomdpState* src,
float* weight,int* particle_IDs,int num_particles,
Dvc_RandomStreams* streams, int stream_pos
)
{
int pos=blockIdx.x*blockDim.x+threadIdx.x;
if(pos==0)
{
weight[0]=0;
if(streams) streams->position_=stream_pos;
}
if(pos < num_particles)
{
int scenarioID=particle_IDs[pos];
Dvc_PomdpState* src_i=src+scenarioID;//src is a full length array for all particles
Dvc_PomdpState* des_i=des+pos;//des is short, only for the new partition
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
des_i->num=src_i->num;
des_i->weight=src_i->weight;
des_i->state_id=src_i->state_id;
des_i->scenario_id=src_i->scenario_id;
for(int i=0;i<src_i->num;i++)
{
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].vel=src_i->peds[i].vel;
}
if(!Dvc_config->track_alpha_vector)
{
//Accumulate weight of the particles
atomicAdd(weight, des_i->weight);
}
}
}
void PedPomdp::CreateMemoryPool() const
{
if(gpu_mainstate_pool_==NULL)
gpu_mainstate_pool_=new GPU_MemoryPool<Dvc_PomdpState>;
if(gpu_ped_pool_==NULL)
gpu_ped_pool_=new GPU_MemoryPool<Dvc_PedStruct>;
}
void PedPomdp::DestroyMemoryPool(MEMORY_MODE mode) const
{
switch(mode)
{
case DESTROY:
if(gpu_mainstate_pool_){delete gpu_mainstate_pool_;gpu_mainstate_pool_=NULL;}
if(gpu_ped_pool_){delete gpu_ped_pool_;gpu_ped_pool_=NULL;}
break;
case RESET:
if(gpu_mainstate_pool_ ){ gpu_mainstate_pool_->ResetChuncks();};
if(gpu_ped_pool_ ){ gpu_ped_pool_->ResetChuncks();};
break;
}
}
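/**
* LinkPeds kernel:
* Point each particle's peds list at its own contiguous slice of the
* pre-allocated pedestrian memory block.
*/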
__global__ void LinkPeds(Dvc_PomdpState* state, Dvc_PedStruct* peds_memory, int numParticles)
{
for(int i=0;i<numParticles;i++)
{
state[i].peds=peds_memory+i*Dvc_ModelParams::N_PED_IN;
}
}
Dvc_State* PedPomdp::AllocGPUParticles(int numParticles, MEMORY_MODE mode, Dvc_State*** particles_for_all_actions) const
{
clock_t start=clock();
dim3 grid((numParticles+THREADDIM-1)/THREADDIM,1); dim3 threads(THREADDIM,1);
int num_threads=1;
if(Globals::config.use_multi_thread_)
{
num_threads = Globals::config.NUM_THREADS;
}
Dvc_PedStruct* node_particle_peds;
switch(mode)
{
case INIT:
CreateMemoryPool();
/* Intermediate pedestrian container for copying pedestrians in host particles to device particles */
if(Dvc_tempPeds == NULL && Hst_tempPeds == NULL){
Dvc_tempPeds=new Dvc_PedStruct*[num_threads];
Hst_tempPeds=new Dvc_PedStruct*[num_threads];
for(int i=0;i<num_threads;i++)
{
HANDLE_ERROR(hipMalloc((void**)&Dvc_tempPeds[i],numParticles*ModelParams::N_PED_IN*sizeof(Dvc_PedStruct) ));
HANDLE_ERROR(hipHostMalloc((void**)&Hst_tempPeds[i],numParticles*ModelParams::N_PED_IN*sizeof(Dvc_PedStruct),0 ));
}
}
cout<<"numParticles="<<numParticles<<endl;
if(particles_for_all_actions[0] == NULL){
particles_for_all_actions[0]=new Dvc_State*[num_threads];
//Allocate pedestrian memory separately
Dvc_PedStruct* peds_tmp=gpu_ped_pool_->Allocate((NumActions()*num_threads)*numParticles*ModelParams::N_PED_IN);
for(int i=0;i<num_threads;i++)
{
HANDLE_ERROR(hipMalloc((void**)&particles_for_all_actions[0][i],
NumActions()*numParticles*sizeof(Dvc_PomdpState)));
//Link pre-allocated pedestrian memory
hipLaunchKernelGGL(( LinkPeds), dim3(dim3(numParticles,1)), dim3(dim3(ModelParams::N_PED_IN,1)), 0, 0,
static_cast<Dvc_PomdpState*>(particles_for_all_actions[0][i]),
peds_tmp+(NumActions()*i)*numParticles*ModelParams::N_PED_IN,
NumActions()*numParticles);
}
//Record the ped memory used by the pre-allocated lists
//never reuse these memory for vnode particles
gpu_ped_pool_->RecordHead();
}
/*Intermediate memory for copying particle IDs to device memory
hipHostMalloc enables the copying to interleave with kernel executions*/
Hst_temp_IDs=new int*[num_threads];
for(int i=0;i<num_threads;i++)
{
if(Globals::config.track_alpha_vector)
{
hipHostMalloc(&Hst_temp_IDs[i],(2+ Globals::config.num_scenarios + Globals::config.num_obs)*NumActions()*sizeof(int),0);
}
else
{
hipHostMalloc(&Hst_temp_IDs[i],numParticles*sizeof(int),0);
}
}
/*Intermediate memory for copying weights to device memory.
hipHostMalloc enables the copying to interleave with kernel executions*/
Hst_temp_weight=new float*[num_threads];
for(int i=0;i<num_threads;i++)
hipHostMalloc(&Hst_temp_weight[i],1*sizeof(float),0);
Dvc_temp_weight=new float*[num_threads];
for(int i=0;i<num_threads;i++)
HANDLE_ERROR(hipMalloc(&Dvc_temp_weight[i], sizeof(float)));
/*Intermediate memory for copying main memory of particle (everything except pedestrians) from device back to host
hipHostMalloc enables the copying to interleave with kernel executions*/
Hst_temp_mainstates=new Dvc_PomdpState*[num_threads];
for(int i=0;i<num_threads;i++)
HANDLE_ERROR(hipHostMalloc((void**)&Hst_temp_mainstates[i],1*sizeof(Dvc_PomdpState),0));
/* No node particle allocated */
return NULL;
case ALLOC_ROOT:
/*Intermediate managed memory for root node particles.
* Managed memory enables data copying between CPU and GPU without launching memcpy (which is expensive)
*/
HANDLE_ERROR(hipMallocManaged((void**)&Managed_rootnode_particles, numParticles*sizeof(Dvc_PomdpState)));
node_particle_peds = gpu_ped_pool_->Allocate(numParticles*ModelParams::N_PED_IN);
/* Link pedestrian lists to the main memory of particles */
hipLaunchKernelGGL(( LinkPeds), dim3(dim3(numParticles,1)), dim3(dim3(ModelParams::N_PED_IN,1)), 0, 0, Managed_rootnode_particles, node_particle_peds, numParticles);
HANDLE_ERROR(hipDeviceSynchronize());
return Managed_rootnode_particles;
case ALLOC:
/* Allocate vnode particles: main memory and the pedestrian lists */
Dvc_PomdpState* vnode_particles = gpu_mainstate_pool_->Allocate(numParticles);
Dvc_PedStruct* vnode_particle_peds = gpu_ped_pool_->Allocate(numParticles*ModelParams::N_PED_IN);
/* Link pedestrian lists to the main memory of particles */
hipLaunchKernelGGL(( LinkPeds), dim3(dim3(numParticles,1)), dim3(dim3(ModelParams::N_PED_IN,1)), 0, 0, vnode_particles, vnode_particle_peds, numParticles);
HANDLE_ERROR(hipDeviceSynchronize());
return vnode_particles;
};
return NULL;
}
void PedPomdp::CopyGPUParticlesFromParent(Dvc_State* des,Dvc_State* src,int src_offset,
int* dvc_particle_IDs,int num_particles,bool interleave,
Dvc_RandomStreams* streams, int stream_pos,
void* cudaStream, int shift) const
{
dim3 grid((num_particles+THREADDIM-1)/THREADDIM,1); dim3 threads(THREADDIM,1);
if(num_particles<THREADDIM)
{
grid.x=1;grid.y=1;threads.x=num_particles;
}
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
if(cudaStream)
{
hipLaunchKernelGGL(( CopyParticles), dim3(grid), dim3(threads),0, *(hipStream_t*)cudaStream, static_cast<Dvc_PomdpState*>(des),
static_cast<Dvc_PomdpState*>(src)+src_offset,Dvc_temp_weight[(ThreadID+shift)%Globals::config.NUM_THREADS],
dvc_particle_IDs,num_particles, streams,stream_pos);
if(!interleave)
;
}
else
{
hipLaunchKernelGGL(( CopyParticles), dim3(grid), dim3(threads),0, 0, static_cast<Dvc_PomdpState*>(des),
static_cast<Dvc_PomdpState*>(src)+src_offset,Dvc_temp_weight[ThreadID],
dvc_particle_IDs,num_particles, streams,stream_pos);
if(!interleave)
HANDLE_ERROR(hipDeviceSynchronize());
}
}
Dvc_State* PedPomdp::GetPointerToParticleList(int offset, Dvc_State* full_list) const
{
return static_cast<Dvc_PomdpState*>(full_list)+ offset;
}
Dvc_State* PedPomdp::CopyParticlesToGPU(Dvc_State* dvc_particles, const std::vector<State*>& particles, bool deep_copy) const
//dvc_particles: managed device memory storing particles
// deep_copy: option on whether to copy the list objects inside particles
{
auto start = Time::now();
for (int i=0;i<particles.size();i++)
{
const PomdpState* src=static_cast<const PomdpState*>(particles[i]);
Dvc_PomdpState::CopyMainStateToGPU(static_cast<const Dvc_PomdpState*>(dvc_particles),src->scenario_id,src);
}
Dvc_PomdpState::CopyPedsToGPU(static_cast<const Dvc_PomdpState*>(dvc_particles),particles.size());
return dvc_particles;
}
void PedPomdp::CopyParticleIDsToGPU( int* Dvc_ptr, const std::vector<int>& particleIDs, void *cudaStream) const
{
if(cudaStream)
{
int ThreadID=Globals::MapThread(this_thread::get_id());
memcpy(Hst_temp_IDs[ThreadID],particleIDs.data(),particleIDs.size()*sizeof(int));
HANDLE_ERROR(hipMemcpyAsync(Dvc_ptr,Hst_temp_IDs[ThreadID],particleIDs.size()*sizeof(int), hipMemcpyHostToDevice,*(hipStream_t*)cudaStream));
}
else
{
logd << "Dvc_ptr = "<< Dvc_ptr << " particleIDs.size() = " << particleIDs.size()<< " cudaStream = "<< cudaStream<< endl;
HANDLE_ERROR(hipMemcpy(Dvc_ptr,particleIDs.data(),particleIDs.size()*sizeof(int), hipMemcpyHostToDevice));
}
}
void PedPomdp::DeleteGPUParticles( MEMORY_MODE mode, Dvc_State** particles_for_all_actions ) const
{
int num_threads=1;
switch (mode){
case DESTROY:
if(Globals::config.use_multi_thread_)
{
num_threads=Globals::config.NUM_THREADS;
}
for(int i=0;i<num_threads;i++)
{
if(particles_for_all_actions[i]!=NULL)
{HANDLE_ERROR(hipFree(particles_for_all_actions[i]));particles_for_all_actions[i]=NULL;}
}
if(particles_for_all_actions)delete [] particles_for_all_actions;particles_for_all_actions=NULL;
for(int i=0;i<num_threads;i++)
{
hipHostFree(Hst_temp_IDs[i]);
}
delete [] Hst_temp_IDs;
for(int i=0;i<num_threads;i++)
{
hipHostFree(Hst_temp_weight[i]);
}
delete [] Hst_temp_weight;
for(int i=0;i<num_threads;i++)
{
hipFree(Dvc_temp_weight[i]);
}
delete [] Dvc_temp_weight;
for(int i=0;i<num_threads;i++)
{
hipFree(Dvc_tempPeds[i]);
hipHostFree(Hst_tempPeds[i]);
hipHostFree(Hst_temp_mainstates[i]);
}
delete [] Dvc_tempPeds;
delete [] Hst_tempPeds;
delete [] Hst_temp_mainstates;
break;
case RESET:
HANDLE_ERROR(hipFree(static_cast<Dvc_PomdpState*>(Managed_rootnode_particles)));
break;
};
DestroyMemoryPool(mode);
}
DEVICE float Dvc_PedPomdpParticleUpperBound1::Value(
const Dvc_State* particles, int scenarioID, Dvc_History& history) {
return Dvc_ModelParams::GOAL_REWARD / (1 - Dvc_Globals::Dvc_Discount(Dvc_config));
}
DEVICE bool Dvc_PedPomdp::Dvc_Step(Dvc_State& state, float rand_num, int action, float& reward,
int* obs) {
Dvc_PomdpState& pedpomdp_state = static_cast<Dvc_PomdpState&>(state);//copy contents, link cells to existing ones
__shared__ int iscollision[32];
if(FIX_SCENARIO==1 || GPUDoPrint)
if(GPUDoPrint && pedpomdp_state.scenario_id==PRINT_ID && blockIdx.x==ACTION_ID && threadIdx.y==0){
printf("(GPU) Before step: scenario=%d \n", pedpomdp_state.scenario_id);
printf("action= %d\n ",action);
printf("Before step:\n");
int pos=pedpomdp_state.car.pos;
printf("car_pos= %d ",pos);
printf("trav_dist=%f\n",pedpomdp_state.car.dist_travelled);
printf("car_vel= %f\n",pedpomdp_state.car.vel);
for(int i=0;i<pedpomdp_state.num;i++)
{
printf("ped %d pos_x= %f pos_y=%f\n",i,
pedpomdp_state.peds[i].pos.x,pedpomdp_state.peds[i].pos.y);
}
}
bool terminal=false;
reward = 0;
unsigned long long int Temp=INIT_QUICKRANDSEED;
/* Termination checking */
if(threadIdx.y==0)
{
// Terminate upon reaching goal
if (pedpomdp_state.car.dist_travelled > Dvc_ModelParams::GOAL_TRAVELLED-1e-4
|| pedpomdp_state.car.pos >= path->size_-1) {
reward = Dvc_ModelParams::GOAL_REWARD;
terminal= true;
}
}
/* Collision checking */
iscollision[threadIdx.x]=false;
__syncthreads();
if(!terminal)
{
const int car = pedpomdp_state.car.pos;
const Dvc_COORD& car_pos = path->way_points_[car];
const Dvc_COORD& forward_pos = path->way_points_[path->forward(car, 1.0)];
if(threadIdx.y<pedpomdp_state.num){
const Dvc_COORD& pedpos = pedpomdp_state.peds[threadIdx.y].pos;
bool collide_ped=false;
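// Project the pedestrian offset HM onto the car heading HN and its left normal HL,
// then compare the projections against the front/back and side margins of the car footprint.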
float HNx = forward_pos.x - car_pos.x, // car direction
HNy = forward_pos.y - car_pos.y;
float HMx = pedpos.x - car_pos.x,
HMy = pedpos.y - car_pos.y;
/// car geometry for golfcart
/* double car_width = 0.87,
car_length = 1.544;
double safe_margin = 0.92, side_safe_margin = 0.4, back_safe_margin = 0.33,
side_margin = car_width / 2.0 + side_safe_margin,
front_margin = car_length/2.0 + safe_margin,
back_margin = car_length/2.0 + back_safe_margin;
*/
/// end golfcart
/// car geometry for audi r8
/*double car_width = 2.0,
car_length = 4.4;
double safe_margin = 0.8, side_safe_margin = 0.35, back_safe_margin = 0.2,
side_margin = car_width / 2.0 + side_safe_margin,
front_margin = 3.6 + safe_margin,
back_margin = 0.8 + back_safe_margin;*/
/// end audi r8
/// car geometry for pomdp car
double car_width = 1.2,
car_length = 2.2;
double safe_margin = 0.3,
side_margin = car_width / 2.0 + safe_margin,
front_margin = safe_margin,
back_margin = car_length + safe_margin;
/// end pomdp car
float HLx = - HNy, // direction after 90 degree anticlockwise rotation
HLy = HNx;
float HM_HN = HMx * HNx + HMy * HNy, // HM . HN
HN_HN = HNx * HNx + HNy * HNy; // HN . HN
if (HM_HN >= 0 && HM_HN * HM_HN > HN_HN * front_margin * front_margin)
collide_ped = false;
else if (HM_HN <= 0 && HM_HN * HM_HN > HN_HN * back_margin * back_margin)
collide_ped = false;
else
{
float HM_HL = HMx * HLx + HMy * HLy, // HM . HL
HL_HL = HLx * HLx + HLy * HLy; // HL . HL
collide_ped= HM_HL * HM_HL <= HL_HL * side_margin * side_margin;
}
atomicOr(iscollision+threadIdx.x, collide_ped);
}
}
__syncthreads(); // Synchronize the block to wait for collision checking with all peds (parallelized in the Y dimension) to finish.
if(threadIdx.y==0 && !terminal)
{
/* Terminate if collision is detected */
if(pedpomdp_state.car.vel > 0.001 && iscollision[threadIdx.x] ) { /// collision occurs only when car is moving
reward= Dvc_ModelParams::CRASH_PENALTY *
(pedpomdp_state.car.vel * pedpomdp_state.car.vel +
Dvc_ModelParams::REWARD_BASE_CRASH_VEL);
if(action == ACT_DEC) reward += 0.1;
terminal= true;
}
/* Compute reward */
if(!terminal)
{
// Smoothness penalty
reward += (action == ACT_DEC || action == ACT_ACC) ? -0.1 : 0.0;
reward += Dvc_ModelParams::REWARD_FACTOR_VEL *
(pedpomdp_state.car.vel - Dvc_ModelParams::VEL_MAX) / Dvc_ModelParams::VEL_MAX;
float acc = (action == ACT_ACC) ? Dvc_ModelParams::AccSpeed :
((action == ACT_CUR) ? 0 : (-Dvc_ModelParams::AccSpeed));
/* State transition: car */
float dist = pedpomdp_state.car.vel / freq;
int nxt = path->forward(pedpomdp_state.car.pos, dist);
pedpomdp_state.car.pos = nxt;
pedpomdp_state.car.dist_travelled += dist;
const float N = Dvc_ModelParams::NOISE_ROBVEL;
if (N>0) {
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
float prob = rand_num;
if (prob > N) {
pedpomdp_state.car.vel += acc / freq;
}
} else {
pedpomdp_state.car.vel += acc / freq;
}
pedpomdp_state.car.vel = max(min(pedpomdp_state.car.vel, Dvc_ModelParams::VEL_MAX), 0.0);
}
}
__syncthreads();
if(!terminal)
{
/* State transition: peds */
if(threadIdx.y<pedpomdp_state.num)
{
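// Advance the shared quick-random sequence so that each pedestrian thread
// (indexed by threadIdx.y) consumes its own random numbers.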
int i=0;
while(i<threadIdx.y)
{
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
i++;
}
if(threadIdx.y!=0 && FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
const Dvc_COORD& goal = goals[pedpomdp_state.peds[threadIdx.y].goal];
if (abs(goal.x+1)<1e-5 && abs(goal.y+1)<1e-5) { //stop intention, ped doesn't move
;
}
else
{
// Straight-line model with Gaussian noise on direction
Dvc_Vector goal_vec(goal.x - pedpomdp_state.peds[threadIdx.y].pos.x, goal.y - pedpomdp_state.peds[threadIdx.y].pos.y);
float a = goal_vec.GetAngle();
float noise = sqrt(-2 * log(rand_num));
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
noise *= cos(2 * M_PI * rand_num)* Dvc_ModelParams::NOISE_GOAL_ANGLE;
a += noise;
Dvc_Vector move(a, pedpomdp_state.peds[threadIdx.y].vel/freq, 0);
pedpomdp_state.peds[threadIdx.y].pos.x += move.dw;
pedpomdp_state.peds[threadIdx.y].pos.y += move.dh;
}
}
}
__syncthreads();
if(threadIdx.y==0 && obs!=NULL)//for each particle in the thread block
{
/* generate observations by discretizing the observable part of the state */
if(!terminal)
{
int i=0;
obs[i++]=2+2*pedpomdp_state.num;
obs[i++] = int(pedpomdp_state.car.pos);
obs[i++] = int((pedpomdp_state.car.vel+1e-5) / Dvc_ModelParams::vel_rln);
for(int j = 0; j < pedpomdp_state.num; j ++) {
obs[i++] = int(pedpomdp_state.peds[j].pos.x / Dvc_ModelParams::pos_rln);
obs[i++] = int(pedpomdp_state.peds[j].pos.y / Dvc_ModelParams::pos_rln);
}
}
else
{
int i=0;
obs[i++]=0;
obs[i++] = 0;
obs[i++] = 0;
for(int j = 0; j < pedpomdp_state.num; j ++) {
obs[i++] = 0;
obs[i++] = 0;
}
}
}
if(!terminal && GPUDoPrint && pedpomdp_state.scenario_id==PRINT_ID && blockIdx.x==ACTION_ID && threadIdx.y==0){
printf("(GPU) After step: scenario=%d \n", pedpomdp_state.scenario_id);
printf("rand=%f, action=%d \n", rand_num, action);
printf("After step:\n");
printf("Reward=%f\n",reward);
int pos=pedpomdp_state.car.pos;
printf("car pos= %d ",pos);
printf("dist=%f\n",pedpomdp_state.car.dist_travelled);
printf("car vel= %f\n",pedpomdp_state.car.vel);
for(int i=0;i<pedpomdp_state.num;i++)
{
printf("ped %d pos_x= %f pos_y=%f\n",i,
pedpomdp_state.peds[i].pos.x,pedpomdp_state.peds[i].pos.y);
}
}
return terminal;
}
DEVICE int Dvc_PedPomdp::NumActions() {
return 3;
}
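/**
* Observation likelihood: an unnormalized Gaussian (stddev = 1.0) of the squared
* distance between the discretized observed pedestrian positions and the
* pedestrian positions stored in the particle.
*/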
DEVICE float Dvc_PedPomdp::Dvc_ObsProbInt(int* obs, Dvc_State& state, int action)
{
//const PomdpState& state = static_cast<const PomdpState&>(s);
Dvc_PomdpState& pedpomdp_state = static_cast<Dvc_PomdpState&>(state);//copy contents, link cells to existing ones
//PrintState(state);
float prob = 1.0;
float b = 0.0;
for (int j = 0; j < pedpomdp_state.num; j ++) {
b = b + ((obs[2*j + 3]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.x )*((obs[2*j + 3]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.x );
b = b + ((obs[2*j + 4]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.y )*((obs[2*j + 4]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.y );
//std::cout << j << " obs vec " << obs[2*j + 2]<< "," << obs[2*j + 3] << ")b= " << b<< std::endl;
}
float stddev = 1.0;
b = - b / (2.0* stddev*stddev);
//std::cout << "b= " << b << std::endl;
return expf(b);
}
DEVICE void Dvc_PedPomdp::Dvc_Copy_NoAlloc(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) {
/*Pass member values, assign member pointers to existing state pointer*/
const Dvc_PomdpState* src_i= static_cast<const Dvc_PomdpState*>(src)+pos;
if(!offset_des) pos=0;
Dvc_PomdpState* des_i= static_cast<const Dvc_PomdpState*>(des)+pos;
des_i->weight=src_i->weight;
des_i->scenario_id=src_i->scenario_id;
des_i->num=src_i->num;
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
for(int i=0;i< des_i->num;i++)
{
des_i->peds[i].vel=src_i->peds[i].vel;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
}
}
DEVICE void Dvc_PedPomdp::Dvc_Copy_ToShared(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) {
/*Pass member values, assign member pointers to existing state pointer*/
const Dvc_PomdpState* src_i= static_cast<const Dvc_PomdpState*>(src)+pos;
if(!offset_des) pos=0;
Dvc_PomdpState* des_i= static_cast<const Dvc_PomdpState*>(des)+pos;
des_i->weight=src_i->weight;
des_i->scenario_id=src_i->scenario_id;
des_i->num=src_i->num;
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
des_i->peds=(Dvc_PedStruct*)((void*)(des_i)+3*sizeof(Dvc_PedStruct));
for(int i=0;i< des_i->num;i++)
{
des_i->peds[i].vel=src_i->peds[i].vel;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
}
}
DEVICE Dvc_State* Dvc_PedPomdp::Dvc_Get(Dvc_State* particles, int pos) {
Dvc_PomdpState* particle_i= static_cast<Dvc_PomdpState*>(particles)+pos;
return particle_i;
}
DEVICE float Dvc_PedPomdp::Dvc_GetCarVel(Dvc_State* particles, int pos) {
Dvc_PomdpState* particle_i= static_cast<Dvc_PomdpState*>(particles)+pos;
return particle_i->car.vel;
}
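// Default valued action: action 0 paired with the worst-case crash penalty at maximum speed.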
DEVICE Dvc_ValuedAction Dvc_PedPomdp::Dvc_GetBestAction() {
return Dvc_ValuedAction(0,
Dvc_ModelParams::CRASH_PENALTY * (Dvc_ModelParams::VEL_MAX*Dvc_ModelParams::VEL_MAX + Dvc_ModelParams::REWARD_BASE_CRASH_VEL));
}
void PedPomdp::ReadParticlesBackToCPU(std::vector<State*>& particles ,const Dvc_State* dvc_particles,
bool deepcopy) const
{
for (int i=0;i<particles.size();i++)
{
const Dvc_PomdpState* src=static_cast<const Dvc_PomdpState*>(dvc_particles)+i;
PomdpState* des=static_cast<PomdpState*>(particles[i]);
Dvc_PomdpState::ReadMainStateBackToCPU(src,des);
}
Dvc_PomdpState::ReadPedsBackToCPU(
static_cast<const Dvc_PomdpState*>(dvc_particles),
particles);
}
| 87e90361c8c34c3a71a1282e2e3d7aaae41e3237.cu | #include "GPU_Car_Drive.h"
#include <despot/GPUcore/thread_globals.h>
#include <ped_pomdp.h>
#include <despot/util/coord.h>
#include <driver_types.h>
#include <stddef.h>
#include "despot/GPUutil/GPUmemorypool.h"
#include "despot/GPUutil/GPUrandom.h"
#include "GPU_CarUpperBound.h"
#define THREADDIM 128
using namespace std;
using namespace despot;
using namespace Globals;
static GPU_MemoryPool<Dvc_PomdpState>* gpu_mainstate_pool_=NULL;
static GPU_MemoryPool<Dvc_PedStruct>* gpu_ped_pool_=NULL;
static Dvc_PedStruct **Dvc_tempPeds=NULL;
static Dvc_PedStruct **Hst_tempPeds=NULL;
static float** Dvc_temp_weight=NULL;
static int** Hst_temp_IDs=NULL;
static float** Hst_temp_weight=NULL;
static Dvc_PomdpState** Hst_temp_mainstates=NULL;
static Dvc_PomdpState* Managed_rootnode_particles=NULL;
DEVICE Dvc_Path* path=NULL;
DEVICE Dvc_COORD* goals=NULL;
DEVICE double freq=0;
DEVICE double in_front_angle_cos=0;
using namespace despot;
/* ==============================================================================
* Dvc_PomdpState class
* ==============================================================================*/
DEVICE Dvc_PomdpState::Dvc_PomdpState():num(0), peds(NULL)
{
}
DEVICE Dvc_PomdpState::Dvc_PomdpState(const Dvc_PomdpState& src)
{
*this=src;
}
/**
* CopyPeds_to_Particles kernel:
* Copy pedestrian states from a combined source list (in contiguous memory) into the destination particles
* This is used when copying host particles to the GPU
*/
__global__ void CopyPeds_to_Particles(Dvc_PomdpState* dvc_particles, Dvc_PedStruct* src)
{
int scenarioID=blockIdx.x;
int ped_id=threadIdx.x;
Dvc_PomdpState* Dvc_i=dvc_particles+scenarioID;
Dvc_PedStruct* Src_i=src+scenarioID*Dvc_ModelParams::N_PED_IN;
if(ped_id<Dvc_i->num)
Dvc_i->peds[ped_id]=Src_i[ped_id];
}
/**
* CopyPeds_to_list kernel:
* Copy pedestrian states in particles into a combined contiguous memory list
* This is for copying back to CPU
*/
__global__ void CopyPeds_to_list(const Dvc_PomdpState* particles, Dvc_PedStruct* peds_list)
{
int scenarioID=blockIdx.x;
int ped_id=threadIdx.x;
const Dvc_PomdpState* Dvc_i = particles + scenarioID;
Dvc_PedStruct* Des_i = peds_list + scenarioID * Dvc_ModelParams::N_PED_IN;
if(ped_id<Dvc_i->num)
Des_i[ped_id]=Dvc_i->peds[ped_id];
}
HOST void Dvc_PomdpState::CopyMainStateToGPU(Dvc_PomdpState* dvc_particles, int scenarioID, const PomdpState* hst_particle)
{
dvc_particles[scenarioID].car.dist_travelled=hst_particle->car.dist_travelled;
dvc_particles[scenarioID].car.pos=hst_particle->car.pos;
dvc_particles[scenarioID].car.vel=hst_particle->car.vel;
dvc_particles[scenarioID].num=hst_particle->num;
dvc_particles[scenarioID].weight=hst_particle->weight;
dvc_particles[scenarioID].state_id=hst_particle->state_id;
dvc_particles[scenarioID].scenario_id=hst_particle->scenario_id;
int Data_block_size=ModelParams::N_PED_IN;
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
memcpy((void*)(Hst_tempPeds[GetCurrentStream()]+Data_block_size*scenarioID),
(const void*)hst_particle->peds,
Data_block_size*sizeof(Dvc_PedStruct));
}
else
{
memcpy((void*)(Hst_tempPeds[0]+Data_block_size*scenarioID),
(const void*)hst_particle->peds,
Data_block_size*sizeof(Dvc_PedStruct));
}
}
HOST void Dvc_PomdpState::CopyPedsToGPU(Dvc_PomdpState* dvc_particles, int NumParticles, bool deep_copy)
{
if(deep_copy)
{
int Data_size=NumParticles*ModelParams::N_PED_IN;
dim3 grid1(NumParticles,1);dim3 threads1(ModelParams::N_PED_IN,1);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
HANDLE_ERROR(cudaMemcpyAsync((void*)Dvc_tempPeds[GetCurrentStream()],
(const void*)Hst_tempPeds[GetCurrentStream()],
Data_size*sizeof(Dvc_PedStruct),
cudaMemcpyHostToDevice,((cudaStream_t*)StreamManager::MANAGER.cuda_streams)[GetCurrentStream()]));
logd << "dvc_particles=" << dvc_particles<< ",Dvc_tempPeds[i]=" << Dvc_tempPeds[GetCurrentStream()] <<", GetCurrentStream()="<< GetCurrentStream()<< endl;
CopyPeds_to_Particles<<<grid1, threads1, 0, ((cudaStream_t*)StreamManager::MANAGER.cuda_streams)[GetCurrentStream()]>>>
(dvc_particles,Dvc_tempPeds[GetCurrentStream()]);
}
else
{
HANDLE_ERROR(cudaMemcpy((void*)Dvc_tempPeds[0],
(const void*)Hst_tempPeds[0],
Data_size*sizeof(Dvc_PedStruct),
cudaMemcpyHostToDevice));
logd << "dvc_particles=" << dvc_particles<< ",Dvc_tempPeds[0]=" << Dvc_tempPeds[0]<< endl;
CopyPeds_to_Particles<<<grid1, threads1>>>(dvc_particles,Dvc_tempPeds[0]);
}
//HANDLE_ERROR( cudaDeviceSynchronize());
}
}
HOST void Dvc_PomdpState::ReadMainStateBackToCPU(const Dvc_PomdpState* dvc_particles, PomdpState* hst_particle)
{
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
HANDLE_ERROR(cudaMemcpy((void*)Hst_temp_mainstates[ThreadID], (const void*)dvc_particles, sizeof(Dvc_PomdpState), cudaMemcpyDeviceToHost));
hst_particle->car.dist_travelled=Hst_temp_mainstates[ThreadID]->car.dist_travelled;
hst_particle->car.pos=Hst_temp_mainstates[ThreadID]->car.pos;
hst_particle->car.vel=Hst_temp_mainstates[ThreadID]->car.vel;
hst_particle->num=Hst_temp_mainstates[ThreadID]->num;
hst_particle->weight=Hst_temp_mainstates[ThreadID]->weight;
hst_particle->state_id=Hst_temp_mainstates[ThreadID]->state_id;
hst_particle->scenario_id=Hst_temp_mainstates[ThreadID]->scenario_id;
}
HOST void Dvc_PomdpState::ReadPedsBackToCPU(const Dvc_PomdpState* dvc_particles,
std::vector<State*> hst_particles, bool deep_copy)
{
if(deep_copy)
{
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
int NumParticles=hst_particles.size();
int Data_size=NumParticles*ModelParams::N_PED_IN;
dim3 grid1(NumParticles,1);dim3 threads1(ModelParams::N_PED_IN,1);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
CopyPeds_to_list<<<grid1, threads1, 0, ((cudaStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID]>>>
(dvc_particles,Dvc_tempPeds[ThreadID]);
HANDLE_ERROR(cudaMemcpyAsync((void*)Hst_tempPeds[ThreadID],
(const void*)Dvc_tempPeds[ThreadID],
Data_size*sizeof(Dvc_PedStruct),
cudaMemcpyDeviceToHost,((cudaStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID]));
cudaStreamSynchronize(((cudaStream_t*)StreamManager::MANAGER.cuda_streams)[ThreadID]);
}
else
{
CopyPeds_to_list<<<grid1, threads1>>>(dvc_particles,Dvc_tempPeds[0]);
HANDLE_ERROR(cudaMemcpy((void*)Hst_tempPeds[0],
(const void*)Dvc_tempPeds[0],
Data_size*sizeof(Dvc_PedStruct),
cudaMemcpyDeviceToHost));
}
int Data_block_size=ModelParams::N_PED_IN;
for(int i=0;i<NumParticles;i++)
{
PomdpState* car_state=static_cast<PomdpState*>(hst_particles[i]);
if(Globals::config.use_multi_thread_ && StreamManager::MANAGER.cuda_streams)
{
memcpy((void*)car_state->peds,
(const void*)(Hst_tempPeds[ThreadID]+Data_block_size*i),
Data_block_size*sizeof(Dvc_PedStruct));
}
else
{
memcpy((void*)car_state->peds,
(const void*)(Hst_tempPeds[0]+Data_block_size*i),
Data_block_size*sizeof(Dvc_PedStruct));
}
}
}
}
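/**
* CopyParticles kernel:
* Compact the particles selected by particle_IDs from the full source array into des,
* reset the accumulated weight, set the random-stream position when streams is given,
* and, unless alpha-vector tracking is enabled, atomically accumulate the copied
* particles' weights into *weight.
*/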
__global__ void CopyParticles(Dvc_PomdpState* des,Dvc_PomdpState* src,
float* weight,int* particle_IDs,int num_particles,
Dvc_RandomStreams* streams, int stream_pos
)
{
int pos=blockIdx.x*blockDim.x+threadIdx.x;
if(pos==0)
{
weight[0]=0;
if(streams) streams->position_=stream_pos;
}
if(pos < num_particles)
{
int scenarioID=particle_IDs[pos];
Dvc_PomdpState* src_i=src+scenarioID;//src is a full length array for all particles
Dvc_PomdpState* des_i=des+pos;//des is short, only for the new partition
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
des_i->num=src_i->num;
des_i->weight=src_i->weight;
des_i->state_id=src_i->state_id;
des_i->scenario_id=src_i->scenario_id;
for(int i=0;i<src_i->num;i++)
{
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].vel=src_i->peds[i].vel;
}
if(!Dvc_config->track_alpha_vector)
{
//Accumulate weight of the particles
atomicAdd(weight, des_i->weight);
}
}
}
void PedPomdp::CreateMemoryPool() const
{
if(gpu_mainstate_pool_==NULL)
gpu_mainstate_pool_=new GPU_MemoryPool<Dvc_PomdpState>;
if(gpu_ped_pool_==NULL)
gpu_ped_pool_=new GPU_MemoryPool<Dvc_PedStruct>;
}
void PedPomdp::DestroyMemoryPool(MEMORY_MODE mode) const
{
switch(mode)
{
case DESTROY:
if(gpu_mainstate_pool_){delete gpu_mainstate_pool_;gpu_mainstate_pool_=NULL;}
if(gpu_ped_pool_){delete gpu_ped_pool_;gpu_ped_pool_=NULL;}
break;
case RESET:
if(gpu_mainstate_pool_ ){ gpu_mainstate_pool_->ResetChuncks();};
if(gpu_ped_pool_ ){ gpu_ped_pool_->ResetChuncks();};
break;
}
}
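/**
* LinkPeds kernel:
* Point each particle's peds list at its own contiguous slice of the
* pre-allocated pedestrian memory block.
*/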
__global__ void LinkPeds(Dvc_PomdpState* state, Dvc_PedStruct* peds_memory, int numParticles)
{
for(int i=0;i<numParticles;i++)
{
state[i].peds=peds_memory+i*Dvc_ModelParams::N_PED_IN;
}
}
Dvc_State* PedPomdp::AllocGPUParticles(int numParticles, MEMORY_MODE mode, Dvc_State*** particles_for_all_actions) const
{
clock_t start=clock();
dim3 grid((numParticles+THREADDIM-1)/THREADDIM,1); dim3 threads(THREADDIM,1);
int num_threads=1;
if(Globals::config.use_multi_thread_)
{
num_threads = Globals::config.NUM_THREADS;
}
Dvc_PedStruct* node_particle_peds;
switch(mode)
{
case INIT:
CreateMemoryPool();
/* Intermediate pedestrian container for copying pedestrians in host particles to device particles */
if(Dvc_tempPeds == NULL && Hst_tempPeds == NULL){
Dvc_tempPeds=new Dvc_PedStruct*[num_threads];
Hst_tempPeds=new Dvc_PedStruct*[num_threads];
for(int i=0;i<num_threads;i++)
{
HANDLE_ERROR(cudaMalloc((void**)&Dvc_tempPeds[i],numParticles*ModelParams::N_PED_IN*sizeof(Dvc_PedStruct) ));
HANDLE_ERROR(cudaHostAlloc((void**)&Hst_tempPeds[i],numParticles*ModelParams::N_PED_IN*sizeof(Dvc_PedStruct),0 ));
}
}
cout<<"numParticles="<<numParticles<<endl;
if(particles_for_all_actions[0] == NULL){
particles_for_all_actions[0]=new Dvc_State*[num_threads];
//Allocate pedestrian memory separately
Dvc_PedStruct* peds_tmp=gpu_ped_pool_->Allocate((NumActions()*num_threads)*numParticles*ModelParams::N_PED_IN);
for(int i=0;i<num_threads;i++)
{
HANDLE_ERROR(cudaMalloc((void**)&particles_for_all_actions[0][i],
NumActions()*numParticles*sizeof(Dvc_PomdpState)));
//Link pre-allocated pedestrian memory
LinkPeds<<<dim3(numParticles,1), dim3(ModelParams::N_PED_IN,1)>>>
(static_cast<Dvc_PomdpState*>(particles_for_all_actions[0][i]),
peds_tmp+(NumActions()*i)*numParticles*ModelParams::N_PED_IN,
NumActions()*numParticles);
}
//Record the ped memory used by the pre-allocated lists
//never reuse these memory for vnode particles
gpu_ped_pool_->RecordHead();
}
/*Intermediate memory for copying particle IDs to device memory
cudaHostAlloc enables the copying to interleave with kernel executions*/
Hst_temp_IDs=new int*[num_threads];
for(int i=0;i<num_threads;i++)
{
if(Globals::config.track_alpha_vector)
{
cudaHostAlloc(&Hst_temp_IDs[i],(2+ Globals::config.num_scenarios + Globals::config.num_obs)*NumActions()*sizeof(int),0);
}
else
{
cudaHostAlloc(&Hst_temp_IDs[i],numParticles*sizeof(int),0);
}
}
/*Intermediate memory for copying weights to device memory.
cudaHostAlloc enables the copying to interleave with kernel executions*/
Hst_temp_weight=new float*[num_threads];
for(int i=0;i<num_threads;i++)
cudaHostAlloc(&Hst_temp_weight[i],1*sizeof(float),0);
Dvc_temp_weight=new float*[num_threads];
for(int i=0;i<num_threads;i++)
HANDLE_ERROR(cudaMalloc(&Dvc_temp_weight[i], sizeof(float)));
/*Intermediate memory for copying main memory of particle (everything except pedestrians) from device back to host
cudaHostAlloc enables the copying to interleave with kernel executions*/
Hst_temp_mainstates=new Dvc_PomdpState*[num_threads];
for(int i=0;i<num_threads;i++)
HANDLE_ERROR(cudaHostAlloc((void**)&Hst_temp_mainstates[i],1*sizeof(Dvc_PomdpState),0));
/* No node particle allocated */
return NULL;
case ALLOC_ROOT:
/*Intermediate managed memory for root node particles.
* Managed memory enables data copying between CPU and GPU without launching memcpy (which is expensive)
*/
HANDLE_ERROR(cudaMallocManaged((void**)&Managed_rootnode_particles, numParticles*sizeof(Dvc_PomdpState)));
node_particle_peds = gpu_ped_pool_->Allocate(numParticles*ModelParams::N_PED_IN);
/* Link pedestrian lists to the main memory of particles */
LinkPeds<<<dim3(numParticles,1), dim3(ModelParams::N_PED_IN,1)>>>(Managed_rootnode_particles, node_particle_peds, numParticles);
HANDLE_ERROR(cudaDeviceSynchronize());
return Managed_rootnode_particles;
case ALLOC:
/* Allocate vnode particles: main memory and the pedestrian lists */
Dvc_PomdpState* vnode_particles = gpu_mainstate_pool_->Allocate(numParticles);
Dvc_PedStruct* vnode_particle_peds = gpu_ped_pool_->Allocate(numParticles*ModelParams::N_PED_IN);
/* Link pedestrian lists to the main memory of particles */
LinkPeds<<<dim3(numParticles,1), dim3(ModelParams::N_PED_IN,1)>>>(vnode_particles, vnode_particle_peds, numParticles);
HANDLE_ERROR(cudaDeviceSynchronize());
return vnode_particles;
};
return NULL;
}
void PedPomdp::CopyGPUParticlesFromParent(Dvc_State* des,Dvc_State* src,int src_offset,
int* dvc_particle_IDs,int num_particles,bool interleave,
Dvc_RandomStreams* streams, int stream_pos,
void* cudaStream, int shift) const
{
dim3 grid((num_particles+THREADDIM-1)/THREADDIM,1); dim3 threads(THREADDIM,1);
if(num_particles<THREADDIM)
{
grid.x=1;grid.y=1;threads.x=num_particles;
}
int ThreadID=0;
if(Globals::config.use_multi_thread_)
ThreadID=Globals::MapThread(this_thread::get_id());
if(cudaStream)
{
CopyParticles<<<grid, threads,0, *(cudaStream_t*)cudaStream>>>(static_cast<Dvc_PomdpState*>(des),
static_cast<Dvc_PomdpState*>(src)+src_offset,Dvc_temp_weight[(ThreadID+shift)%Globals::config.NUM_THREADS],
dvc_particle_IDs,num_particles, streams,stream_pos);
if(!interleave)
;
}
else
{
CopyParticles<<<grid, threads,0, 0>>>(static_cast<Dvc_PomdpState*>(des),
static_cast<Dvc_PomdpState*>(src)+src_offset,Dvc_temp_weight[ThreadID],
dvc_particle_IDs,num_particles, streams,stream_pos);
if(!interleave)
HANDLE_ERROR(cudaDeviceSynchronize());
}
}
Dvc_State* PedPomdp::GetPointerToParticleList(int offset, Dvc_State* full_list) const
{
return static_cast<Dvc_PomdpState*>(full_list)+ offset;
}
Dvc_State* PedPomdp::CopyParticlesToGPU(Dvc_State* dvc_particles, const std::vector<State*>& particles, bool deep_copy) const
//dvc_particles: managed device memory storing particles
// deep_copy: option on whether to copy the list objects inside particles
{
auto start = Time::now();
for (int i=0;i<particles.size();i++)
{
const PomdpState* src=static_cast<const PomdpState*>(particles[i]);
Dvc_PomdpState::CopyMainStateToGPU(static_cast<const Dvc_PomdpState*>(dvc_particles),src->scenario_id,src);
}
Dvc_PomdpState::CopyPedsToGPU(static_cast<const Dvc_PomdpState*>(dvc_particles),particles.size());
return dvc_particles;
}
void PedPomdp::CopyParticleIDsToGPU( int* Dvc_ptr, const std::vector<int>& particleIDs, void *cudaStream) const
{
if(cudaStream)
{
int ThreadID=Globals::MapThread(this_thread::get_id());
memcpy(Hst_temp_IDs[ThreadID],particleIDs.data(),particleIDs.size()*sizeof(int));
HANDLE_ERROR(cudaMemcpyAsync(Dvc_ptr,Hst_temp_IDs[ThreadID],particleIDs.size()*sizeof(int), cudaMemcpyHostToDevice,*(cudaStream_t*)cudaStream));
}
else
{
logd << "Dvc_ptr = "<< Dvc_ptr << " particleIDs.size() = " << particleIDs.size()<< " cudaStream = "<< cudaStream<< endl;
HANDLE_ERROR(cudaMemcpy(Dvc_ptr,particleIDs.data(),particleIDs.size()*sizeof(int), cudaMemcpyHostToDevice));
}
}
void PedPomdp::DeleteGPUParticles( MEMORY_MODE mode, Dvc_State** particles_for_all_actions ) const
{
int num_threads=1;
switch (mode){
case DESTROY:
if(Globals::config.use_multi_thread_)
{
num_threads=Globals::config.NUM_THREADS;
}
for(int i=0;i<num_threads;i++)
{
if(particles_for_all_actions[i]!=NULL)
{HANDLE_ERROR(cudaFree(particles_for_all_actions[i]));particles_for_all_actions[i]=NULL;}
}
if(particles_for_all_actions)delete [] particles_for_all_actions;particles_for_all_actions=NULL;
for(int i=0;i<num_threads;i++)
{
cudaFreeHost(Hst_temp_IDs[i]);
}
delete [] Hst_temp_IDs;
for(int i=0;i<num_threads;i++)
{
cudaFreeHost(Hst_temp_weight[i]);
}
delete [] Hst_temp_weight;
for(int i=0;i<num_threads;i++)
{
cudaFree(Dvc_temp_weight[i]);
}
delete [] Dvc_temp_weight;
for(int i=0;i<num_threads;i++)
{
cudaFree(Dvc_tempPeds[i]);
cudaFreeHost(Hst_tempPeds[i]);
cudaFreeHost(Hst_temp_mainstates[i]);
}
delete [] Dvc_tempPeds;
delete [] Hst_tempPeds;
delete [] Hst_temp_mainstates;
break;
case RESET:
HANDLE_ERROR(cudaFree(static_cast<Dvc_PomdpState*>(Managed_rootnode_particles)));
break;
};
DestroyMemoryPool(mode);
}
DEVICE float Dvc_PedPomdpParticleUpperBound1::Value(
const Dvc_State* particles, int scenarioID, Dvc_History& history) {
return Dvc_ModelParams::GOAL_REWARD / (1 - Dvc_Globals::Dvc_Discount(Dvc_config));
}
DEVICE bool Dvc_PedPomdp::Dvc_Step(Dvc_State& state, float rand_num, int action, float& reward,
int* obs) {
Dvc_PomdpState& pedpomdp_state = static_cast<Dvc_PomdpState&>(state);//copy contents, link cells to existing ones
__shared__ int iscollision[32];
if(FIX_SCENARIO==1 || GPUDoPrint)
if(GPUDoPrint && pedpomdp_state.scenario_id==PRINT_ID && blockIdx.x==ACTION_ID && threadIdx.y==0){
printf("(GPU) Before step: scenario=%d \n", pedpomdp_state.scenario_id);
printf("action= %d\n ",action);
printf("Before step:\n");
int pos=pedpomdp_state.car.pos;
printf("car_pos= %d ",pos);
printf("trav_dist=%f\n",pedpomdp_state.car.dist_travelled);
printf("car_vel= %f\n",pedpomdp_state.car.vel);
for(int i=0;i<pedpomdp_state.num;i++)
{
printf("ped %d pos_x= %f pos_y=%f\n",i,
pedpomdp_state.peds[i].pos.x,pedpomdp_state.peds[i].pos.y);
}
}
bool terminal=false;
reward = 0;
unsigned long long int Temp=INIT_QUICKRANDSEED;
/* Termination checking */
if(threadIdx.y==0)
{
// Terminate upon reaching goal
if (pedpomdp_state.car.dist_travelled > Dvc_ModelParams::GOAL_TRAVELLED-1e-4
|| pedpomdp_state.car.pos >= path->size_-1) {
reward = Dvc_ModelParams::GOAL_REWARD;
terminal= true;
}
}
/* Collision checking */
iscollision[threadIdx.x]=false;
__syncthreads();
if(!terminal)
{
const int car = pedpomdp_state.car.pos;
const Dvc_COORD& car_pos = path->way_points_[car];
const Dvc_COORD& forward_pos = path->way_points_[path->forward(car, 1.0)];
if(threadIdx.y<pedpomdp_state.num){
const Dvc_COORD& pedpos = pedpomdp_state.peds[threadIdx.y].pos;
bool collide_ped=false;
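// Project the pedestrian offset HM onto the car heading HN and its left normal HL,
// then compare the projections against the front/back and side margins of the car footprint.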
float HNx = forward_pos.x - car_pos.x, // car direction
HNy = forward_pos.y - car_pos.y;
float HMx = pedpos.x - car_pos.x,
HMy = pedpos.y - car_pos.y;
/// car geometry for golfcart
/* double car_width = 0.87,
car_length = 1.544;
double safe_margin = 0.92, side_safe_margin = 0.4, back_safe_margin = 0.33,
side_margin = car_width / 2.0 + side_safe_margin,
front_margin = car_length/2.0 + safe_margin,
back_margin = car_length/2.0 + back_safe_margin;
*/
/// end golfcart
/// car geometry for audi r8
/*double car_width = 2.0,
car_length = 4.4;
double safe_margin = 0.8, side_safe_margin = 0.35, back_safe_margin = 0.2,
side_margin = car_width / 2.0 + side_safe_margin,
front_margin = 3.6 + safe_margin,
back_margin = 0.8 + back_safe_margin;*/
/// end audi r8
/// car geometry for pomdp car
double car_width = 1.2,
car_length = 2.2;
double safe_margin = 0.3,
side_margin = car_width / 2.0 + safe_margin,
front_margin = safe_margin,
back_margin = car_length + safe_margin;
/// end pomdp car
float HLx = - HNy, // direction after 90 degree anticlockwise rotation
HLy = HNx;
float HM_HN = HMx * HNx + HMy * HNy, // HM . HN
HN_HN = HNx * HNx + HNy * HNy; // HN . HN
if (HM_HN >= 0 && HM_HN * HM_HN > HN_HN * front_margin * front_margin)
collide_ped = false;
else if (HM_HN <= 0 && HM_HN * HM_HN > HN_HN * back_margin * back_margin)
collide_ped = false;
else
{
float HM_HL = HMx * HLx + HMy * HLy, // HM . HL
HL_HL = HLx * HLx + HLy * HLy; // HL . HL
collide_ped= HM_HL * HM_HL <= HL_HL * side_margin * side_margin;
}
atomicOr(iscollision+threadIdx.x, collide_ped);
}
}
__syncthreads(); // Synchronize the block to wait for collision checking with all peds (parallelized in the Y dimension) to finish.
if(threadIdx.y==0 && !terminal)
{
/* Terminate if collision is detected */
if(pedpomdp_state.car.vel > 0.001 && iscollision[threadIdx.x] ) { /// collision occurs only when car is moving
reward= Dvc_ModelParams::CRASH_PENALTY *
(pedpomdp_state.car.vel * pedpomdp_state.car.vel +
Dvc_ModelParams::REWARD_BASE_CRASH_VEL);
if(action == ACT_DEC) reward += 0.1;
terminal= true;
}
/* Compute reward */
if(!terminal)
{
// Smoothness penalty
reward += (action == ACT_DEC || action == ACT_ACC) ? -0.1 : 0.0;
reward += Dvc_ModelParams::REWARD_FACTOR_VEL *
(pedpomdp_state.car.vel - Dvc_ModelParams::VEL_MAX) / Dvc_ModelParams::VEL_MAX;
float acc = (action == ACT_ACC) ? Dvc_ModelParams::AccSpeed :
((action == ACT_CUR) ? 0 : (-Dvc_ModelParams::AccSpeed));
/* State transition: car */
float dist = pedpomdp_state.car.vel / freq;
int nxt = path->forward(pedpomdp_state.car.pos, dist);
pedpomdp_state.car.pos = nxt;
pedpomdp_state.car.dist_travelled += dist;
const float N = Dvc_ModelParams::NOISE_ROBVEL;
if (N>0) {
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
float prob = rand_num;
if (prob > N) {
pedpomdp_state.car.vel += acc / freq;
}
} else {
pedpomdp_state.car.vel += acc / freq;
}
pedpomdp_state.car.vel = max(min(pedpomdp_state.car.vel, Dvc_ModelParams::VEL_MAX), 0.0);
}
}
__syncthreads();
if(!terminal)
{
/* State transition: peds */
if(threadIdx.y<pedpomdp_state.num)
{
int i=0;
while(i<threadIdx.y)
{
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
i++;
}
if(threadIdx.y!=0 && FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
const Dvc_COORD& goal = goals[pedpomdp_state.peds[threadIdx.y].goal];
if (abs(goal.x+1)<1e-5 && abs(goal.y+1)<1e-5) { //stop intention, ped doesn't move
;
}
else
{
// Straight-line model with Gaussian noise on direction
Dvc_Vector goal_vec(goal.x - pedpomdp_state.peds[threadIdx.y].pos.x, goal.y - pedpomdp_state.peds[threadIdx.y].pos.y);
float a = goal_vec.GetAngle();
float noise = sqrt(-2 * log(rand_num));
if(FIX_SCENARIO!=1 && !GPUDoPrint)
rand_num=Dvc_QuickRandom::RandGeneration(&Temp, rand_num);
noise *= cos(2 * M_PI * rand_num)* Dvc_ModelParams::NOISE_GOAL_ANGLE;
a += noise;
Dvc_Vector move(a, pedpomdp_state.peds[threadIdx.y].vel/freq, 0);
pedpomdp_state.peds[threadIdx.y].pos.x += move.dw;
pedpomdp_state.peds[threadIdx.y].pos.y += move.dh;
}
}
}
__syncthreads();
if(threadIdx.y==0 && obs!=NULL)//for each particle in the thread block
{
/* generate observations by discretizing the observable part of the state */
if(!terminal)
{
int i=0;
obs[i++]=2+2*pedpomdp_state.num;
obs[i++] = int(pedpomdp_state.car.pos);
obs[i++] = int((pedpomdp_state.car.vel+1e-5) / Dvc_ModelParams::vel_rln);
for(int j = 0; j < pedpomdp_state.num; j ++) {
obs[i++] = int(pedpomdp_state.peds[j].pos.x / Dvc_ModelParams::pos_rln);
obs[i++] = int(pedpomdp_state.peds[j].pos.y / Dvc_ModelParams::pos_rln);
}
}
else
{
int i=0;
obs[i++]=0;
obs[i++] = 0;
obs[i++] = 0;
for(int j = 0; j < pedpomdp_state.num; j ++) {
obs[i++] = 0;
obs[i++] = 0;
}
}
}
if(!terminal && GPUDoPrint && pedpomdp_state.scenario_id==PRINT_ID && blockIdx.x==ACTION_ID && threadIdx.y==0){
printf("(GPU) After step: scenario=%d \n", pedpomdp_state.scenario_id);
printf("rand=%f, action=%d \n", rand_num, action);
printf("After step:\n");
printf("Reward=%f\n",reward);
int pos=pedpomdp_state.car.pos;
printf("car pos= %d ",pos);
printf("dist=%f\n",pedpomdp_state.car.dist_travelled);
printf("car vel= %f\n",pedpomdp_state.car.vel);
for(int i=0;i<pedpomdp_state.num;i++)
{
printf("ped %d pos_x= %f pos_y=%f\n",i,
pedpomdp_state.peds[i].pos.x,pedpomdp_state.peds[i].pos.y);
}
}
return terminal;
}
DEVICE int Dvc_PedPomdp::NumActions() {
return 3;
}
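/**
* Observation likelihood: an unnormalized Gaussian (stddev = 1.0) of the squared
* distance between the discretized observed pedestrian positions and the
* pedestrian positions stored in the particle.
*/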
DEVICE float Dvc_PedPomdp::Dvc_ObsProbInt(int* obs, Dvc_State& state, int action)
{
//const PomdpState& state = static_cast<const PomdpState&>(s);
Dvc_PomdpState& pedpomdp_state = static_cast<Dvc_PomdpState&>(state);//copy contents, link cells to existing ones
//PrintState(state);
float prob = 1.0;
float b = 0.0;
for (int j = 0; j < pedpomdp_state.num; j ++) {
b = b + ((obs[2*j + 3]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.x )*((obs[2*j + 3]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.x );
b = b + ((obs[2*j + 4]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.y )*((obs[2*j + 4]*Dvc_ModelParams::pos_rln) - pedpomdp_state.peds[j].pos.y );
//std::cout << j << " obs vec " << obs[2*j + 2]<< "," << obs[2*j + 3] << ")b= " << b<< std::endl;
}
float stddev = 1.0;
b = - b / (2.0* stddev*stddev);
//std::cout << "b= " << b << std::endl;
return expf(b);
}
DEVICE void Dvc_PedPomdp::Dvc_Copy_NoAlloc(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) {
/*Pass member values, assign member pointers to existing state pointer*/
const Dvc_PomdpState* src_i= static_cast<const Dvc_PomdpState*>(src)+pos;
if(!offset_des) pos=0;
Dvc_PomdpState* des_i= static_cast<const Dvc_PomdpState*>(des)+pos;
des_i->weight=src_i->weight;
des_i->scenario_id=src_i->scenario_id;
des_i->num=src_i->num;
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
for(int i=0;i< des_i->num;i++)
{
des_i->peds[i].vel=src_i->peds[i].vel;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
}
}
DEVICE void Dvc_PedPomdp::Dvc_Copy_ToShared(Dvc_State* des, const Dvc_State* src, int pos, bool offset_des) {
/*Pass member values, assign member pointers to existing state pointer*/
const Dvc_PomdpState* src_i= static_cast<const Dvc_PomdpState*>(src)+pos;
if(!offset_des) pos=0;
Dvc_PomdpState* des_i= static_cast<const Dvc_PomdpState*>(des)+pos;
des_i->weight=src_i->weight;
des_i->scenario_id=src_i->scenario_id;
des_i->num=src_i->num;
des_i->car.dist_travelled=src_i->car.dist_travelled;
des_i->car.pos=src_i->car.pos;
des_i->car.vel=src_i->car.vel;
des_i->peds=(Dvc_PedStruct*)((char*)(des_i)+3*sizeof(Dvc_PedStruct)); // byte arithmetic on char* (arithmetic on void* is non-standard)
for(int i=0;i< des_i->num;i++)
{
des_i->peds[i].vel=src_i->peds[i].vel;
des_i->peds[i].pos.x=src_i->peds[i].pos.x;
des_i->peds[i].pos.y=src_i->peds[i].pos.y;
des_i->peds[i].goal=src_i->peds[i].goal;
des_i->peds[i].id=src_i->peds[i].id;
}
}
DEVICE Dvc_State* Dvc_PedPomdp::Dvc_Get(Dvc_State* particles, int pos) {
Dvc_PomdpState* particle_i= static_cast<Dvc_PomdpState*>(particles)+pos;
return particle_i;
}
DEVICE float Dvc_PedPomdp::Dvc_GetCarVel(Dvc_State* particles, int pos) {
Dvc_PomdpState* particle_i= static_cast<Dvc_PomdpState*>(particles)+pos;
return particle_i->car.vel;
}
DEVICE Dvc_ValuedAction Dvc_PedPomdp::Dvc_GetBestAction() {
return Dvc_ValuedAction(0,
Dvc_ModelParams::CRASH_PENALTY * (Dvc_ModelParams::VEL_MAX*Dvc_ModelParams::VEL_MAX + Dvc_ModelParams::REWARD_BASE_CRASH_VEL));
}
void PedPomdp::ReadParticlesBackToCPU(std::vector<State*>& particles ,const Dvc_State* dvc_particles,
bool deepcopy) const
{
for (int i=0;i<particles.size();i++)
{
const Dvc_PomdpState* src=static_cast<const Dvc_PomdpState*>(dvc_particles)+i;
PomdpState* des=static_cast<PomdpState*>(particles[i]);
Dvc_PomdpState::ReadMainStateBackToCPU(src,des);
}
Dvc_PomdpState::ReadPedsBackToCPU(
static_cast<const Dvc_PomdpState*>(dvc_particles),
particles);
}
|
ede267e55264748c70cf116af2776239d3518ebf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* sphTartakovskyMeakin.cuh
*
* Author: Kamil Szewc ([email protected])
* Modified on: 26-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "sphTartakovskyMeakin/sphTartakovskyMeakin.cuh"
#include "general/calcNumberOfCells/calcNumberOfCells.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/renormalizePressure/renormalizePressure.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
#include <iostream>
#include "../errlog.h"
void modelSphTartakovskyMeakin(int NOB, int TPB,
thrust::device_vector<Particle>& pVector,
Particle *pSort,
uint *gridParticleHash,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par,
Parameters *parHost,
real time)
{
/*static bool isConverted = false;
if (isConverted == false)
{
std::cout << "Convertion..." << std::endl;
calcDeformationSTM << <NOB, TPB >> >(p, par);
isConverted = true;
}*/
STARTLOG("logs/models.log");
Particle* p = thrust::raw_pointer_cast(pVector.data());
calcNumberOfCells(pVector, par, parHost);
calcTimeStep(pVector, par, parHost);
hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N);
//calcDensitySTM << <NOB, TPB >> >(p, pSort, gridParticleIndex, cellStart, cellEnd, par);
hipLaunchKernelGGL(( calcDensitySTM) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDensitySTM");
hipLaunchKernelGGL(( calcPressureSTM) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSTM");
hipLaunchKernelGGL(( calcInteractionSTM) , dim3(NOB), dim3(TPB), 0, 0, pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSTM");
hipLaunchKernelGGL(( calcAdvectionSTM) , dim3(NOB), dim3(TPB), 0, 0, pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSTM");
copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N);
}
| ede267e55264748c70cf116af2776239d3518ebf.cu | /*
* sphTartakovskyMeakin.cuh
*
* Author: Kamil Szewc ([email protected])
* Modified on: 26-09-2014
*
*/
#include <thrust/device_vector.h>
#include "../sph.h"
#include "sphTartakovskyMeakin/sphTartakovskyMeakin.cuh"
#include "general/calcNumberOfCells/calcNumberOfCells.cuh"
#include "general/calcTimeStep/calcTimeStep.cuh"
#include "general/renormalizePressure/renormalizePressure.cuh"
#include "../methods/hashSortReorder.cuh"
#include "../methods/copyParticles.cuh"
#include <iostream>
#include "../errlog.h"
void modelSphTartakovskyMeakin(int NOB, int TPB,
thrust::device_vector<Particle>& pVector,
Particle *pSort,
uint *gridParticleHash,
uint *gridParticleIndex,
uint *cellStart,
uint *cellEnd,
Parameters *par,
Parameters *parHost,
real time)
{
/*static bool isConverted = false;
if (isConverted == false)
{
std::cout << "Convertion..." << std::endl;
calcDeformationSTM << <NOB, TPB >> >(p, par);
isConverted = true;
}*/
STARTLOG("logs/models.log");
Particle* p = thrust::raw_pointer_cast(pVector.data());
calcNumberOfCells(pVector, par, parHost);
calcTimeStep(pVector, par, parHost);
hashSortReorder(NOB, TPB, p, par, pSort, gridParticleHash, gridParticleIndex, cellStart, cellEnd, parHost->N);
copyParticles << <NOB, TPB >> >(pSort, p, gridParticleIndex, true, par, parHost->N);
//calcDensitySTM << <NOB, TPB >> >(p, pSort, gridParticleIndex, cellStart, cellEnd, par);
calcDensitySTM <<<NOB, TPB>>>(pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcDensitySTM");
calcPressureSTM <<<NOB, TPB>>>(pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcPressureSTM");
calcInteractionSTM <<<NOB, TPB>>>(pSort, gridParticleIndex, cellStart, cellEnd, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcInteractionSTM");
calcAdvectionSTM <<<NOB, TPB>>>(pSort, par);
HANDLE_CUDA_KERNEL_RUNTIME_ERROR("calcAdvectionSTM");
copyParticles << <NOB, TPB >> >(p, pSort, gridParticleIndex, false, par, parHost->N);
}
|
be6cf5b5a54ba1aae2ada593fef9dfbbbea2b7a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
////Kernel that writes the image to the OpenGL PBO directly.
//__global__ void sendDenoiseToPBO(uchar4* pbo, glm::ivec2 resolution,
// int iter, glm::vec3* image) {
// int x = (blockIdx.x * blockDim.x) + threadIdx.x;
// int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//
// if (x < resolution.x && y < resolution.y) {
// int index = x + (y * resolution.x);
// glm::vec3 pix = image[index];
//
// glm::ivec3 color;
// color.x = glm::clamp((int) (pix.x * 255.0), 0, 255);
// color.y = glm::clamp((int) (pix.y * 255.0), 0, 255);
// color.z = glm::clamp((int) (pix.z * 255.0), 0, 255);
//
// // Each thread writes one pixel location in the texture (textel)
// pbo[index].w = 0;
// pbo[index].x = color.x;
// pbo[index].y = color.y;
// pbo[index].z = color.z;
// }
//}
__global__ void gbufferToPBO(uchar4* pbo, glm::ivec2 resolution, GBufferPixel* gBuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
//float timeToIntersect = gBuffer[index].t * 256.0;
pbo[index].w = 0;
pbo[index].x = gBuffer[index].normal.x * 256.0f;
pbo[index].y = gBuffer[index].normal.y * 256.0f;
pbo[index].z = gBuffer[index].normal.z * 256.0f;
}
}
static Scene * hst_scene = NULL;
static DenoiseSettings * denoiseSettings = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static GBufferPixel* dev_gBuffer = NULL;
static glm::vec3 * dev_dnImage = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice);
hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice);
hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
hipMalloc(&dev_gBuffer, pixelcount * sizeof(GBufferPixel));
hipMalloc(&dev_dnImage, pixelcount * sizeof(glm::vec3));
hipMemset(dev_dnImage, 0, pixelcount * sizeof(glm::vec3));
// TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
hipFree(dev_image); // no-op if dev_image is null
hipFree(dev_paths);
hipFree(dev_geoms);
hipFree(dev_materials);
hipFree(dev_intersections);
hipFree(dev_gBuffer);
hipFree(dev_dnImage);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
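// Illustrative sketch only (hypothetical helper, not called by this renderer): one
// way to implement the sub-pixel antialiasing jitter described in the comment above
// generateRayFromCamera. It reuses the thrust RNG that makeSeededRandomEngine
// already provides; averaging many jittered samples per pixel across iterations
// integrates over the pixel footprint (a box filter).
__device__ inline glm::vec2 jitteredPixelOffset(thrust::default_random_engine &rng) {
    thrust::uniform_real_distribution<float> u01(0, 1);
    // random position inside the pixel, in [0,1) x [0,1)
    return glm::vec2(u01(rng), u01(rng));
}
// With such a helper, the ray-direction computation could use ((float)x + offset.x)
// and ((float)y + offset.y) in place of the integer pixel coordinates used above.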
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
__global__ void shadeSimpleMaterials (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
PathSegment segment = pathSegments[idx];
if (segment.remainingBounces == 0) {
return;
}
if (intersection.t > 0.0f) { // if the intersection exists...
segment.remainingBounces--;
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, segment.remainingBounces);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
segment.color *= (materialColor * material.emittance);
segment.remainingBounces = 0;
}
else {
segment.color *= materialColor;
glm::vec3 intersectPos = intersection.t * segment.ray.direction + segment.ray.origin;
scatterRay(segment, intersectPos, intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
segment.color = glm::vec3(0.0f);
segment.remainingBounces = 0;
}
pathSegments[idx] = segment;
}
}
__global__ void generateGBuffer (
int num_paths,
ShadeableIntersection* shadeableIntersections,
PathSegment* pathSegments,
GBufferPixel* gBuffer) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
//gBuffer[idx].t = shadeableIntersections[idx].t;
gBuffer[idx].normal = shadeableIntersections[idx].surfaceNormal;
gBuffer[idx].position = getPointOnRay(pathSegments[idx].ray,
shadeableIntersections[idx].t);
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
__global__ void denoise(int n,
GBufferPixel* gbuff,
glm::vec3* image,
glm::vec3 * dnImage,
int step,
int imageWidth,
float normalWeight,
float posWeight,
float colorWeight)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
glm::vec3 colSum = glm::vec3(0.0f);
float wSum = 0.0f;
// hardcode a 5x5 Gaussian filter
float GaussianFilter[5][5] = { {1, 4, 6, 4, 1},
{4, 16, 24, 16, 4},
{6, 24, 36, 24, 6},
{4, 16, 24, 16, 4},
{1, 4, 6, 4, 1} };
// a way to convert from 2d pixel space to the 1d pixel array we have
int uStepIm = 1;
int vStepIm = imageWidth;
// the relative offset from the center pixel in the image
// e.g. -2, -2 is two pixels left and two pixels up in screen space
int imStartX = -2;
int imStartY = -2;
// store the gbuffer values for the center pixel of our filter
// i.e. the one we're actually calculating the color for
glm::vec3 centralNorm = gbuff[index].normal;
glm::vec3 centralPos = gbuff[index].position;
glm::vec3 centralCol = image[index];
// the cell count in 2d, starting in the upper left corner of
// our 5x5 filter
for (int y = 0; y < 5; y++) {
for (int x = 0; x < 5; x++) {
int imX = (imStartX + x) * uStepIm * step;
int imY = (imStartY + y) * vStepIm * step;
// i is the index for 1d representations of our 2d
// data, i.e. the beauty pass and the gbuffer
int i = index + imX + imY;
if (i < 0 || i >= n) {
// i can be out of bounds along the edges of the image
continue;
}
// get the Gaussian value for this pixel
float gVal = GaussianFilter[y][x];
// get the gbuffer values for this pixel
glm::vec3 nVal = gbuff[i].normal;
glm::vec3 pVal = gbuff[i].position;
glm::vec3 cVal = image[i];
// get the distance of the gbuffer values
// from our central pixel
//glm::vec3 a = centralCol - cVal;
float nDist = max(glm::length(centralNorm - nVal)/(step*step), 0.0f);
float pDist = glm::length(centralPos - pVal);// , centralPos - pVal);
float cDist = glm::length(centralCol - cVal);// , centralCol - cVal);
// get the weights based on these distances
// and our input values
float nw = min(exp(-1.0f * nDist / normalWeight), 1.0f);
float pw = min(exp(-1.0f * pDist / posWeight), 1.0f);
float cw = min(exp(-1.0f * cDist / colorWeight), 1.0f);
// get the overall
float w = nw * pw * cw;
colSum += cVal * w * gVal;
wSum += w * gVal;
}
}
// normalize the weighted sum by the accumulated filter weight to get the denoised color
dnImage[index] = colSum / wSum;
//dnImage[index] = colSum / (256.0f * steps);
}
}
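// Illustrative sketch (hypothetical helper, not wired into denoise() above): the
// edge-stopping term applied per g-buffer channel, factored out for clarity. Given
// the distance between the center pixel's value and a neighbour's value, and the
// user-tuned weight (sigma), it returns the exponential falloff clamped to 1,
// matching the nw/pw/cw computation inside the kernel.
__device__ inline float edgeStoppingWeight(float dist, float sigma) {
    return min(expf(-dist / sigma), 1.0f);
}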
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Pathtracing Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * NEW: For the first depth, generate geometry buffers (gbuffers)
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally:
// * if not denoising, add this iteration's results to the image
// * TODO: if denoising, run kernels that take both the raw pathtraced result and the gbuffer, and put the result in the "pbo" from opengl
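// Illustrative sketch of the stream-compaction step described above; it is left as
// a comment because this simplified renderer keeps all paths alive until traceDepth,
// and the predicate name is hypothetical. With a functor such as
//
//   struct PathTerminated {
//     __host__ __device__ bool operator()(const PathSegment& p) const {
//       return p.remainingBounces <= 0;
//     }
//   };
//
// declared at file scope, dead paths could be compacted in place after shading:
//
//   dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_path_end,
//                                    PathTerminated());
//   num_paths = dev_path_end - dev_paths;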
hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
// Empty gbuffer
hipMemset(dev_gBuffer, 0, pixelcount * sizeof(GBufferPixel));
// clean shading chunks
hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
bool iterationComplete = false;
while (!iterationComplete) {
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( computeIntersections) , dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
hipDeviceSynchronize();
if (depth == 0) {
hipLaunchKernelGGL(( generateGBuffer), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, num_paths, dev_intersections, dev_paths, dev_gBuffer);
}
depth++;
hipLaunchKernelGGL(( shadeSimpleMaterials), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0,
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
iterationComplete = depth == traceDepth;
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
if (*hst_scene->state.denoiseSettings->denoise){
float nWeight = pow(*hst_scene->state.denoiseSettings->normalWeight, 2);
float pWeight = pow(*hst_scene->state.denoiseSettings->positionWeight, 2);
float cWeight = pow(*hst_scene->state.denoiseSettings->colorWeight, 2);
int steps = *hst_scene->state.denoiseSettings->filterSize / 5;
for (int step = 1; step <= steps; step++) {
hipLaunchKernelGGL(( denoise) , dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, num_paths,
dev_gBuffer,
dev_image,
dev_dnImage,
step,
cam.resolution.x,
nWeight,
pWeight,
cWeight);
}
}
// CHECKITOUT: use dev_image as reference if you want to implement saving denoised images.
// Otherwise, screenshots are also acceptable.
// Retrieve image from GPU
hipMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
// CHECKITOUT: this kernel "post-processes" the gbuffer/gbuffers into something that you can visualize for debugging.
void showGBuffer(uchar4* pbo) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// CHECKITOUT: process the gbuffer results and send them to OpenGL buffer for visualization
hipLaunchKernelGGL(( gbufferToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, dev_gBuffer);
}
void showDenoise(uchar4* pbo, int iter) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// CHECKITOUT: process the gbuffer results and send them to OpenGL buffer for visualization
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_dnImage);
}
void showImage(uchar4* pbo, int iter) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// Send results to OpenGL buffer for rendering
hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image);
}
| be6cf5b5a54ba1aae2ada593fef9dfbbbea2b7a9.cu | #include <cstdio>
#include <cuda.h>
#include <cmath>
#include <thrust/execution_policy.h>
#include <thrust/random.h>
#include <thrust/remove.h>
#include "sceneStructs.h"
#include "scene.h"
#include "glm/glm.hpp"
#include "glm/gtx/norm.hpp"
#include "utilities.h"
#include "pathtrace.h"
#include "intersections.h"
#include "interactions.h"
#define ERRORCHECK 1
#define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__)
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
#if ERRORCHECK
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
# ifdef _WIN32
getchar();
# endif
exit(EXIT_FAILURE);
#endif
}
__host__ __device__
thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) {
int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index);
return thrust::default_random_engine(h);
}
//Kernel that writes the image to the OpenGL PBO directly.
__global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution,
int iter, glm::vec3* image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
glm::vec3 pix = image[index];
glm::ivec3 color;
color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255);
color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255);
color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255);
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
////Kernel that writes the image to the OpenGL PBO directly.
//__global__ void sendDenoiseToPBO(uchar4* pbo, glm::ivec2 resolution,
// int iter, glm::vec3* image) {
// int x = (blockIdx.x * blockDim.x) + threadIdx.x;
// int y = (blockIdx.y * blockDim.y) + threadIdx.y;
//
// if (x < resolution.x && y < resolution.y) {
// int index = x + (y * resolution.x);
// glm::vec3 pix = image[index];
//
// glm::ivec3 color;
// color.x = glm::clamp((int) (pix.x * 255.0), 0, 255);
// color.y = glm::clamp((int) (pix.y * 255.0), 0, 255);
// color.z = glm::clamp((int) (pix.z * 255.0), 0, 255);
//
// // Each thread writes one pixel location in the texture (textel)
// pbo[index].w = 0;
// pbo[index].x = color.x;
// pbo[index].y = color.y;
// pbo[index].z = color.z;
// }
//}
__global__ void gbufferToPBO(uchar4* pbo, glm::ivec2 resolution, GBufferPixel* gBuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < resolution.x && y < resolution.y) {
int index = x + (y * resolution.x);
//float timeToIntersect = gBuffer[index].t * 256.0;
pbo[index].w = 0;
pbo[index].x = gBuffer[index].normal.x * 256.0f;
pbo[index].y = gBuffer[index].normal.y * 256.0f;
pbo[index].z = gBuffer[index].normal.z * 256.0f;
}
}
static Scene * hst_scene = NULL;
static DenoiseSettings * denoiseSettings = NULL;
static glm::vec3 * dev_image = NULL;
static Geom * dev_geoms = NULL;
static Material * dev_materials = NULL;
static PathSegment * dev_paths = NULL;
static ShadeableIntersection * dev_intersections = NULL;
static GBufferPixel* dev_gBuffer = NULL;
static glm::vec3 * dev_dnImage = NULL;
// TODO: static variables for device memory, any extra info you need, etc
// ...
void pathtraceInit(Scene *scene) {
hst_scene = scene;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3));
cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment));
cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom));
cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice);
cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material));
cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice);
cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection));
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
cudaMalloc(&dev_gBuffer, pixelcount * sizeof(GBufferPixel));
cudaMalloc(&dev_dnImage, pixelcount * sizeof(glm::vec3));
cudaMemset(dev_dnImage, 0, pixelcount * sizeof(glm::vec3));
// TODO: initialize any extra device memory you need
checkCUDAError("pathtraceInit");
}
void pathtraceFree() {
cudaFree(dev_image); // no-op if dev_image is null
cudaFree(dev_paths);
cudaFree(dev_geoms);
cudaFree(dev_materials);
cudaFree(dev_intersections);
cudaFree(dev_gBuffer);
cudaFree(dev_dnImage);
// TODO: clean up any extra device memory you created
checkCUDAError("pathtraceFree");
}
/**
* Generate PathSegments with rays from the camera through the screen into the
* scene, which is the first bounce of rays.
*
* Antialiasing - add rays for sub-pixel sampling
* motion blur - jitter rays "in time"
* lens effect - jitter ray origin positions based on a lens
*/
__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < cam.resolution.x && y < cam.resolution.y) {
int index = x + (y * cam.resolution.x);
PathSegment & segment = pathSegments[index];
segment.ray.origin = cam.position;
segment.color = glm::vec3(1.0f, 1.0f, 1.0f);
segment.ray.direction = glm::normalize(cam.view
- cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f)
- cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f)
);
segment.pixelIndex = index;
segment.remainingBounces = traceDepth;
}
}
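// Illustrative sketch only (hypothetical helper, not called by this renderer): one
// way to implement the sub-pixel antialiasing jitter described in the comment above
// generateRayFromCamera. It reuses the thrust RNG that makeSeededRandomEngine
// already provides; averaging many jittered samples per pixel across iterations
// integrates over the pixel footprint (a box filter).
__device__ inline glm::vec2 jitteredPixelOffset(thrust::default_random_engine &rng) {
    thrust::uniform_real_distribution<float> u01(0, 1);
    // random position inside the pixel, in [0,1) x [0,1)
    return glm::vec2(u01(rng), u01(rng));
}
// With such a helper, the ray-direction computation could use ((float)x + offset.x)
// and ((float)y + offset.y) in place of the integer pixel coordinates used above.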
__global__ void computeIntersections(
int depth
, int num_paths
, PathSegment * pathSegments
, Geom * geoms
, int geoms_size
, ShadeableIntersection * intersections
)
{
int path_index = blockIdx.x * blockDim.x + threadIdx.x;
if (path_index < num_paths)
{
PathSegment pathSegment = pathSegments[path_index];
float t;
glm::vec3 intersect_point;
glm::vec3 normal;
float t_min = FLT_MAX;
int hit_geom_index = -1;
bool outside = true;
glm::vec3 tmp_intersect;
glm::vec3 tmp_normal;
// naive parse through global geoms
for (int i = 0; i < geoms_size; i++)
{
Geom & geom = geoms[i];
if (geom.type == CUBE)
{
t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
else if (geom.type == SPHERE)
{
t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside);
}
// Compute the minimum t from the intersection tests to determine what
// scene geometry object was hit first.
if (t > 0.0f && t_min > t)
{
t_min = t;
hit_geom_index = i;
intersect_point = tmp_intersect;
normal = tmp_normal;
}
}
if (hit_geom_index == -1)
{
intersections[path_index].t = -1.0f;
}
else
{
//The ray hits something
intersections[path_index].t = t_min;
intersections[path_index].materialId = geoms[hit_geom_index].materialid;
intersections[path_index].surfaceNormal = normal;
}
}
}
__global__ void shadeSimpleMaterials (
int iter
, int num_paths
, ShadeableIntersection * shadeableIntersections
, PathSegment * pathSegments
, Material * materials
)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
ShadeableIntersection intersection = shadeableIntersections[idx];
PathSegment segment = pathSegments[idx];
if (segment.remainingBounces == 0) {
return;
}
if (intersection.t > 0.0f) { // if the intersection exists...
segment.remainingBounces--;
// Set up the RNG
thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, segment.remainingBounces);
Material material = materials[intersection.materialId];
glm::vec3 materialColor = material.color;
// If the material indicates that the object was a light, "light" the ray
if (material.emittance > 0.0f) {
segment.color *= (materialColor * material.emittance);
segment.remainingBounces = 0;
}
else {
segment.color *= materialColor;
glm::vec3 intersectPos = intersection.t * segment.ray.direction + segment.ray.origin;
scatterRay(segment, intersectPos, intersection.surfaceNormal, material, rng);
}
// If there was no intersection, color the ray black.
// Lots of renderers use 4 channel color, RGBA, where A = alpha, often
// used for opacity, in which case they can indicate "no opacity".
// This can be useful for post-processing and image compositing.
} else {
segment.color = glm::vec3(0.0f);
segment.remainingBounces = 0;
}
pathSegments[idx] = segment;
}
}
__global__ void generateGBuffer (
int num_paths,
ShadeableIntersection* shadeableIntersections,
PathSegment* pathSegments,
GBufferPixel* gBuffer) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_paths)
{
//gBuffer[idx].t = shadeableIntersections[idx].t;
gBuffer[idx].normal = shadeableIntersections[idx].surfaceNormal;
gBuffer[idx].position = getPointOnRay(pathSegments[idx].ray,
shadeableIntersections[idx].t);
}
}
// Add the current iteration's output to the overall image
__global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < nPaths)
{
PathSegment iterationPath = iterationPaths[index];
image[iterationPath.pixelIndex] += iterationPath.color;
}
}
__global__ void denoise(int n,
GBufferPixel* gbuff,
glm::vec3* image,
glm::vec3 * dnImage,
int step,
int imageWidth,
float normalWeight,
float posWeight,
float colorWeight)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < n)
{
glm::vec3 colSum = glm::vec3(0.0f);
float wSum = 0.0f;
// hardcode a 5x5 Gaussian filter
float GaussianFilter[5][5] = { {1, 4, 6, 4, 1},
{4, 16, 24, 16, 4},
{6, 24, 36, 24, 6},
{4, 16, 24, 16, 4},
{1, 4, 6, 4, 1} };
// a way to convert from 2d pixel space to the 1d pixel array we have
int uStepIm = 1;
int vStepIm = imageWidth;
// the relative offset from the center pixel in the image
// e.g. -2, -2 is two pixels left and two pixels up in screen space
int imStartX = -2;
int imStartY = -2;
// store the gbuffer values for the center pixel of our filter
// i.e. the one we're actually calculating the color for
glm::vec3 centralNorm = gbuff[index].normal;
glm::vec3 centralPos = gbuff[index].position;
glm::vec3 centralCol = image[index];
// the cell count in 2d, starting in the upper left corner of
// our 5x5 filter
for (int y = 0; y < 5; y++) {
for (int x = 0; x < 5; x++) {
int imX = (imStartX + x) * uStepIm * step;
int imY = (imStartY + y) * vStepIm * step;
// i is the index for 1d representations of our 2d
// data, i.e. the beauty pass and the gbuffer
int i = index + imX + imY;
if (i < 0 || i >= n) {
// i can be out of bounds along the edges of the image
continue;
}
// get the Gaussian value for this pixel
float gVal = GaussianFilter[y][x];
// get the gbuffer values for this pixel
glm::vec3 nVal = gbuff[i].normal;
glm::vec3 pVal = gbuff[i].position;
glm::vec3 cVal = image[i];
// get the distance of the gbuffer values
// from our central pixel
//glm::vec3 a = centralCol - cVal;
float nDist = max(glm::length(centralNorm - nVal)/(step*step), 0.0f);
float pDist = glm::length(centralPos - pVal);// , centralPos - pVal);
float cDist = glm::length(centralCol - cVal);// , centralCol - cVal);
// get the weights based on these distances
// and our input values
float nw = min(exp(-1.0f * nDist / normalWeight), 1.0f);
float pw = min(exp(-1.0f * pDist / posWeight), 1.0f);
float cw = min(exp(-1.0f * cDist / colorWeight), 1.0f);
// get the overall
float w = nw * pw * cw;
colSum += cVal * w * gVal;
wSum += w * gVal;
}
}
// normalize the weighted sum by the accumulated filter weight to get the denoised color
dnImage[index] = colSum / wSum;
//dnImage[index] = colSum / (256.0f * steps);
}
}
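// Illustrative sketch (hypothetical helper, not wired into denoise() above): the
// edge-stopping term applied per g-buffer channel, factored out for clarity. Given
// the distance between the center pixel's value and a neighbour's value, and the
// user-tuned weight (sigma), it returns the exponential falloff clamped to 1,
// matching the nw/pw/cw computation inside the kernel.
__device__ inline float edgeStoppingWeight(float dist, float sigma) {
    return min(expf(-dist / sigma), 1.0f);
}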
/**
* Wrapper for the __global__ call that sets up the kernel calls and does a ton
* of memory management
*/
void pathtrace(int frame, int iter) {
const int traceDepth = hst_scene->state.traceDepth;
const Camera &cam = hst_scene->state.camera;
const int pixelcount = cam.resolution.x * cam.resolution.y;
// 2D block for generating ray from camera
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// 1D block for path tracing
const int blockSize1d = 128;
///////////////////////////////////////////////////////////////////////////
// Pathtracing Recap:
// * Initialize array of path rays (using rays that come out of the camera)
// * You can pass the Camera object to that kernel.
// * Each path ray must carry at minimum a (ray, color) pair,
// * where color starts as the multiplicative identity, white = (1, 1, 1).
// * This has already been done for you.
// * NEW: For the first depth, generate geometry buffers (gbuffers)
// * For each depth:
// * Compute an intersection in the scene for each path ray.
// A very naive version of this has been implemented for you, but feel
// free to add more primitives and/or a better algorithm.
// Currently, intersection distance is recorded as a parametric distance,
// t, or a "distance along the ray." t = -1.0 indicates no intersection.
// * Color is attenuated (multiplied) by reflections off of any object
// * Stream compact away all of the terminated paths.
// You may use either your implementation or `thrust::remove_if` or its
// cousins.
// * Note that you can't really use a 2D kernel launch any more - switch
// to 1D.
// * Shade the rays that intersected something or didn't bottom out.
// That is, color the ray by performing a color computation according
// to the shader, then generate a new ray to continue the ray path.
// We recommend just updating the ray's PathSegment in place.
// Note that this step may come before or after stream compaction,
// since some shaders you write may also cause a path to terminate.
// * Finally:
// * if not denoising, add this iteration's results to the image
// * TODO: if denoising, run kernels that take both the raw pathtraced result and the gbuffer, and put the result in the "pbo" from opengl
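// Illustrative sketch of the stream-compaction step described above; it is left as
// a comment because this simplified renderer keeps all paths alive until traceDepth,
// and the predicate name is hypothetical. With a functor such as
//
//   struct PathTerminated {
//     __host__ __device__ bool operator()(const PathSegment& p) const {
//       return p.remainingBounces <= 0;
//     }
//   };
//
// declared at file scope, dead paths could be compacted in place after shading:
//
//   dev_path_end = thrust::remove_if(thrust::device, dev_paths, dev_path_end,
//                                    PathTerminated());
//   num_paths = dev_path_end - dev_paths;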
generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths);
checkCUDAError("generate camera ray");
int depth = 0;
PathSegment* dev_path_end = dev_paths + pixelcount;
int num_paths = dev_path_end - dev_paths;
// --- PathSegment Tracing Stage ---
// Shoot ray into scene, bounce between objects, push shading chunks
// Empty gbuffer
cudaMemset(dev_gBuffer, 0, pixelcount * sizeof(GBufferPixel));
// clean shading chunks
cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection));
bool iterationComplete = false;
while (!iterationComplete) {
// tracing
dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d;
computeIntersections <<<numblocksPathSegmentTracing, blockSize1d>>> (
depth
, num_paths
, dev_paths
, dev_geoms
, hst_scene->geoms.size()
, dev_intersections
);
checkCUDAError("trace one bounce");
cudaDeviceSynchronize();
if (depth == 0) {
generateGBuffer<<<numblocksPathSegmentTracing, blockSize1d>>>(num_paths, dev_intersections, dev_paths, dev_gBuffer);
}
depth++;
shadeSimpleMaterials<<<numblocksPathSegmentTracing, blockSize1d>>> (
iter,
num_paths,
dev_intersections,
dev_paths,
dev_materials
);
iterationComplete = depth == traceDepth;
}
// Assemble this iteration and apply it to the image
dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d;
finalGather<<<numBlocksPixels, blockSize1d>>>(num_paths, dev_image, dev_paths);
///////////////////////////////////////////////////////////////////////////
if (*hst_scene->state.denoiseSettings->denoise){
float nWeight = pow(*hst_scene->state.denoiseSettings->normalWeight, 2);
float pWeight = pow(*hst_scene->state.denoiseSettings->positionWeight, 2);
float cWeight = pow(*hst_scene->state.denoiseSettings->colorWeight, 2);
int steps = *hst_scene->state.denoiseSettings->filterSize / 5;
for (int step = 1; step <= steps; step++) {
denoise <<<numBlocksPixels, blockSize1d>>>(num_paths,
dev_gBuffer,
dev_image,
dev_dnImage,
step,
cam.resolution.x,
nWeight,
pWeight,
cWeight);
}
}
// CHECKITOUT: use dev_image as reference if you want to implement saving denoised images.
// Otherwise, screenshots are also acceptable.
// Retrieve image from GPU
cudaMemcpy(hst_scene->state.image.data(), dev_image,
pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
checkCUDAError("pathtrace");
}
// CHECKITOUT: this kernel "post-processes" the gbuffer/gbuffers into something that you can visualize for debugging.
void showGBuffer(uchar4* pbo) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// CHECKITOUT: process the gbuffer results and send them to OpenGL buffer for visualization
gbufferToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, dev_gBuffer);
}
void showDenoise(uchar4* pbo, int iter) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// CHECKITOUT: process the gbuffer results and send them to OpenGL buffer for visualization
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_dnImage);
}
void showImage(uchar4* pbo, int iter) {
const Camera &cam = hst_scene->state.camera;
const dim3 blockSize2d(8, 8);
const dim3 blocksPerGrid2d(
(cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
(cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);
// Send results to OpenGL buffer for rendering
sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image);
}
|
b9c505e48ea8300e6e9f70c9215af46b39b54955.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2020 THL A29 Limited, a Tencent company.
// All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may
// not use this file except in compliance with the License. You may
// obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
// See the AUTHORS file for names of contributors.
#include <hip/hip_runtime.h>
#include <algorithm>
#include <cstdio>
#include <numeric>
#include "turbo_transformers/layers/kernels/gpu_transpose_kernel.h"
namespace turbo_transformers {
namespace layers {
namespace kernels {
/*
input : (batch_size, seq_len, weight_num, head_num, size_per_head) ->
output : (weight_num, batch_size, head_num, seq_len, size_per_head)
bias (weight_num, head_num, size_per_head)
*/
static __global__ void split_add_bias_transpose_for_score(
const float* input_data, const float* bias_data, const int batch_size,
const int seq_len, const int head_num, const int weight_num,
const int size_per_head, float* output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (seq_len * weight_num * head_num);
int seq_id =
bid % (seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
while (idx < size_per_head) {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[weight_id * batch_size * seq_len * head_num_size_per_head +
batch_id * seq_len * head_num_size_per_head +
head_id * seq_len * size_per_head + seq_id * size_per_head +
idx] =
input_data[batch_id * seq_len * weight_num * head_num_size_per_head +
seq_id * weight_num * head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
idx += blockDim.x;
}
}
template <>
void GPUSplitAddBiasTransposeForScore(
const float* input_data, const float* bias_data, float* out_data,
int64_t batch_size, int64_t seq_len, int64_t weight_num,
int64_t num_attention_heads, int64_t size_per_head, hipStream_t stream) {
const int n = size_per_head;
const int m = batch_size * seq_len * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
hipLaunchKernelGGL(( split_add_bias_transpose_for_score), dim3(grid), dim3(block), 0, stream,
input_data, bias_data, batch_size, seq_len, num_attention_heads,
weight_num, size_per_head, out_data);
}
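/*
 Illustrative sketch (hypothetical helper, not part of this library's API): a plain
 CPU reference of the same add-bias + transpose, handy for unit-testing the kernel
 above. It assumes host-resident buffers using the layouts documented before
 split_add_bias_transpose_for_score:
 input (batch_size, seq_len, weight_num, head_num, size_per_head)
 bias (weight_num, head_num, size_per_head)
 output (weight_num, batch_size, head_num, seq_len, size_per_head)
*/
inline void SplitAddBiasTransposeForScoreCPUReference(
    const float* input, const float* bias, float* output, int64_t batch_size,
    int64_t seq_len, int64_t weight_num, int64_t head_num, int64_t size_per_head) {
  for (int64_t b = 0; b < batch_size; ++b)
    for (int64_t s = 0; s < seq_len; ++s)
      for (int64_t w = 0; w < weight_num; ++w)
        for (int64_t h = 0; h < head_num; ++h)
          for (int64_t x = 0; x < size_per_head; ++x) {
            // source index in (batch, seq, weight, head, size_per_head) layout
            int64_t src =
                (((b * seq_len + s) * weight_num + w) * head_num + h) * size_per_head + x;
            // destination index in (weight, batch, head, seq, size_per_head) layout
            int64_t dst =
                (((w * batch_size + b) * head_num + h) * seq_len + s) * size_per_head + x;
            output[dst] = input[src] + bias[(w * head_num + h) * size_per_head + x];
          }
}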
/*
Output transpose results into three tensors
*/
static __global__ void split_add_bias_transpose_for_score_3output(
const float* input_data, const float* bias_data, const int batch_size,
const int seq_len, const int head_num, const int weight_num,
const int size_per_head, float* q_output_data, float* k_output_data,
float* v_output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (seq_len * weight_num * head_num);
int seq_id =
bid % (seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
float* output_data = nullptr;
if (weight_id == 0) {
output_data = q_output_data;
} else if (weight_id == 1) {
output_data = k_output_data;
} else if (weight_id == 2) {
output_data = v_output_data;
}
while (idx < size_per_head) {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[batch_id * seq_len * head_num_size_per_head +
head_id * seq_len * size_per_head + seq_id * size_per_head +
idx] =
input_data[batch_id * seq_len * weight_num * head_num_size_per_head +
seq_id * weight_num * head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
idx += blockDim.x;
}
}
/*
 forward transpose: (1, sum_seq_len, 3, head, hidden) ->
 3 x (batch, head, max_seq_len, hidden)
 Outputs the transpose results into three tensors, zero-padding each sequence to
 max_seq_len. block dim: size_per_head; grid dim: product of the remaining dims.
*/
static __global__ void split_add_bias_transpose_for_score_3output_pad(
const float* input_data, const float* bias_data, const int batch_size,
const int max_seq_len, const int64_t* seq_len_list, const int head_num,
const int weight_num, const int size_per_head, float* q_output_data,
float* k_output_data, float* v_output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (max_seq_len * weight_num * head_num);
int seq_id =
bid % (max_seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
// if (seq_id >= seq_len_list[batch_id]) {
// return;
// }
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
float* output_data = nullptr;
if (weight_id == 0) {
output_data = q_output_data;
} else if (weight_id == 1) {
output_data = k_output_data;
} else if (weight_id == 2) {
output_data = v_output_data;
}
int acc_seq_len = 0; // std::accumulate(seq_len_list.begin(),
// seq_len_list.begin() + batch_id, 0);
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
if (seq_id >= seq_len_list[batch_id]) {
output_data[batch_id * max_seq_len * head_num_size_per_head +
head_id * max_seq_len * size_per_head +
seq_id * size_per_head + idx] = 0.f;
} else {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[batch_id * max_seq_len * head_num_size_per_head +
head_id * max_seq_len * size_per_head +
seq_id * size_per_head + idx] =
input_data[(acc_seq_len + seq_id) * weight_num *
head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
}
idx += blockDim.x;
}
}
template <>
void GPUSplitAddBiasTransposeForScoreThreeOutput(
const float* input_data, const float* bias_data, int64_t batch_size,
int64_t seq_len, int64_t weight_num, int64_t num_attention_heads,
int64_t size_per_head, hipStream_t stream, float* q_out_data,
float* k_out_data, float* v_out_data) {
const int n = size_per_head;
const int m = batch_size * seq_len * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
hipLaunchKernelGGL(( split_add_bias_transpose_for_score_3output), dim3(grid), dim3(block), 0, stream,
input_data, bias_data, batch_size, seq_len, num_attention_heads,
weight_num, size_per_head, q_out_data, k_out_data, v_out_data);
}
template <>
void GPUSplitAddBiasTransposeForScoreThreeOutputPad(
const float* input_data, const float* bias_data,
const std::vector<int64_t>& seq_len_list, int64_t weight_num,
int64_t num_attention_heads, int64_t size_per_head, hipStream_t stream,
float* q_out_data, float* k_out_data, float* v_out_data) {
const int n = size_per_head;
int64_t batch_size = seq_len_list.size();
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
const int m = batch_size * max_seq_length * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
int64_t* d_seq_len_list;
hipMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
hipMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( split_add_bias_transpose_for_score_3output_pad), dim3(grid), dim3(block), 0, stream,
input_data, bias_data, batch_size, max_seq_length,
d_seq_len_list, // device vector
num_attention_heads, weight_num, size_per_head, q_out_data, k_out_data,
v_out_data);
hipFree(d_seq_len_list);
}
namespace {
// backward
// batch, head, seq, size_per_head -> batch seq head size_per_head
template <bool AddBias>
__global__ void transpose(const float* src, const float* bias,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
float* dst) {
int tid = threadIdx.x;
int idx = tid;
if (AddBias) {
int batch_id = blockIdx.x / (seq_len * head_num);
int seq_id = blockIdx.x / head_num % seq_len;
int head_id = blockIdx.x % head_num;
while (idx < size_per_head) {
dst[batch_id * (head_num * seq_len * size_per_head) +
head_id * seq_len * size_per_head + seq_id * size_per_head + idx] =
src[blockIdx.x * size_per_head + idx] +
bias[head_id * size_per_head + idx];
idx += blockDim.x;
}
} else {
//(batch, head, seq_len, size_per_head) -> (batch, seq_len, head,
// size_per_head)
int batch_id = blockIdx.x / (head_num * seq_len);
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
int seq_id = blockIdx.x % seq_len;
while (idx < size_per_head) {
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head + idx] =
src[blockIdx.x * size_per_head + idx];
idx += blockDim.x;
}
}
}
// batch, head, max_seq, size_per_head -> 1, sum_seq, head, size_per_head
__global__ void transpose_back_pad(const float* src, const int batch_size,
const int max_seq_len,
const int64_t* seq_len_list,
const int head_num, const int size_per_head,
float* dst) {
int tid = threadIdx.x;
int idx = tid;
//(batch, head, max_seq_len, size_per_head) -> (batch, seq_len, head,
// size_per_head)
int batch_id = blockIdx.x / (head_num * max_seq_len);
int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len;
int seq_id = blockIdx.x % max_seq_len;
if (seq_id >= seq_len_list[batch_id]) {
return;
}
// int64_t acc_seq_len = std::accumulate(seq_len_list.begin(),
// seq_len_list.begin() + batch_idx, 0);
int64_t acc_seq_len = 0;
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
// set the invalid elements to 0.
dst[(acc_seq_len + seq_id) * (head_num * size_per_head) +
head_id * size_per_head + idx] = src[blockIdx.x * size_per_head + idx];
idx += blockDim.x;
}
}
// 1, sum_seq, head size_per_head -> batch, head, max_seq, size_per_head
__global__ void add_bias_transpose_forward_pad(
const float* src, const float* bias, const int batch_size,
const int max_seq_len, const int64_t* seq_len_list, const int head_num,
const int size_per_head, float* dst) {
int tid = threadIdx.x;
int idx = tid;
int batch_id = blockIdx.x / (head_num * max_seq_len);
int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len;
int seq_id = blockIdx.x % max_seq_len;
int64_t acc_seq_len = 0;
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
// set the invalid elements to 0.
if (seq_id >= seq_len_list[batch_id]) {
dst[blockIdx.x * size_per_head + idx] = 0.f;
} else {
dst[blockIdx.x * size_per_head + idx] =
src[(acc_seq_len + seq_id) * (head_num * size_per_head) +
head_id * size_per_head + idx] +
bias[head_id * size_per_head + idx];
}
idx += blockDim.x;
}
}
} // namespace
/*
(batch_size, seq_len, num_attention_heads, size_per_head) ->
(batch_size, head_num, seq_len, size_per_head)
*/
template <typename T, bool AddBias>
void GPUTransposeForScore(const T* input_data, const T* bias,
int64_t batch_size, int64_t seq_len,
int64_t num_attention_heads, int64_t size_per_head,
hipStream_t stream, T* output_data) {
dim3 grid, block;
grid.x = batch_size * num_attention_heads * seq_len;
block.x = min(1024, int(size_per_head));
hipLaunchKernelGGL(( transpose<AddBias>), dim3(grid), dim3(block), 0, stream, input_data, bias, batch_size,
seq_len, num_attention_heads,
size_per_head, output_data);
}
template void GPUTransposeForScore<float, true>(
const float* input_data, const float* bias, int64_t batch_size,
int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head,
hipStream_t stream, float* output_data);
template void GPUTransposeForScore<float, false>(
const float* input_data, const float* bias, int64_t batch_size,
int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head,
hipStream_t stream, float* output_data);
/*
(1, sum_seq_len, num_attention_heads, size_per_head) ->
(batch_size, head_num, max_seq_len, size_per_head)
*/
template <>
void GPUTransposeForScorePad(const float* input_data, int64_t batch_size,
const std::vector<int64_t>& seq_len_list,
int64_t num_attention_heads, int64_t size_per_head,
hipStream_t stream, float* output_data) {
dim3 grid, block;
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
grid.x = batch_size * num_attention_heads * max_seq_length;
block.x = min(1024, int(size_per_head));
int64_t* d_seq_len_list;
hipMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
hipMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( transpose_back_pad), dim3(grid), dim3(block), 0, stream,
input_data, batch_size, max_seq_length, d_seq_len_list,
num_attention_heads, size_per_head, output_data);
hipFree(d_seq_len_list);
}
// (1, sum_seq_len, head, hidden_size) -> (batch, head, max_seq_len,
// hidden_size)
template <>
void GPUAddBiasTransposeForScorePad(const float* input_data,
const float* bias_data,
const std::vector<int64_t>& seq_len_list,
int64_t num_attention_heads,
int64_t size_per_head, hipStream_t stream,
float* output_data) {
dim3 grid, block;
int64_t batch_size = seq_len_list.size();
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
grid.x = batch_size * num_attention_heads * max_seq_length;
block.x = min(1024, int(size_per_head));
int64_t* d_seq_len_list;
hipMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
hipMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add_bias_transpose_forward_pad), dim3(grid), dim3(block), 0, stream,
input_data, bias_data, batch_size, max_seq_length, d_seq_len_list,
num_attention_heads, size_per_head, output_data);
hipFree(d_seq_len_list);
}
} // namespace kernels
} // namespace layers
} // namespace turbo_transformers
| b9c505e48ea8300e6e9f70c9215af46b39b54955.cu | // Copyright (C) 2020 THL A29 Limited, a Tencent company.
// All rights reserved.
// Licensed under the BSD 3-Clause License (the "License"); you may
// not use this file except in compliance with the License. You may
// obtain a copy of the License at
// https://opensource.org/licenses/BSD-3-Clause
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" basis,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
// See the AUTHORS file for names of contributors.
#include <cuda_runtime.h>
#include <algorithm>
#include <cstdio>
#include <numeric>
#include "turbo_transformers/layers/kernels/gpu_transpose_kernel.h"
namespace turbo_transformers {
namespace layers {
namespace kernels {
/*
input : (batch_size, seq_len, weight_num, head_num, size_per_head) ->
output : (weight_num, batch_size, head_num, seq_len, size_per_head)
bias (weight_num, head_num, size_per_head)
*/
static __global__ void split_add_bias_transpose_for_score(
const float* input_data, const float* bias_data, const int batch_size,
const int seq_len, const int head_num, const int weight_num,
const int size_per_head, float* output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (seq_len * weight_num * head_num);
int seq_id =
bid % (seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
while (idx < size_per_head) {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[weight_id * batch_size * seq_len * head_num_size_per_head +
batch_id * seq_len * head_num_size_per_head +
head_id * seq_len * size_per_head + seq_id * size_per_head +
idx] =
input_data[batch_id * seq_len * weight_num * head_num_size_per_head +
seq_id * weight_num * head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
idx += blockDim.x;
}
}
template <>
void GPUSplitAddBiasTransposeForScore(
const float* input_data, const float* bias_data, float* out_data,
int64_t batch_size, int64_t seq_len, int64_t weight_num,
int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream) {
const int n = size_per_head;
const int m = batch_size * seq_len * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
split_add_bias_transpose_for_score<<<grid, block, 0, stream>>>(
input_data, bias_data, batch_size, seq_len, num_attention_heads,
weight_num, size_per_head, out_data);
}
/*
Outputs the transposed results into three separate tensors (q, k, v)
*/
static __global__ void split_add_bias_transpose_for_score_3output(
const float* input_data, const float* bias_data, const int batch_size,
const int seq_len, const int head_num, const int weight_num,
const int size_per_head, float* q_output_data, float* k_output_data,
float* v_output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (seq_len * weight_num * head_num);
int seq_id =
bid % (seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
float* output_data = nullptr;
if (weight_id == 0) {
output_data = q_output_data;
} else if (weight_id == 1) {
output_data = k_output_data;
} else if (weight_id == 2) {
output_data = v_output_data;
}
while (idx < size_per_head) {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[batch_id * seq_len * head_num_size_per_head +
head_id * seq_len * size_per_head + seq_id * size_per_head +
idx] =
input_data[batch_id * seq_len * weight_num * head_num_size_per_head +
seq_id * weight_num * head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
idx += blockDim.x;
}
}
/*
forward transpose: (1, sum_seq_len, 3, head, hidden) -> 3 x (batch, head, max_seq_len, hidden).
Writes the transposed results into three padded output tensors.
Launch config: block dim is size_per_head, grid dim is the product of the remaining dims.
*/
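// Illustrative example (values are assumptions, not from the original code):
// with seq_len_list = {3, 5}, max_seq_len = 5, head_num = 2, weight_num = 3
// and size_per_head = 64, the launch uses block.x = 64 and
// grid.x = 2 * 5 * 2 * 3 = 60; rows of batch 0 with seq_id >= 3 are written as
// zero padding in the (batch, head, max_seq_len, size_per_head) outputs.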
static __global__ void split_add_bias_transpose_for_score_3output_pad(
const float* input_data, const float* bias_data, const int batch_size,
const int max_seq_len, const int64_t* seq_len_list, const int head_num,
const int weight_num, const int size_per_head, float* q_output_data,
float* k_output_data, float* v_output_data) {
int tid = threadIdx.x;
int bid = blockIdx.x;
int idx = tid;
int batch_id = bid / (max_seq_len * weight_num * head_num);
int seq_id =
bid % (max_seq_len * weight_num * head_num) / (weight_num * head_num);
int weight_id = bid % (weight_num * head_num) / head_num;
int head_id = bid % head_num;
// if (seq_id >= seq_len_list[batch_id]) {
// return;
// }
int head_num_size_per_head = head_num * size_per_head;
int weight_id_head_num_size_per_head = weight_id * head_num_size_per_head;
int head_id_size_per_head = head_id * size_per_head;
float* output_data = nullptr;
if (weight_id == 0) {
output_data = q_output_data;
} else if (weight_id == 1) {
output_data = k_output_data;
} else if (weight_id == 2) {
output_data = v_output_data;
}
int acc_seq_len = 0; // std::accumulate(seq_len_list.begin(),
// seq_len_list.begin() + batch_id, 0);
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
if (seq_id >= seq_len_list[batch_id]) {
output_data[batch_id * max_seq_len * head_num_size_per_head +
head_id * max_seq_len * size_per_head +
seq_id * size_per_head + idx] = 0.f;
} else {
float bias_val = bias_data[weight_id_head_num_size_per_head +
head_id_size_per_head + idx];
output_data[batch_id * max_seq_len * head_num_size_per_head +
head_id * max_seq_len * size_per_head +
seq_id * size_per_head + idx] =
input_data[(acc_seq_len + seq_id) * weight_num *
head_num_size_per_head +
weight_id_head_num_size_per_head + head_id_size_per_head +
idx] +
bias_val;
}
idx += blockDim.x;
}
}
template <>
void GPUSplitAddBiasTransposeForScoreThreeOutput(
const float* input_data, const float* bias_data, int64_t batch_size,
int64_t seq_len, int64_t weight_num, int64_t num_attention_heads,
int64_t size_per_head, cudaStream_t stream, float* q_out_data,
float* k_out_data, float* v_out_data) {
const int n = size_per_head;
const int m = batch_size * seq_len * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
split_add_bias_transpose_for_score_3output<<<grid, block, 0, stream>>>(
input_data, bias_data, batch_size, seq_len, num_attention_heads,
weight_num, size_per_head, q_out_data, k_out_data, v_out_data);
}
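// Sketch of the intended call pattern (names and shapes are assumptions): the
// fused QKV projection output laid out as
// (batch, seq_len, 3, heads, size_per_head) is split into three
// (batch, heads, seq_len, size_per_head) tensors while the bias is added.
//
// GPUSplitAddBiasTransposeForScoreThreeOutput(d_qkv, d_qkv_bias, batch,
// seq_len, /*weight_num=*/3, heads,
// size_per_head, stream,
// d_q, d_k, d_v);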
template <>
void GPUSplitAddBiasTransposeForScoreThreeOutputPad(
const float* input_data, const float* bias_data,
const std::vector<int64_t>& seq_len_list, int64_t weight_num,
int64_t num_attention_heads, int64_t size_per_head, cudaStream_t stream,
float* q_out_data, float* k_out_data, float* v_out_data) {
const int n = size_per_head;
int64_t batch_size = seq_len_list.size();
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
const int m = batch_size * max_seq_length * num_attention_heads * weight_num;
dim3 grid(m);
dim3 block(min(n, 1024));
int64_t* d_seq_len_list;
cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
cudaMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
cudaMemcpyHostToDevice);
split_add_bias_transpose_for_score_3output_pad<<<grid, block, 0, stream>>>(
input_data, bias_data, batch_size, max_seq_length,
d_seq_len_list, // device vector
num_attention_heads, weight_num, size_per_head, q_out_data, k_out_data,
v_out_data);
cudaFree(d_seq_len_list);
}
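// Note: this pad variant stages seq_len_list through a temporary device buffer
// using blocking cudaMalloc/cudaMemcpy/cudaFree on every call; only the kernel
// launch itself is enqueued on `stream`.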
namespace {
// backward
// batch, head, seq, size_per_head -> batch, seq, head, size_per_head
template <bool AddBias>
__global__ void transpose(const float* src, const float* bias,
const int batch_size, const int seq_len,
const int head_num, const int size_per_head,
float* dst) {
int tid = threadIdx.x;
int idx = tid;
if (AddBias) {
int batch_id = blockIdx.x / (seq_len * head_num);
int seq_id = blockIdx.x / head_num % seq_len;
int head_id = blockIdx.x % head_num;
while (idx < size_per_head) {
dst[batch_id * (head_num * seq_len * size_per_head) +
head_id * seq_len * size_per_head + seq_id * size_per_head + idx] =
src[blockIdx.x * size_per_head + idx] +
bias[head_id * size_per_head + idx];
idx += blockDim.x;
}
} else {
//(batch, head, seq_len, size_per_head) -> (batch, seq_len, head,
// size_per_head)
int batch_id = blockIdx.x / (head_num * seq_len);
int head_id = (blockIdx.x % (head_num * seq_len)) / seq_len;
int seq_id = blockIdx.x % seq_len;
while (idx < size_per_head) {
dst[batch_id * (head_num * seq_len * size_per_head) +
seq_id * head_num * size_per_head + head_id * size_per_head + idx] =
src[blockIdx.x * size_per_head + idx];
idx += blockDim.x;
}
}
}
// batch, head, max_seq, size_per_head -> 1, sum_seq, head, size_per_head
__global__ void transpose_back_pad(const float* src, const int batch_size,
const int max_seq_len,
const int64_t* seq_len_list,
const int head_num, const int size_per_head,
float* dst) {
int tid = threadIdx.x;
int idx = tid;
//(batch, head, max_seq_len, size_per_head) -> (batch, seq_len, head,
// size_per_head)
int batch_id = blockIdx.x / (head_num * max_seq_len);
int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len;
int seq_id = blockIdx.x % max_seq_len;
if (seq_id >= seq_len_list[batch_id]) {
return;
}
// int64_t acc_seq_len = std::accumulate(seq_len_list.begin(),
// seq_len_list.begin() + batch_idx, 0);
int64_t acc_seq_len = 0;
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
// copy only the valid (non-padded) elements.
dst[(acc_seq_len + seq_id) * (head_num * size_per_head) +
head_id * size_per_head + idx] = src[blockIdx.x * size_per_head + idx];
idx += blockDim.x;
}
}
// 1, sum_seq, head, size_per_head -> batch, head, max_seq, size_per_head
__global__ void add_bias_transpose_forward_pad(
const float* src, const float* bias, const int batch_size,
const int max_seq_len, const int64_t* seq_len_list, const int head_num,
const int size_per_head, float* dst) {
int tid = threadIdx.x;
int idx = tid;
int batch_id = blockIdx.x / (head_num * max_seq_len);
int head_id = (blockIdx.x % (head_num * max_seq_len)) / max_seq_len;
int seq_id = blockIdx.x % max_seq_len;
int64_t acc_seq_len = 0;
for (size_t i = 0; i < batch_id; ++i) {
acc_seq_len += seq_len_list[i];
}
while (idx < size_per_head) {
// set the invalid elements to 0.
if (seq_id >= seq_len_list[batch_id]) {
dst[blockIdx.x * size_per_head + idx] = 0.f;
} else {
dst[blockIdx.x * size_per_head + idx] =
src[(acc_seq_len + seq_id) * (head_num * size_per_head) +
head_id * size_per_head + idx] +
bias[head_id * size_per_head + idx];
}
idx += blockDim.x;
}
}
} // namespace
/*
(batch_size, seq_len, num_attention_heads, size_per_head) ->
(batch_size, head_num, seq_len, size_per_head)
*/
template <typename T, bool AddBias>
void GPUTransposeForScore(const T* input_data, const T* bias,
int64_t batch_size, int64_t seq_len,
int64_t num_attention_heads, int64_t size_per_head,
cudaStream_t stream, T* output_data) {
dim3 grid, block;
grid.x = batch_size * num_attention_heads * seq_len;
block.x = min(1024, int(size_per_head));
transpose<AddBias><<<grid, block, 0, stream>>>(input_data, bias, batch_size,
seq_len, num_attention_heads,
size_per_head, output_data);
}
template void GPUTransposeForScore<float, true>(
const float* input_data, const float* bias, int64_t batch_size,
int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head,
cudaStream_t stream, float* output_data);
template void GPUTransposeForScore<float, false>(
const float* input_data, const float* bias, int64_t batch_size,
int64_t seq_len, int64_t num_attention_heads, int64_t size_per_head,
cudaStream_t stream, float* output_data);
/*
(1, sum_seq_len, num_attention_heads, size_per_head) ->
(batch_size, head_num, max_seq_len, size_per_head)
*/
template <>
void GPUTransposeForScorePad(const float* input_data, int64_t batch_size,
const std::vector<int64_t>& seq_len_list,
int64_t num_attention_heads, int64_t size_per_head,
cudaStream_t stream, float* output_data) {
dim3 grid, block;
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
grid.x = batch_size * num_attention_heads * max_seq_length;
block.x = min(1024, int(size_per_head));
int64_t* d_seq_len_list;
cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
cudaMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
cudaMemcpyHostToDevice);
transpose_back_pad<<<grid, block, 0, stream>>>(
input_data, batch_size, max_seq_length, d_seq_len_list,
num_attention_heads, size_per_head, output_data);
cudaFree(d_seq_len_list);
}
// (1, sum_seq_len, head, hidden_size) -> (batch, head, max_seq_len,
// hidden_size)
template <>
void GPUAddBiasTransposeForScorePad(const float* input_data,
const float* bias_data,
const std::vector<int64_t>& seq_len_list,
int64_t num_attention_heads,
int64_t size_per_head, cudaStream_t stream,
float* output_data) {
dim3 grid, block;
int64_t batch_size = seq_len_list.size();
int64_t max_seq_length =
*std::max_element(seq_len_list.begin(), seq_len_list.end());
grid.x = batch_size * num_attention_heads * max_seq_length;
block.x = min(1024, int(size_per_head));
int64_t* d_seq_len_list;
cudaMalloc((void**)&(d_seq_len_list), batch_size * sizeof(int64_t));
cudaMemcpy(d_seq_len_list, seq_len_list.data(), batch_size * sizeof(int64_t),
cudaMemcpyHostToDevice);
add_bias_transpose_forward_pad<<<grid, block, 0, stream>>>(
input_data, bias_data, batch_size, max_seq_length, d_seq_len_list,
num_attention_heads, size_per_head, output_data);
cudaFree(d_seq_len_list);
}
} // namespace kernels
} // namespace layers
} // namespace turbo_transformers
|
08e52aa563ae71a79771469c8c709ca7bbe74f6b.hip | // !!! This is a file automatically generated by hipify!!!
/*
Author: Cao Thanh Tung
Filename: pba3DHost.cu
Copyright (c) 2010, School of Computing, National University of Singapore.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <hip/device_functions.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <fstream>
using namespace std;
#include "pba3D.h"
#include "Geometry.h"
#include "CudaWrapper.h"
// Parameters for CUDA kernel executions
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
#define PBA_INFINITY 0x3ff
/****** Global Variables *******/
int **pbaTextures;
int pbaMemSize;
int pbaCurrentBuffer;
int pbaTexSize;
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
texture<short> pbaTexPointer;
/********* Kernels ********/
#include "pba3DKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 3D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
void pba3DInitialization(int fboSize)
{
pbaTexSize = fboSize;
pbaTextures = (int **) malloc(2 * sizeof(int *));
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
// Allocate 2 textures
//hipMalloc((void **) &pbaTextures[0], pbaMemSize);
//hipMalloc((void **) &pbaTextures[1], pbaMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
void pba3DDeinitialization()
{
free(pbaTextures);
return;
}
// Copy input to GPU
void pba3DInitializeInput(int *input, int *output)
{
//hipMemcpy(pbaTextures[0], input, pbaMemSize, hipMemcpyHostToDevice);
pbaTextures[0] = input;
pbaTextures[1] = output;
// Set Current Source Buffer
pbaCurrentBuffer = 0;
}
// In-place transpose of a cubic texture.
// Transpositions are performed on each XY plane.
// Point coordinates are also swapped.
void pba3DTransposeXY(int *texture)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY);
hipLaunchKernelGGL(( kernelTransposeXY), dim3(grid), dim3(block) , 0, 0, texture, pbaTexSize);
CudaCheckError();
}
// Phase 1 of PBA. m1 must divide the texture size.
// Sweeping is done along the Z axis.
void pba3DColorZAxis(int m1)
{
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y);
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelFloodZ), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
pbaCurrentBuffer = 1 - pbaCurrentBuffer;
if (m1 > 1)
{
// Passing information between bands
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
}
}
// Phase 2 of PBA. m2 must divide the texture size.
// This method works along the Y axis.
void pba3DComputeProximatePointsYAxis(int m2)
{
int iStack = 1 - pbaCurrentBuffer;
int iForward = pbaCurrentBuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y);
// Compute proximate points locally in each band
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
hipLaunchKernelGGL(( kernelMaurerAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
// Construct forward pointers
CudaSafeCall( hipBindTexture(0, pbaTexLinks, pbaTextures[iStack]) );
hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, (short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
CudaSafeCall( hipBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) );
// Repeatedly merge two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2)
{
grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y);
hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack],
(short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand);
CudaCheckError();
}
CudaSafeCall( hipUnbindTexture(pbaTexLinks) );
CudaSafeCall( hipUnbindTexture(pbaTexColor) );
CudaSafeCall( hipUnbindTexture(pbaTexPointer) );
}
// Phase 3 of PBA. m3 must divide the texture size.
// This method colors along the Y axis.
void pba3DColorYAxis(int m3)
{
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize);
CudaSafeCall( hipBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) );
hipLaunchKernelGGL(( kernelColorAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], pbaTexSize);
CudaCheckError();
CudaSafeCall( hipUnbindTexture(pbaTexColor) );
return;
}
void pba3DCompute(int m1, int m2, int m3)
{
/************* Compute Z axis *************/
// --> (X, Y, Z)
pba3DColorZAxis(m1);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (Y, X, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
/************** Compute X axis *************/
// Compute X axis
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (X, Y, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
}
// Compute 3D Voronoi diagram
// Input: a 3D texture. Each pixel is an integer encoding 3 coordinates.
// For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain
// the encoded coordinate (x, y, z). Pixels that are not sites should contain
// the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode).
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
// Note: input texture will be released after this.
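// (The exact ENCODE/DECODE bit layout lives in pba3D.h and is not shown here;
// PBA_INFINITY == 0x3ff suggests 10 bits per coordinate, e.g. something like
// (x << 20) | (y << 10) | z, but treat that layout as an assumption.)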
void pba3DVoronoiDiagram(int *dInput, int **dOutput,
int phase1Band, int phase2Band, int phase3Band)
{
// Initialization
pba3DInitializeInput(dInput, *dOutput);
// Compute the 3D Voronoi Diagram
pba3DCompute(phase1Band, phase2Band, phase3Band);
// Pass back the result
*dOutput = pbaTextures[pbaCurrentBuffer];
return;
}
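// Rough host-side call sequence inferred from the functions in this file
// (band values are illustrative and must divide texSize; dInput/dOutput are
// assumed to be caller-allocated device buffers of texSize^3 ints):
//
// pba3DInitialization(texSize);
// // fill dInput with MARKER everywhere and ENCODE'd coordinates at the sites
// pba3DVoronoiDiagram(dInput, &dOutput, /*phase1Band=*/1,
// /*phase2Band=*/1, /*phase3Band=*/2);
// pba3DDeinitialization();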
// A function to draw points onto GPU texture
void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
CudaSafeCall( hipMemset( dInputVoronoi, MARKER, pbaMemSize ) );
hipLaunchKernelGGL(( kerSetPointsInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dInputVoronoi,
pbaTexSize
);
CudaCheckError();
return;
}
// A function to draw point IDs onto the GPU texture
void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
hipLaunchKernelGGL(( kerSetPointIndicesInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0,
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dMapToID,
pbaTexSize
);
CudaCheckError();
return;
}
void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
hipLaunchKernelGGL(( kerSetIndexInGrid), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, gridWidth, dPointIndexGrid, dGrid );
CudaCheckError();
// Free grid
CudaSafeCall( hipFree( dPointIndexGrid ) );
return;
}
| 08e52aa563ae71a79771469c8c709ca7bbe74f6b.cu | /*
Author: Cao Thanh Tung
Filename: pba3DHost.cu
Copyright (c) 2010, School of Computing, National University of Singapore.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer. Redistributions in binary form must reproduce
the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution.
Neither the name of the National University of Singapore nor the names of its contributors
may be used to endorse or promote products derived from this software without specific
prior written permission from the National University of Singapore.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <device_functions.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <fstream>
using namespace std;
#include "pba3D.h"
#include "Geometry.h"
#include "CudaWrapper.h"
// Parameters for CUDA kernel executions
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
#define PBA_INFINITY 0x3ff
/****** Global Variables *******/
int **pbaTextures;
int pbaMemSize;
int pbaCurrentBuffer;
int pbaTexSize;
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
texture<short> pbaTexPointer;
/********* Kernels ********/
#include "pba3DKernel.h"
///////////////////////////////////////////////////////////////////////////
//
// Initialize necessary memory for 3D Voronoi Diagram computation
// - textureSize: The size of the Discrete Voronoi Diagram (width = height)
//
///////////////////////////////////////////////////////////////////////////
void pba3DInitialization(int fboSize)
{
pbaTexSize = fboSize;
pbaTextures = (int **) malloc(2 * sizeof(int *));
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
// Allocate 2 textures
//cudaMalloc((void **) &pbaTextures[0], pbaMemSize);
//cudaMalloc((void **) &pbaTextures[1], pbaMemSize);
}
///////////////////////////////////////////////////////////////////////////
//
// Deallocate all allocated memory
//
///////////////////////////////////////////////////////////////////////////
void pba3DDeinitialization()
{
free(pbaTextures);
return;
}
// Copy input to GPU
void pba3DInitializeInput(int *input, int *output)
{
//cudaMemcpy(pbaTextures[0], input, pbaMemSize, cudaMemcpyHostToDevice);
pbaTextures[0] = input;
pbaTextures[1] = output;
// Set Current Source Buffer
pbaCurrentBuffer = 0;
}
// In-place transpose of a cubic texture.
// Transpositions are performed on each XY plane.
// Point coordinates are also swapped.
void pba3DTransposeXY(int *texture)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((pbaTexSize / BLOCKXY) * pbaTexSize, pbaTexSize / BLOCKXY);
kernelTransposeXY<<< grid, block >>>(texture, pbaTexSize);
CudaCheckError();
}
// Phase 1 of PBA. m1 must divide the texture size.
// Sweeping is done along the Z axis.
void pba3DColorZAxis(int m1)
{
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m1, pbaTexSize / block.y);
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelFloodZ<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
pbaCurrentBuffer = 1 - pbaCurrentBuffer;
if (m1 > 1)
{
// Passing information between bands
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelPropagateInterband<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]) );
kernelUpdateVertical<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m1);
CudaCheckError();
}
}
// Phase 2 of PBA. m2 must divide the texture size.
// This method works along the Y axis.
void pba3DComputeProximatePointsYAxis(int m2)
{
int iStack = 1 - pbaCurrentBuffer;
int iForward = pbaCurrentBuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((pbaTexSize / block.x) * m2, pbaTexSize / block.y);
// Compute proximate points locally in each band
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]) );
kernelMaurerAxis<<< grid, block >>>(pbaTextures[iStack], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
// Construct forward pointers
CudaSafeCall( cudaBindTexture(0, pbaTexLinks, pbaTextures[iStack]) );
kernelCreateForwardPointers<<< grid, block >>>((short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / m2);
CudaCheckError();
CudaSafeCall( cudaBindTexture(0, pbaTexPointer, pbaTextures[iForward], pbaTexSize * pbaTexSize * pbaTexSize * sizeof( short ) ) );
// Repeatedly merge two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2)
{
grid = dim3((pbaTexSize / block.x) * (noBand / 2), pbaTexSize / block.y);
kernelMergeBands<<< grid, block >>>(pbaTextures[iStack],
(short *) pbaTextures[iForward], pbaTexSize, pbaTexSize / block.x, pbaTexSize / noBand);
CudaCheckError();
}
CudaSafeCall( cudaUnbindTexture(pbaTexLinks) );
CudaSafeCall( cudaUnbindTexture(pbaTexColor) );
CudaSafeCall( cudaUnbindTexture(pbaTexPointer) );
}
// Phase 3 of PBA. m3 must divide the texture size.
// This method colors along the Y axis.
void pba3DColorYAxis(int m3)
{
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(pbaTexSize / block.x, pbaTexSize);
CudaSafeCall( cudaBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer] ) );
kernelColorAxis<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], pbaTexSize);
CudaCheckError();
CudaSafeCall( cudaUnbindTexture(pbaTexColor) );
return;
}
void pba3DCompute(int m1, int m2, int m3)
{
/************* Compute Z axis *************/
// --> (X, Y, Z)
pba3DColorZAxis(m1);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (Y, X, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
/************** Compute X axis *************/
// Compute X axis
pba3DComputeProximatePointsYAxis(m2);
pba3DColorYAxis(m3);
// --> (X, Y, Z)
pba3DTransposeXY(pbaTextures[pbaCurrentBuffer]);
}
// Compute 3D Voronoi diagram
// Input: a 3D texture. Each pixel is an integer encoding 3 coordinates.
// For each site at (x, y, z), the pixel at coordinate (x, y, z) should contain
// the encoded coordinate (x, y, z). Pixels that are not sites should contain
// the integer MARKER. Use ENCODE (and DECODE) macro to encode (and decode).
// See original paper for the effect of the three parameters:
// phase1Band, phase2Band, phase3Band
// Parameters must divide textureSize
// Note: input texture will be released after this.
void pba3DVoronoiDiagram(int *dInput, int **dOutput,
int phase1Band, int phase2Band, int phase3Band)
{
// Initialization
pba3DInitializeInput(dInput, *dOutput);
// Compute the 3D Voronoi Diagram
pba3DCompute(phase1Band, phase2Band, phase3Band);
// Pass back the result
*dOutput = pbaTextures[pbaCurrentBuffer];
return;
}
// A function to draw points onto GPU texture
void setPointsInGrid( Point3DVec& pointDVec, int *dInputVoronoi )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
CudaSafeCall( cudaMemset( dInputVoronoi, MARKER, pbaMemSize ) );
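// cudaMemset writes a single byte value, so the memset above relies on MARKER
// having a uniform byte pattern (e.g. -1 == 0xFFFFFFFF); the actual definition
// is in pba3D.h.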
kerSetPointsInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dInputVoronoi,
pbaTexSize
);
CudaCheckError();
return;
}
// A function to draw point IDs onto the GPU texture
void setPointIndicesInGrid( Point3DVec& pointDVec, int* dMapToID )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
kerSetPointIndicesInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>(
thrust::raw_pointer_cast( &pointDVec[0] ),
( int ) pointDVec.size(),
dMapToID,
pbaTexSize
);
CudaCheckError();
return;
}
void setIndexInGrid( int gridWidth, int* dPointIndexGrid, int* dGrid )
{
const int BlocksPerGrid = 64;
const int ThreadsPerBlock = 256;
kerSetIndexInGrid<<< BlocksPerGrid, ThreadsPerBlock >>>( gridWidth, dPointIndexGrid, dGrid );
CudaCheckError();
// Free grid
CudaSafeCall( cudaFree( dPointIndexGrid ) );
return;
}
|
da2aea7746f9a7fae233064aa68c98849905ef40.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
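// Worked example (illustrative numbers): a 5x5 input with kernel 3, stride 2,
// padding 1, dilation 1 and output_padding 1 gives
// (5 - 1) * 2 - 2 * 1 + (1 * (3 - 1) + 1) + 1 = 10, i.e. a 10x10 output plane.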
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
slow_conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
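// In row-major terms this computes
// columns(m x n) = weight^T(m x k) * input_n(k x n), i.e.
// (n_output_plane*kh*kw, in_h*in_w) =
// (n_output_plane*kh*kw, n_input_plane) x (n_input_plane, in_h*in_w).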
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
1,
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
0,
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
col2im<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
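// With k_ == 1 this gemm is an outer product against the all-ones buffer:
// every spatial position of channel c gets bias[c] added (beta == 1 keeps the
// values already in output_n), which broadcasts the bias without a dedicated
// bias kernel.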
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
1,
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
1,
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void slow_conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (kernel_height != 1 || kernel_width != 1) {
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = (kernel_height != 1 || kernel_width != 1) ?
grad_columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
1,
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
0,
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
if (kernel_height == 1 && kernel_width == 1) {
// for 1x1 column skip im2col step
columns.copy_(grad_output_n);
} else {
// Extract columns:
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = (kernel_height != 1 || kernel_width != 1) ?
columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
1,
grad_weight.data_ptr<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
1,
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& slow_conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor ones = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor slow_conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor columns = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor ones = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
| da2aea7746f9a7fae233064aa68c98849905ef40.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
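// Worked example of the formula above, assuming input_height = 4, stride_height = 2,
// pad_height = 1, dilation_height = 1, kernel_height = 3, output_padding_height = 0:
//   output_height = (4 - 1) * 2 - 2 * 1 + (1 * (3 - 1) + 1) + 0 = 7,
// the inverse of the usual conv2d output-size relation (a height-7 input convolved
// with these parameters gives back height 4).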
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
slow_conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
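// The ones buffer turns the per-channel bias addition below into a rank-1 GEMM:
// every output plane c gets bias[c] * ones[p] added at each spatial position p,
// i.e. (illustrative scalar sketch, same shapes as above):
//   for (int64_t c = 0; c < n_output_plane; c++)
//     for (int64_t p = 0; p < output_height * output_width; p++)
//       output_n[c * output_height * output_width + p] += bias[c];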
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
'n',
't',
n,
m,
k,
1,
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
0,
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
col2im<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
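// col2im is the adjoint of im2col: the GEMM above produced, for every input position,
// one column of n_output_plane * kernel_height * kernel_width values, and col2im
// scatter-adds the overlapping columns back into the output image. In matrix form
// (a sketch, with weight flattened to [n_input_plane, n_output_plane*kh*kw]):
//   output_n = col2im(weight^T @ input_n).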
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n_,
m_,
k_,
1,
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
1,
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void slow_conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
if (kernel_height != 1 || kernel_width != 1) {
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = (kernel_height != 1 || kernel_width != 1) ?
grad_columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
'n',
'n',
n,
m,
k,
1,
gemm_in_ptr,
n,
weight.data_ptr<scalar_t>(),
k,
0,
grad_input_n.data_ptr<scalar_t>(),
n);
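// Net effect of this GEMM, in row-major terms (a sketch using the shapes set up above):
//   grad_input_n[n_input_plane, input_height*input_width] =
//       weight[n_input_plane, n_output_plane*kh*kw] @ grad_columns,
// so the input-gradient of a transposed convolution is an ordinary forward convolution
// of grad_output with the same weights (the 1x1 case skips im2col and feeds
// grad_output_n straight into the product).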
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
if (kernel_height == 1 && kernel_width == 1) {
// for 1x1 column skip im2col step
columns.copy_(grad_output_n);
} else {
// Extract columns:
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
}
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
auto gemm_in_ptr = (kernel_height != 1 || kernel_width != 1) ?
columns.data_ptr<scalar_t>() : grad_output_n.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
't',
'n',
n,
m,
k,
scale,
gemm_in_ptr,
k,
input_n.data_ptr<scalar_t>(),
k,
1,
grad_weight.data_ptr<scalar_t>(),
n);
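// In row-major terms this accumulates (a sketch using the shapes set up above):
//   grad_weight[n_input_plane, n_output_plane*kh*kw] += scale * input_n @ columns^T,
// i.e. the weight gradient is the input image times the transposed im2col'd
// grad_output, summed over spatial positions by the GEMM and over the batch by the loop.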
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
1,
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& slow_conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor ones = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor slow_conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor columns = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor ones = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
cd4a980004202fb7f783d68e73cb4e87f432676c.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/zgemv2_offset.cu
* KBLAS is a high performance CUDA library for a subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv2_offset_core.cuh"
#if(TARGET_SM >= 30)
#define zgemvn_offset_nb (32)
#define zgemvn_offset_ntcol (4)
#define zgemvn_offset_ept (2)
#define zgemvn_offset_width (zgemvn_offset_ntcol*zgemvn_offset_ept)
#define zgemvn_offset_by (4)
#define zgemvt_offset_nb (32)
#define zgemvt_offset_ntcol (4)
#define zgemvt_offset_ept (4)
#define zgemvt_offset_width (zgemvt_offset_ntcol*zgemvt_offset_ept)
#define zgemvt_offset_by (4)
#else
#define zgemvn_offset_nb (64)
#define zgemvn_offset_ntcol (8)
#define zgemvn_offset_ept (2)
#define zgemvn_offset_width (zgemvn_offset_ntcol*zgemvn_offset_ept)
#define zgemvn_offset_by (1)
#define zgemvt_offset_nb (64)
#define zgemvt_offset_ntcol (8)
#define zgemvt_offset_ept (2)
#define zgemvt_offset_width (zgemvt_offset_ntcol*zgemvt_offset_ept)
#define zgemvt_offset_by (1)
#endif
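// These tuning macros fix the per-block tile shape: a block of nb x ntcol threads
// covers width = ntcol * ept matrix columns at a time, so with TARGET_SM >= 30 the
// non-transpose kernel uses 32x4 threads over 4*2 = 8 columns and the transpose
// kernel 32x4 threads over 4*4 = 16 columns, while the fallback branch uses 64x8
// threads with 2 elements per thread (width 16).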
extern "C"
int kblas_zscal_async(int n, hipDoubleComplex alpha, hipDoubleComplex *x, int incx, hipStream_t stream);
int kblas_zgemv2_offset_driver(char trans, int rows, int cols,
hipDoubleComplex alpha, hipDoubleComplex *dA, int lda,
hipDoubleComplex *dX, int incx,
hipDoubleComplex beta, hipDoubleComplex *dY, int incy,
int offset_r, int offset_c,
hipStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// offset necessary calculations
int offset_r_ = offset_r % zgemvn_offset_nb;
int offset_c_ = offset_c % zgemvn_offset_width;
int rows_ = rows - (offset_r - offset_r_);
int cols_ = cols - (offset_c - offset_c_);
// Advance pointers
dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_);
dX += (offset_c - offset_c_) * incx;
dY += (offset_r - offset_r_) * incy;
// scaling with beta
kblas_zscal_async(rows_, beta, dY, incy, stream);
int mod_r = rows_ % zgemvn_offset_nb;
int mod_c = cols_ % zgemvn_offset_width;
int blocks = rows_/zgemvn_offset_nb;
if(mod_r != 0) blocks += 1;
const int thread_x = zgemvn_offset_nb;
const int thread_y = zgemvn_offset_ntcol;
const int ept = zgemvn_offset_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
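// The irregular tail of the matrix is split as mod_c = threshold * ept + ept_;
// e.g. with ept = 2 and mod_c = 5 this gives threshold = 2 full per-thread chunks and
// ept_ = 1 leftover element, and the switch below selects the kernel instantiation
// templated on that ept_ value.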
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, zgemvn_offset_by);
//printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_);
switch(ept_)
{
case 0:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 1:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 2:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 3:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 4:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 5:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 6:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 7:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 8:hipLaunchKernelGGL(( gemvn_offset<hipDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
default: printf("irregular part %d is not supported, please extend the case statement of zgemv\n", ept_); exit(1);
}
} // end of non-transpose case
else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
// offset necessary calculations
int offset_r_ = offset_r % zgemvt_offset_nb;
int offset_c_ = offset_c % zgemvt_offset_width;
int rows_ = rows - (offset_r - offset_r_);
int cols_ = cols - (offset_c - offset_c_);
// Advance pointers
dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_);
dX += (offset_r - offset_r_) * incx;
dY += (offset_c - offset_c_) * incy;
// scaling with beta
kblas_zscal_async(cols_, beta, dY, incy, stream);
int mod_r = rows_ % zgemvt_offset_nb;
int mod_c = cols_ % zgemvt_offset_width;
int blocks = cols_/zgemvt_offset_width;
if(mod_c != 0) blocks += 1;
const int thread_x = zgemvt_offset_nb;
const int thread_y = zgemvt_offset_ntcol;
const int ept = zgemvt_offset_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, zgemvt_offset_by);
int conj;
if(trans == 'c' || trans == 'C')conj = 1;
else conj = 0;
//printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_);
//printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_);
switch(ept_)
{
case 0:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 1:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 2:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 3:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 4:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 5:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 6:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 7:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 8:hipLaunchKernelGGL(( gemvt_offset<hipDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
default: printf("irregular part %d is not supported, please extend the case statement of zgemv\n", ept_); exit(1);
}
}
else
{
printf("ZGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_zgemv2_offset(char trans, int rows, int cols,
hipDoubleComplex alpha, hipDoubleComplex *dA, int lda,
hipDoubleComplex *dX, int incx,
hipDoubleComplex beta, hipDoubleComplex *dY, int incy,
int offset_r, int offset_c)
{
return kblas_zgemv2_offset_driver(trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, 0);
}
extern "C"
int kblas_zgemv2_offset_async( char trans, int rows, int cols,
hipDoubleComplex alpha, hipDoubleComplex *dA, int lda,
hipDoubleComplex *dX, int incx,
hipDoubleComplex beta, hipDoubleComplex *dY, int incy,
int offset_r, int offset_c,
hipStream_t stream)
{
return kblas_zgemv2_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream);
}
| cd4a980004202fb7f783d68e73cb4e87f432676c.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/zgemv2_offset.cu
* KBLAS is a high performance CUDA library for a subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv2_offset_core.cuh"
#if(TARGET_SM >= 30)
#define zgemvn_offset_nb (32)
#define zgemvn_offset_ntcol (4)
#define zgemvn_offset_ept (2)
#define zgemvn_offset_width (zgemvn_offset_ntcol*zgemvn_offset_ept)
#define zgemvn_offset_by (4)
#define zgemvt_offset_nb (32)
#define zgemvt_offset_ntcol (4)
#define zgemvt_offset_ept (4)
#define zgemvt_offset_width (zgemvt_offset_ntcol*zgemvt_offset_ept)
#define zgemvt_offset_by (4)
#else
#define zgemvn_offset_nb (64)
#define zgemvn_offset_ntcol (8)
#define zgemvn_offset_ept (2)
#define zgemvn_offset_width (zgemvn_offset_ntcol*zgemvn_offset_ept)
#define zgemvn_offset_by (1)
#define zgemvt_offset_nb (64)
#define zgemvt_offset_ntcol (8)
#define zgemvt_offset_ept (2)
#define zgemvt_offset_width (zgemvt_offset_ntcol*zgemvt_offset_ept)
#define zgemvt_offset_by (1)
#endif
extern "C"
int kblas_zscal_async(int n, cuDoubleComplex alpha, cuDoubleComplex *x, int incx, cudaStream_t stream);
int kblas_zgemv2_offset_driver(char trans, int rows, int cols,
cuDoubleComplex alpha, cuDoubleComplex *dA, int lda,
cuDoubleComplex *dX, int incx,
cuDoubleComplex beta, cuDoubleComplex *dY, int incy,
int offset_r, int offset_c,
cudaStream_t stream)
{
if(trans == 'n' || trans == 'N')
{
// offset necessary calculations
int offset_r_ = offset_r % zgemvn_offset_nb;
int offset_c_ = offset_c % zgemvn_offset_width;
int rows_ = rows - (offset_r - offset_r_);
int cols_ = cols - (offset_c - offset_c_);
// Advance pointers
dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_);
dX += (offset_c - offset_c_) * incx;
dY += (offset_r - offset_r_) * incy;
// scaling with beta
kblas_zscal_async(rows_, beta, dY, incy, stream);
int mod_r = rows_ % zgemvn_offset_nb;
int mod_c = cols_ % zgemvn_offset_width;
int blocks = rows_/zgemvn_offset_nb;
if(mod_r != 0) blocks += 1;
const int thread_x = zgemvn_offset_nb;
const int thread_y = zgemvn_offset_ntcol;
const int ept = zgemvn_offset_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, zgemvn_offset_by);
//printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_);
switch(ept_)
{
case 0: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 1: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 2: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 3: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 4: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 5: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 6: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 7: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
case 8: gemvn_offset<cuDoubleComplex, zgemvn_offset_nb, zgemvn_offset_ntcol, ept, zgemvn_offset_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, offset_r_, offset_c_); break;
default: printf("irregular part %d is not supported, please extend the case statement of zgemv\n", ept_); exit(1);
}
} // end of non-transpose case
else if(trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
// offset necessary calculations
int offset_r_ = offset_r % zgemvt_offset_nb;
int offset_c_ = offset_c % zgemvt_offset_width;
int rows_ = rows - (offset_r - offset_r_);
int cols_ = cols - (offset_c - offset_c_);
// Advance pointers
dA += (offset_c - offset_c_) * lda + (offset_r - offset_r_);
dX += (offset_r - offset_r_) * incx;
dY += (offset_c - offset_c_) * incy;
// scaling with beta
kblas_zscal_async(cols_, beta, dY, incy, stream);
int mod_r = rows_ % zgemvt_offset_nb;
int mod_c = cols_ % zgemvt_offset_width;
int blocks = cols_/zgemvt_offset_width;
if(mod_c != 0) blocks += 1;
const int thread_x = zgemvt_offset_nb;
const int thread_y = zgemvt_offset_ntcol;
const int ept = zgemvt_offset_ept;
int threshold = mod_c / ept;
int ept_ = mod_c % ept;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, zgemvt_offset_by);
int conj;
if(trans == 'c' || trans == 'C')conj = 1;
else conj = 0;
//printf("modr = %d, modc = %d, threshold = %d, ept_ = %d \n", mod_r, mod_c, threshold, ept_);
//printf("rows_ = %d - cols_ = %d - mod_r = %d - mod_c = %d - offset_r_ = %d - offset_c_ = %d \n", rows_, cols_, mod_r, mod_c, offset_r_, offset_c_);
switch(ept_)
{
case 0: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 0><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 1: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 1><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 2: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 2><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 3: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 3><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 4: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 4><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 5: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 5><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 6: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 6><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 7: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 7><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
case 8: gemvt_offset<cuDoubleComplex, zgemvt_offset_nb, zgemvt_offset_ntcol, ept, zgemvt_offset_width, 8><<<dimGrid, dimBlock, 0, stream>>>(rows_, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, threshold, conj, offset_r_, offset_c_); break;
default: printf("irregular part %d is not supported, please extend the case statement of zgemv\n", ept_); exit(1);
}
}
else
{
printf("ZGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
extern "C"
int kblas_zgemv2_offset(char trans, int rows, int cols,
cuDoubleComplex alpha, cuDoubleComplex *dA, int lda,
cuDoubleComplex *dX, int incx,
cuDoubleComplex beta, cuDoubleComplex *dY, int incy,
int offset_r, int offset_c)
{
return kblas_zgemv2_offset_driver(trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, 0);
}
extern "C"
int kblas_zgemv2_offset_async( char trans, int rows, int cols,
cuDoubleComplex alpha, cuDoubleComplex *dA, int lda,
cuDoubleComplex *dX, int incx,
cuDoubleComplex beta, cuDoubleComplex *dY, int incy,
int offset_r, int offset_c,
cudaStream_t stream)
{
return kblas_zgemv2_offset_driver( trans, rows, cols, alpha, dA, lda, dX, incx, beta, dY, incy, offset_r, offset_c, stream);
}
|
e4727a75c82119d3a1fe9be064f686406df3a067.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void printValue( int *value) {
printf("value %d\n",value[0]);
printf("value %d\n",value[1]);
}
void hostFunction(){
int *value;
hipMallocManaged(&value, 2 * sizeof(int));
value[0]=1;
value[1]=2;
hipLaunchKernelGGL((printValue), dim3(1), dim3(1), 0, 0, value);
hipDeviceSynchronize();
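// value was allocated with hipMallocManaged, so it is visible to both host and device;
// the synchronization above is what makes it safe to free (or read) the buffer on the
// host once the kernel and its printf output have completed.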
hipFree(value);
}
int main() {
hostFunction();
return 0;
}
| e4727a75c82119d3a1fe9be064f686406df3a067.cu | #include <stdio.h>
__global__ void printValue( int *value) {
printf("value %d\n",value[0]);
printf("value %d\n",value[1]);
}
void hostFunction(){
int *value;
cudaMallocManaged(&value, 2 * sizeof(int));
value[0]=1;
value[1]=2;
printValue<<< 1, 1 >>>(value);
cudaDeviceSynchronize();
cudaFree(value);
}
int main() {
hostFunction();
return 0;
}
|
030cf60cb6b007d4336e19b7f1d4425aa8ce27b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
__device__ void _2Dstencil_(int *d_e,int *d_r,float* c_coeff,int X,int Y,int k, int x, int y,int GX,int Gx,int Gy)
{
int h_e_i;
int h_r_i = x + ( y * (X) );
h_e_i = h_r_i;
int temp = d_e[h_r_i];
temp *= c_coeff[0];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
h_r_i = Gx + ( (Gy) * (GX) );
d_r[h_r_i] = temp;
}
__global__ void _2Dstencil_global(int *d_e,int *d_r,float *c_coeff,int X,int Y,int k,int times){
int x,y;//,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
int k2 = k/2*times;
extern __shared__ int shared[];
int blockThreadIndex = threadIdx.x + threadIdx.y*blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x+(k*times);
int Dy = blockDim.y+(k*times);
int sharedTam = Dx*Dy;
int * sharedRes = &shared[sharedTam];
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
shared[stride] = d_e[globalIdx];
else
shared[stride] = 0;
}
__syncthreads();
for(int t=times-1;t>0;t--)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x+(t*k);
int tDy = blockDim.y+(t*k);
int tk2 = (times-t)*k/2;
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for(int stride=blockThreadIndex;stride<tSharedTam;stride+=(blockDim.x*blockDim.y))
{
_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,(stride%tDx)+tk2,(stride/tDx)+tk2,Dx,(stride%tDx)+tk2,(stride/tDx)+tk2);
}
__syncthreads();
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
shared[stride]=sharedRes[stride];
}
__syncthreads();
}
_2Dstencil_(shared,d_r,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,X,x,y);
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
}
int main(int argc, char* argv[]) {
int *h_e,*h_r;
int *d_e, *d_r;
int size,tam,sharedSize,sharedTam;
int X=32;
int Y=32;
int k=4;
int times = 1;
int BX=32;
int BY=32;
int GX=1;
int GY=1;
float *c_coeff,*d_c_coeff;
if(argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if(argc > 2)
{
k = atoi(argv[2]);
}
if(argc > 3)
{
times = atoi(argv[3]);
}
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
sharedSize = ((block_dim.x+(k*times))*(block_dim.y+(k*times)))*sizeof(int)*2;
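// Shared memory holds the block tile plus a halo of k/2 cells per fused time step on
// every side, and the trailing *2 reserves a second buffer (sharedRes) so results can
// be copied back between steps; e.g. a 32x32 block with k = 4 and times = 2 needs
// 2 * (32+8) * (32+8) * sizeof(int) = 12800 bytes.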
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
size = X * Y * sizeof(int);
tam = X * Y;
h_e = (int*) malloc(size);
h_r = (int*) malloc(size);
c_coeff = (float*)malloc((k/2+1)*sizeof(float));
hipMalloc(&d_e, size);
hipMalloc(&d_r, size);
hipMalloc(&d_c_coeff,(k/2+1)*sizeof(float));
printf("\n coefs \n");
for(int i=0;i<(k/2+1);i++)
{
c_coeff[i]=(float)((k/2+1)-i)/(float)(k/2+1);
}
for(int i=0;i<(k/2+1);i++)
{
printf(" %f",c_coeff[i]);
}
printf("\n coefs \n");
FILE *arq;
arq = fopen("entrada.txt", "rt");
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
fscanf(arq," %d",&h_e[i+j*X]);
fclose(arq);
/* Copy vectors from host memory to device memory */
hipMemcpy(d_e, h_e, size, hipMemcpyHostToDevice);
hipMemcpy(d_c_coeff, c_coeff, (k/2+1)*sizeof(float), hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate (&start);
hipEventCreate (&stop);
hipEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
hipLaunchKernelGGL((_2Dstencil_global), dim3(grid_dim),dim3(block_dim),sharedSize, 0, d_e,d_r,d_c_coeff,X,Y,k,times);
hipError_t err = hipSuccess;
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", hipGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
hipDeviceSynchronize();
hipEventRecord (stop, 0);
hipEventSynchronize (stop);
float elapsedTime;
hipEventElapsedTime (&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
printf ("[%d,%.5f],\n", tam,elapsedTime);
hipMemcpy(h_r, d_r, size, hipMemcpyDeviceToHost);
arq = fopen("resultado.txt", "wt");
for(int i=0;i<X;i++)
{
for(int j=0;j<Y;j++)
{
fprintf(arq," %d",h_r[i+j*X]);
}
fprintf(arq,"\n");
}
fclose(arq);
hipFree(d_e);
hipFree(d_r);
hipFree(d_c_coeff);
std::free(h_e);
std::free(h_r);
std::free(c_coeff);
return 0;
} /* main */
/*
for(int lk = 1;lk<(k/2)+1;lk++)
{
if(x+lk < X)
{
if((x+lk)/Dx == blockIdx.x)
{
h_e_i = ((x+lk)%Dx) + ( (Ys) * (Dx) );
temp += shared[h_e_i]*c_coeff[lk];
}else
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(x-lk >= 0)
{
if((x-lk)/Dx == blockIdx.x)
{
h_e_i = ((x-lk)%Dx) + ( (Ys) * (Dx) );
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(y+lk < Y)
{
if((y+lk)/Dy == blockIdx.y)
{
h_e_i = ((Xs) + ( ((y+lk)%Dy) * (Dx) ));
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(y-lk >= 0)
{
if((y-lk)/Dy == blockIdx.y)
{
h_e_i = ((Xs) + ( ((y-lk)%Dy) * (Dx) ));
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
}
d_r[h_r_i] = temp;
*/ | 030cf60cb6b007d4336e19b7f1d4425aa8ce27b9.cu | #include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
__device__ void _2Dstencil_(int *d_e,int *d_r,float* c_coeff,int X,int Y,int k, int x, int y,int GX,int Gx,int Gy)
{
int h_e_i;
int h_r_i = x + ( y * (X) );
h_e_i = h_r_i;
int temp = d_e[h_r_i];
temp *= c_coeff[0];
for(int lk =1;lk<(k/2)+1;lk++)
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
h_r_i = Gx + ( (Gy) * (GX) );
d_r[h_r_i] = temp;
}
__global__ void _2Dstencil_global(int *d_e,int *d_r,float *c_coeff,int X,int Y,int k,int times){
int x,y;//,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
x = threadIdx.x + (blockIdx.x*blockDim.x);
y = threadIdx.y + (blockIdx.y*blockDim.y);
int k2 = k/2*times;
extern __shared__ int shared[];
int blockThreadIndex = threadIdx.x + threadIdx.y*blockDim.x;
// Xs = threadIdx.x;
// Ys = threadIdx.y;
int Dx = blockDim.x+(k*times);
int Dy = blockDim.y+(k*times);
int sharedTam = Dx*Dy;
int * sharedRes = &shared[sharedTam];
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
shared[stride] = d_e[globalIdx];
else
shared[stride] = 0;
}
__syncthreads();
for(int t=times-1;t>0;t--)
{
//_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
int tDx = blockDim.x+(t*k);
int tDy = blockDim.y+(t*k);
int tk2 = (times-t)*k/2;
// int tDx = blockDim.x+(1*k);
// int tDy = blockDim.y+(1*k);
// int tk2 = (1)*k/2;
int tSharedTam = tDx * tDy;
for(int stride=blockThreadIndex;stride<tSharedTam;stride+=(blockDim.x*blockDim.y))
{
_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,(stride%tDx)+tk2,(stride/tDx)+tk2,Dx,(stride%tDx)+tk2,(stride/tDx)+tk2);
}
__syncthreads();
for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
{
shared[stride]=sharedRes[stride];
}
__syncthreads();
}
_2Dstencil_(shared,d_r,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,X,x,y);
// for(int stride=blockThreadIndex;stride<sharedTam;stride+=(blockDim.x*blockDim.y))
// {
// int globalIdx = (blockIdx.x*blockDim.x)-k2+stride%Dx + ((blockIdx.y*blockDim.y)-k2+stride/Dx)*X;
// if(globalIdx > 0 && (blockIdx.x*blockDim.x)-k2+stride%Dx < X && ((blockIdx.y*blockDim.y)-k2+stride/Dx)<Y)
// d_r[globalIdx] = sharedRes[stride];
// }
}
int main(int argc, char* argv[]) {
int *h_e,*h_r;
int *d_e, *d_r;
int size,tam,sharedSize,sharedTam;
int X=32;
int Y=32;
int k=4;
int times = 1;
int BX=32;
int BY=32;
int GX=1;
int GY=1;
float *c_coeff,*d_c_coeff;
if(argc > 1)
{
X = atoi(argv[1]);
Y = X;
}
if(argc > 2)
{
k = atoi(argv[2]);
}
if(argc > 3)
{
times = atoi(argv[3]);
}
if(X>32)
{
GX = ceil((float)X/(float)32);
BX = 32;
}
if(Y>32)
{
GY = ceil((float)Y/(float)32);
BY = 32;
}
dim3 block_dim(BX,BY,1);
dim3 grid_dim(GX,GY,1);
//sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
sharedSize = ((block_dim.x+(k*times))*(block_dim.y+(k*times)))*sizeof(int)*2;
//sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
size = X * Y * sizeof(int);
tam = X * Y;
h_e = (int*) malloc(size);
h_r = (int*) malloc(size);
c_coeff = (float*)malloc((k/2+1)*sizeof(float));
cudaMalloc(&d_e, size);
cudaMalloc(&d_r, size);
cudaMalloc(&d_c_coeff,(k/2+1)*sizeof(float));
printf("\n coefs \n");
for(int i=0;i<(k/2+1);i++)
{
c_coeff[i]=(float)((k/2+1)-i)/(float)(k/2+1);
}
for(int i=0;i<(k/2+1);i++)
{
printf(" %f",c_coeff[i]);
}
printf("\n coefs \n");
FILE *arq;
arq = fopen("entrada.txt", "rt");
for(int i=0;i<X;i++)
for(int j=0;j<Y;j++)
fscanf(arq," %d",&h_e[i+j*X]);
fclose(arq);
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c_coeff, c_coeff, (k/2+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate (&start);
cudaEventCreate (&stop);
cudaEventRecord (start, 0);
/******************
*** Kernel Call ***
*******************/
//_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
_2Dstencil_global<<<grid_dim,block_dim,sharedSize>>>(d_e,d_r,d_c_coeff,X,Y,k,times);
cudaError_t err = cudaSuccess;
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
}
/******************
*** Kernel Call ***
*******************/
cudaDeviceSynchronize();
cudaEventRecord (stop, 0);
cudaEventSynchronize (stop);
float elapsedTime;
cudaEventElapsedTime (&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("X %d || Y %d \nBX %d || BY %d \n",X,Y,BX,BY);
printf ("[%d,%.5f],\n", tam,elapsedTime);
cudaMemcpy(h_r, d_r, size, cudaMemcpyDeviceToHost);
arq = fopen("resultado.txt", "wt");
for(int i=0;i<X;i++)
{
for(int j=0;j<Y;j++)
{
fprintf(arq," %d",h_r[i+j*X]);
}
fprintf(arq,"\n");
}
fclose(arq);
cudaFree(d_e);
cudaFree(d_r);
cudaFree(d_c_coeff);
std::free(h_e);
std::free(h_r);
std::free(c_coeff);
return 0;
} /* main */
/*
for(int lk = 1;lk<(k/2)+1;lk++)
{
if(x+lk < X)
{
if((x+lk)/Dx == blockIdx.x)
{
h_e_i = ((x+lk)%Dx) + ( (Ys) * (Dx) );
temp += shared[h_e_i]*c_coeff[lk];
}else
{
h_e_i = (x+lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(x-lk >= 0)
{
if((x-lk)/Dx == blockIdx.x)
{
h_e_i = ((x-lk)%Dx) + ( (Ys) * (Dx) );
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x-lk) + ( (y) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(y+lk < Y)
{
if((y+lk)/Dy == blockIdx.y)
{
h_e_i = ((Xs) + ( ((y+lk)%Dy) * (Dx) ));
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x) + ( (y+lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
if(y-lk >= 0)
{
if((y-lk)/Dy == blockIdx.y)
{
h_e_i = ((Xs) + ( ((y-lk)%Dy) * (Dx) ));
temp += shared[h_e_i]*c_coeff[lk];
}
else
{
h_e_i = (x) + ( (y-lk) * (X) );
temp += d_e[h_e_i]*c_coeff[lk];
}
}
}
d_r[h_r_i] = temp;
*/ |
0dc03596e6e16796fffa389331065763f7531d5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) {
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, stddev);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means);
}
void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, stddevs);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean));
}
void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means);
}
void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generateLogNormal<scalar_t>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
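// L1-normalize each row of t in place; the per-row cumulative sums taken by the callers then form proper CDFs for sampling.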
THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2);
int64_t rows = THCTensor_(size)(state, t, 0);
int64_t cols = THCTensor_(size)(state, t, 1);
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
hipLaunchKernelGGL(( renormRowsL1<scalar_t>)
, dim3(grid), dim3(block), block.x * sizeof(scalar_t),
THCState_getCurrentStream(state), THCTensor_(data)(state, t),
rows, cols);
}
void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
THCGenerator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
int64_t numDist =
inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0);
int64_t numCategoriesLong =
inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) :
THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
int free_prob_dist = 0;
// Restructure data for 2d
if (inputSize == 1) {
THCTensor *temp = THCTensor_(new)(state);
THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0);
prob_dist = temp;
free_prob_dist = 1;
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(scalar_t) + sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accreal>)
, dim3(grid), dim3(block),
requiredShared,
THCState_getCurrentStream(state),
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, prob_dist),
THCTensor_(stride)(state, prob_dist, 0),
THCTensor_(stride)(state, prob_dist, 1)
);
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, prob_dist);
THCTensor_(copy)(state, origDist, prob_dist);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, prob_dist);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
hipLaunchKernelGGL(( sampleMultinomialWithReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->state.gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum),
THCTensor_(data)(state, normDist));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement)
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state),
gen->state.gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
}
if (free_prob_dist) {
THCTensor_(free)(state, prob_dist);
}
}
void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
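// Walker/Vose-style alias-table construction: after setup, _q[i] holds the scaled acceptance probability of bucket i and _J[i] its alias, so a draw needs one bucket pick plus one biased coin flip.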
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
int64_t inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
hipLaunchKernelGGL(( aliasMultinomialFilter)
, dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) ,
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
hipLaunchKernelGGL(( aliasMultinomialSetup)
, dim3(1), dim3(1), 0, THCState_getCurrentStream(state),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
scalar_t q_max = THCTensor_(maxall)(state, _q);
hipLaunchKernelGGL(( condDiv),
dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
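// Alias-method draw: 'uniform' selects a bucket in [0, K) and 'bernoulli' supplies the biased coin the kernel uses to choose between that bucket and its alias _J[bucket].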
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THCGenerator* gen = THCRandom_getGenerator(state);
int64_t K = THCudaLongTensor_nElement(state, _J);
int64_t output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
hipLaunchKernelGGL(( multinomialAliasDrawKernel)
, dim3(THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
#endif
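// Inverse-CDF sampling: for u ~ Uniform(0,1], ceil(log(u) / log(1-p)) is Geometric(p)-distributed, which is what the kernels generated below compute.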
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, hiprand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p)))))
#endif
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
#define CURAND64(STATE) (((uint64_t)hiprand(STATE)) << 32) | (uint64_t)hiprand(STATE)
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, \
static_cast<scalar_t>(static_cast<int32_t>((x % range) + base)))
GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \
static_cast<scalar_t>(static_cast<int64_t>((x % range) + base)))
#elif defined(THC_REAL_IS_HALF)
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand,
(ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base))))
#else
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand,
static_cast<scalar_t>(static_cast<int32_t>(x % range + base)))
#endif
void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val)
{
THArgCheck(min_val < max_val, 2,
"max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
uint64_t range = max_val - min_val;
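// generate_random maps a single 32-bit hiprand draw to (x % range) + base, so ranges wider than 2^32 fall back to generate_random_64, which assembles a 64-bit draw from two hiprand calls (CURAND64).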
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
if (range > 1ULL << 32) {
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, min_val, range);
} else {
#endif
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), static_cast<uint32_t>(range));
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
}
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val)
{
THCTensor_(clampedRandom)(state, self_, 0LL, max_val);
};
#define HLF_MANT_DIG 11
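// The modulus (1 << MANT_DIG) + 1 limits samples to [0, 2^MANT_DIG], the largest run of consecutive integers exactly representable in the target floating type; HLF_MANT_DIG = 11 is half precision's mantissa width.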
void THCTensor_(random)(THCState* state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
#if defined(THC_REAL_IS_HALF)
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_FLOAT)
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_DOUBLE)
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_LONG)
hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1);
#else
hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1);
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef HLF_MANT_DIG
#undef CURAND64
#undef NUM_BLOCKS
#endif
| 0dc03596e6e16796fffa389331065763f7531d5f.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorRandom.cu"
#else
#define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS)
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, a, b);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) {
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, stddev);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means);
}
void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, stddevs);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean));
}
void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs)
{
THCTensor_(resizeAs)(state, self, means);
THCTensor_(normal)(state, self, 0, 1);
THCTensor_(cmul)(state, self, self, stddevs);
THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means);
}
void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generateLogNormal<scalar_t><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, mean, stdv);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, lambda);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, median, sigma);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(renormRows)(struct THCState* state,
THCTensor* t) {
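// L1-normalize each row of t in place; the per-row cumulative sums taken by the callers then form proper CDFs for sampling.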
THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2);
int64_t rows = THCTensor_(size)(state, t, 0);
int64_t cols = THCTensor_(size)(state, t, 1);
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
dim3 grid(rows < numSM * 4 ? rows : numSM * 4);
dim3 block(cols < maxThreads ? cols : maxThreads);
renormRowsL1<scalar_t>
<<<grid, block, block.x * sizeof(scalar_t),
THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t),
rows, cols);
}
void THCTensor_(multinomial)(struct THCState *state,
THCudaLongTensor *self,
THCTensor *prob_dist,
int n_sample,
int with_replacement)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist));
THCGenerator* gen = THCRandom_getGenerator(state);
int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist);
THArgCheck(inputSize > 0 && inputSize <= 2, 2,
"prob_dist must be 1 or 2 dim");
// Categories are in the innermost dimension
int64_t numDist =
inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0);
int64_t numCategoriesLong =
inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) :
THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1);
// Since the index tensor is float, numCategories cannot exceed max
// float integer precision
THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2,
"number of categories cannot exceed 2^24");
int numCategories = (int) numCategoriesLong;
THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples");
if (!with_replacement) {
THArgCheck(n_sample <= numCategories, 2,
"cannot sample n_sample > prob_dist:size(1) samples without "
"replacement");
}
int free_prob_dist = 0;
// Restructure data for 2d
if (inputSize == 1) {
THCTensor *temp = THCTensor_(new)(state);
THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0);
prob_dist = temp;
free_prob_dist = 1;
}
THCudaLongTensor_resize2d(state, self, numDist, n_sample);
// get current device properties
cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state);
THAssert(props != NULL);
int numSM = props->multiProcessorCount;
int maxThreads = props->maxThreadsPerBlock;
int maxShared = props->sharedMemPerBlock;
int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads)
* (sizeof(scalar_t) + sizeof(accreal));
if (n_sample == 1 && maxShared >= requiredShared) {
// Optimized allocation-free implementation
// To exploit greater parallelism for the sampling, generate the
// Uniform random samples in a separate kernel launch, into
// temporarily allocated memory. The device RNG is thread-limited
THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample);
THCTensor_(uniform)(state, sampled, 0.0, 1.0);
dim3 block(numCategories < maxThreads ? numCategories : maxThreads);
dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4);
sampleMultinomialOnce<scalar_t, accreal>
<<<grid, block,
requiredShared,
THCState_getCurrentStream(state)>>>(
THCudaLongTensor_data(state, self),
numDist,
numCategories,
THCTensor_(data)(state, sampled),
THCTensor_(data)(state, prob_dist),
THCTensor_(stride)(state, prob_dist, 0),
THCTensor_(stride)(state, prob_dist, 1)
);
THCTensor_(free)(state, sampled);
} else {
// Generic, slow implementation with memory allocations
// For sampling without replacement, we modify the distribution
// for subsequent samples in this space
THCTensor* origDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, origDist, prob_dist);
THCTensor_(copy)(state, origDist, prob_dist);
THCTensor* normDist = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, normDist, prob_dist);
THCTensor* prefixSum = THCTensor_(new)(state);
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
if (with_replacement) {
// Sample with replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from one
// distribution concurrently.
dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS);
sampleMultinomialWithReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states,
n_sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, prefixSum),
THCTensor_(data)(state, normDist));
} else {
// Sample without replacement
// Binary search is warp divergent (so effectively we're running
// with just a single thread), but for better utilization,
// we need each block to have at least 4 warps.
dim3 block(32, 4);
// Each warp in a block will generate a sample from a different
// distribution concurrently.
ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4);
dim3 grid(numBlocks < MAX_NUM_BLOCKS ? numBlocks : MAX_NUM_BLOCKS);
for (int sample = 0; sample < n_sample; ++sample) {
if (sample > 0) {
// Update probabilities
// Renorm along rows
THCTensor_(copy)(state, normDist, origDist);
THCTensor_(renormRows)(state, normDist);
// Prefix sum along rows
THCTensor_(cumsum)(state, prefixSum, normDist, 1);
}
// The kernel can only draw one sample before we have to
// recalculate our distribution
sampleMultinomialWithoutReplacement
<<<grid, block, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states,
n_sample,
sample,
THCudaLongTensor_data(state, self),
numDist, numCategories,
THCTensor_(data)(state, origDist),
THCTensor_(data)(state, prefixSum));
}
}
THCTensor_(free)(state, prefixSum);
THCTensor_(free)(state, normDist);
THCTensor_(free)(state, origDist);
}
// Revert data restructuring based on input sizes
if (inputSize == 1) {
THCudaLongTensor_resize1d(state, self, n_sample);
}
if (free_prob_dist) {
THCTensor_(free)(state, prob_dist);
}
}
void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){
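// Walker/Vose-style alias-table construction: after setup, _q[i] holds the scaled acceptance probability of bucket i and _J[i] its alias, so a draw needs one bucket pick plus one biased coin flip.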
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THAssert(THCTensor_(isContiguous)(state, _probs));
int64_t inputsize = THCTensor_(nElement)(state, _probs);
THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize);
THCudaLongTensor_resize1d(state, _J, inputsize);
THCTensor_(resize1d)(state, _q, inputsize);
scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1);
int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE);
aliasMultinomialFilter
<<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>(
THCTensor_(data)(state, _q),
THCTensor_(data)(state, _probs),
THCudaLongTensor_data(state, smaller),
THCudaLongTensor_data(state, larger),
THCudaLongTensor_data(state, _J),
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
one, inputsize
);
THCudaLongTensor_nonzero(state, smaller_short, smaller);
THCudaLongTensor_nonzero(state, larger_short, larger);
int h_large_c = THCudaLongTensor_nElement(state, larger_short);
THCudaLongTensor_resize1d(state, smaller_short, inputsize);
THCudaLongTensor_resize1d(state, larger_short, inputsize);
aliasMultinomialSetup
<<<1, 1, 0, THCState_getCurrentStream(state)>>>(
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
inputsize,
THCudaLongTensor_data(state, smaller_short),
THCudaLongTensor_data(state, larger_short),
inputsize - h_large_c, h_large_c
);
scalar_t q_max = THCTensor_(maxall)(state, _q);
condDiv<<<
inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
THCTensor_(data)(state, _q),
THCudaLongTensor_data(state, _J),
inputsize, q_max
);
THCudaLongTensor_free(state, smaller);
THCudaLongTensor_free(state, larger);
THCudaLongTensor_free(state, smaller_short);
THCudaLongTensor_free(state, larger_short);
}
void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){
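// Alias-method draw: 'uniform' selects a bucket in [0, K) and 'bernoulli' supplies the biased coin the kernel uses to choose between that bucket and its alias _J[bucket].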
THAssert(THCTensor_(isContiguous)(state, _q));
THAssert(THCudaLongTensor_isContiguous(state, _J));
THCGenerator* gen = THCRandom_getGenerator(state);
int64_t K = THCudaLongTensor_nElement(state, _J);
int64_t output_nelem = THCudaLongTensor_nElement(state, self);
ptrdiff_t size = THCudaLongTensor_nElement(state, self);
THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem);
THCTensor_(uniform)(state, uniform, 0, K);
THCTensor_(uniform)(state, bernoulli, 0, 1);
multinomialAliasDrawKernel
<<<THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
size,
THCudaLongTensor_data(state, self),
THCudaLongTensor_data(state, _J),
THCTensor_(data)(state, _q),
K,
THCTensor_(data)(state, uniform),
THCTensor_(data)(state, bernoulli)
);
}
#endif
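// Inverse-CDF sampling: for u ~ Uniform(0,1], ceil(log(u) / log(1-p)) is Geometric(p)-distributed, which is what the kernels generated below compute.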
#if defined(THC_REAL_IS_DOUBLE)
GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p)))
#else
GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, curand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p)))))
#endif
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
#define CURAND64(STATE) (((uint64_t)curand(STATE)) << 32) | (uint64_t)curand(STATE)
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, \
static_cast<scalar_t>(static_cast<int32_t>((x % range) + base)))
GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \
static_cast<scalar_t>(static_cast<int64_t>((x % range) + base)))
#elif defined(THC_REAL_IS_HALF)
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand,
(ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base))))
#else
GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand,
static_cast<scalar_t>(static_cast<int32_t>(x % range + base)))
#endif
void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, size, data, p);
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val)
{
THArgCheck(min_val < max_val, 2,
"max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
uint64_t range = max_val - min_val;
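// generate_random maps a single 32-bit curand draw to (x % range) + base, so ranges wider than 2^32 fall back to generate_random_64, which assembles a 64-bit draw from two curand calls (CURAND64).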
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
if (range > 1ULL << 32) {
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, min_val, range);
} else {
#endif
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), static_cast<uint32_t>(range));
#if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT)
}
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val)
{
THCTensor_(clampedRandom)(state, self_, 0LL, max_val);
};
#define HLF_MANT_DIG 11
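// The modulus (1 << MANT_DIG) + 1 limits samples to [0, 2^MANT_DIG], the largest run of consecutive integers exactly representable in the target floating type; HLF_MANT_DIG = 11 is half precision's mantissa width.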
void THCTensor_(random)(THCState* state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
ptrdiff_t size = THCTensor_(nElement)(state, self_);
if (size == 0) return;
THCGenerator* gen = THCRandom_getGenerator(state);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
scalar_t *data = THCTensor_(data)(state, self);
#if defined(THC_REAL_IS_HALF)
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_FLOAT)
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_DOUBLE)
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1));
#elif defined(THC_REAL_IS_LONG)
generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1);
#else
generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1);
#endif
THCTensor_(freeCopyTo)(state, self, self_);
};
#undef HLF_MANT_DIG
#undef CURAND64
#undef NUM_BLOCKS
#endif
|
b307be0353b9099adac366a2fb7b472af11ed94f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=23))
{
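// Only lanes 0-23 of each 32-thread warp execute the arithmetic loop below, leaving 8 lanes per warp idle.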
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast so the quotient is a float in [0, 1] instead of integer division truncating to 0
}
}
| b307be0353b9099adac366a2fb7b472af11ed94f.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS REPLACE_ITERATIONS
#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Addition access
if(((i%32)<=23))
{
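// Only lanes 0-23 of each 32-thread warp execute the arithmetic loop below, leaving 8 lanes per warp idle.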
for(unsigned k=0; k<ITERATIONS;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
}
int main()
{
printf("Power Microbenchmarks\n");
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // cast so the quotient is a float in [0, 1] instead of integer division truncating to 0
}
}
|
f081b504ffda005014626d0d0d56568383deadbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, gridDim.x : %d, gridDim.y :%d, gridDim.z :%d \n",
blockIdx.x, blockIdx.y, blockIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, gridDim.x, gridDim.y, gridDim.z);
}
int main()
{
dim3 block(4, 4, 4);
dim3 grid(2, 2, 2);
print_details << <grid, block >> > ();
hipDeviceSynchronize();
hipDeviceReset();
return 0;
}
| f081b504ffda005014626d0d0d56568383deadbc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, gridDim.x : %d, gridDim.y :%d, gridDim.z :%d \n",
blockIdx.x, blockIdx.y, blockIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, gridDim.x, gridDim.y, gridDim.z);
}
int main()
{
dim3 block(4, 4, 4);
dim3 grid(2, 2, 2);
print_details << <grid, block >> > ();
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
a582ba953948270e1fe37e1ac27f4a9c5263523f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
const int ignored_index,
float *output) {
// Two dimensional thread blocks.
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nrow) return;
if(int(input_b[id])==ignored_index)
{
output[id] = 0;
return;
}
float maxval = input_a[id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[id * ncol + x] - maxval);
}
// Compute per-row loss.
size_t curid = id * ncol + int(input_b[id]);
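// Numerically stable cross-entropy: -log(softmax(x)[target]) = -(x[target] - max) + log(sum_j exp(x_j - max)).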
float loss = -(input_a[curid] - maxval) + log(sum);
output[id] = loss;
}
int DLGpuSoftmaxCrossEntropySparse(const DLArrayHandle input_a,
const DLArrayHandle input_b,
const int ignored_index,
DLArrayHandle output, DLStreamHandle stream_handle = NULL) {
size_t indim = input_a->ndim;
assert (output->ndim == input_b->ndim && indim == output->ndim + 1);
int nrow = 1;
for (int i = 0; i < indim-1; ++i) {
assert (input_a->shape[i] == input_b->shape[i] &&
input_a->shape[i] == output->shape[i]);
nrow *= input_a->shape[i];
}
int ncol = input_a->shape[indim-1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 blocks;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (nrow + 1023) / 1024;
}
// 1 block
if (stream_handle) {
hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle,
nrow, ncol, input_data_a, input_data_b, ignored_index, output_data);
} else {
hipLaunchKernelGGL(( matrix_softmax_cross_entropy_kernel), dim3(blocks), dim3(threads), 0, 0,
nrow, ncol, input_data_a, input_data_b, ignored_index, output_data);
}
return 0;
}
__global__ void softmax_cross_entropy_gradient_kernel(int nrow, int ncol, const float *input_a, const float *input_b, const float *input_c, const int ignored_index, float *output) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nrow) return;
if(int(input_b[id])==ignored_index)
{
for (int x = 0; x < ncol; ++x)
{
size_t curid = id * ncol + x;
output[curid] = 0;
}
return;
}
float maxval = input_a[id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[id * ncol + x] - maxval);
}
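// Gradient of the sparse cross-entropy: d loss / d x_j = (softmax(x)_j - [j == target]) * input_c[id].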
for (int x = 0; x < ncol; ++x) {
size_t curid = id * ncol + x;
if(x == int(input_b[id]))
{
output[curid] = (exp(input_a[curid] - maxval) / sum - 1.0) * input_c[id];
}
else
output[curid] = (exp(input_a[curid] - maxval) / sum) * input_c[id];
}
}
int DLGpuSoftmaxCrossEntropySparse_Gradient(const DLArrayHandle input_a, const DLArrayHandle input_b,
const DLArrayHandle input_c, const int ignored_index,
DLArrayHandle output,
DLStreamHandle stream_handle = NULL) {
size_t indim = input_a->ndim;
assert (indim >= 2 && input_c->ndim == input_b->ndim && indim == input_c->ndim + 1 && indim == output->ndim);
int nrow = 1;
for (int i = 0; i < indim-1; ++i) {
assert (input_a->shape[i] == input_b->shape[i] &&
input_a->shape[i] == output->shape[i] &&
input_a->shape[i] == input_c->shape[i]);
nrow *= input_a->shape[i];
}
assert (input_a->shape[indim-1] == output->shape[indim-1]);
int ncol = input_a->shape[indim-1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
const float *input_data_c = (const float *)input_c ->data;
float *output_data = (float *)output->data;
dim3 blocks;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (nrow + 1023) / 1024;
}
if (stream_handle) {
hipLaunchKernelGGL(( softmax_cross_entropy_gradient_kernel), dim3(blocks), dim3(threads), 0, *(hipStream_t*)stream_handle->handle,
nrow, ncol, input_data_a, input_data_b, input_data_c, ignored_index, output_data);
} else {
hipLaunchKernelGGL(( softmax_cross_entropy_gradient_kernel), dim3(blocks), dim3(threads), 0, 0,
nrow, ncol, input_data_a, input_data_b, input_data_c, ignored_index, output_data);
}
return 0;
}
| a582ba953948270e1fe37e1ac27f4a9c5263523f.cu | #include "gpu_runtime.h"
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void matrix_softmax_cross_entropy_kernel(int nrow, int ncol,
const float *input_a,
const float *input_b,
const int ignored_index,
float *output) {
// Two dimensional thread blocks.
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nrow) return;
if(int(input_b[id])==ignored_index)
{
output[id] = 0;
return;
}
float maxval = input_a[id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[id * ncol + x] - maxval);
}
// Compute per-row loss.
size_t curid = id * ncol + int(input_b[id]);
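// Numerically stable cross-entropy: -log(softmax(x)[target]) = -(x[target] - max) + log(sum_j exp(x_j - max)).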
float loss = -(input_a[curid] - maxval) + log(sum);
output[id] = loss;
}
int DLGpuSoftmaxCrossEntropySparse(const DLArrayHandle input_a,
const DLArrayHandle input_b,
const int ignored_index,
DLArrayHandle output, DLStreamHandle stream_handle = NULL) {
size_t indim = input_a->ndim;
assert (output->ndim == input_b->ndim && indim == output->ndim + 1);
int nrow = 1;
for (int i = 0; i < indim-1; ++i) {
assert (input_a->shape[i] == input_b->shape[i] &&
input_a->shape[i] == output->shape[i]);
nrow *= input_a->shape[i];
}
int ncol = input_a->shape[indim-1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
float *output_data = (float *)output->data;
dim3 blocks;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (nrow + 1023) / 1024;
}
// 1 block
if (stream_handle) {
matrix_softmax_cross_entropy_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(
nrow, ncol, input_data_a, input_data_b, ignored_index, output_data);
} else {
matrix_softmax_cross_entropy_kernel<<<blocks, threads>>>(
nrow, ncol, input_data_a, input_data_b, ignored_index, output_data);
}
return 0;
}
__global__ void softmax_cross_entropy_gradient_kernel(int nrow, int ncol, const float *input_a, const float *input_b, const float *input_c, const int ignored_index, float *output) {
size_t id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= nrow) return;
if(int(input_b[id])==ignored_index)
{
for (int x = 0; x < ncol; ++x)
{
size_t curid = id * ncol + x;
output[curid] = 0;
}
return;
}
float maxval = input_a[id * ncol];
// Find max for a row.
for (int x = 1; x < ncol; ++x) {
maxval = max(maxval, input_a[id * ncol + x]);
}
// Deduct by max for a row, and raise to exp.
float sum = 0;
for (int x = 0; x < ncol; ++x) {
sum += exp(input_a[id * ncol + x] - maxval);
}
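// Gradient of the sparse cross-entropy: d loss / d x_j = (softmax(x)_j - [j == target]) * input_c[id].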
for (int x = 0; x < ncol; ++x) {
size_t curid = id * ncol + x;
if(x == int(input_b[id]))
{
output[curid] = (exp(input_a[curid] - maxval) / sum - 1.0) * input_c[id];
}
else
output[curid] = (exp(input_a[curid] - maxval) / sum) * input_c[id];
}
}
int DLGpuSoftmaxCrossEntropySparse_Gradient(const DLArrayHandle input_a, const DLArrayHandle input_b,
const DLArrayHandle input_c, const int ignored_index,
DLArrayHandle output,
DLStreamHandle stream_handle = NULL) {
size_t indim = input_a->ndim;
assert (indim >= 2 && input_c->ndim == input_b->ndim && indim == input_c->ndim + 1 && indim == output->ndim);
int nrow = 1;
for (int i = 0; i < indim-1; ++i) {
assert (input_a->shape[i] == input_b->shape[i] &&
input_a->shape[i] == output->shape[i] &&
input_a->shape[i] == input_c->shape[i]);
nrow *= input_a->shape[i];
}
assert (input_a->shape[indim-1] == output->shape[indim-1]);
int ncol = input_a->shape[indim-1];
const float *input_data_a = (const float *)input_a->data;
const float *input_data_b = (const float *)input_b->data;
const float *input_data_c = (const float *)input_c ->data;
float *output_data = (float *)output->data;
dim3 blocks;
dim3 threads;
if (nrow <= 1024) {
threads.x = nrow;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (nrow + 1023) / 1024;
}
if (stream_handle) {
softmax_cross_entropy_gradient_kernel<<<blocks, threads, 0, *(cudaStream_t*)stream_handle->handle>>>(
nrow, ncol, input_data_a, input_data_b, input_data_c, ignored_index, output_data);
} else {
softmax_cross_entropy_gradient_kernel<<<blocks, threads>>>(
nrow, ncol, input_data_a, input_data_b, input_data_c, ignored_index, output_data);
}
return 0;
}
|
bc8c7b2d669a296bd70256c523683e0718402a89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sssp_with_aux.cuh"
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <algorithm>
#define MAX_INT 2147483647
struct input_line {
int src, dst;
};
//From StackOverflow
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
bool sort_input(const input_line &a, const input_line &b) {return a.src < b.src || (a.src == b.src && a.dst < b.dst);}
int main(int argc,char ** argv){
if(argc != 3){
printf("Usage: sssp [graph filename] [number of lines]\n");
return 0;
}
int * edge_list_index;
int * edge_dst;
int * edge_weight;
int * distance;
int vert_count = 0;
FILE * fin = fopen(argv[1],"r");
FILE * fout = fopen("output.txt", "w");
int input_line_count;
sscanf(argv[2], " %d", &input_line_count);
input_line * lines = new input_line[input_line_count * 2];
for(int i = 0;i < input_line_count;++i){
fscanf(fin, " %d %d", &(lines[i * 2].src), &(lines[i * 2].dst));
if(lines[i * 2].src >= vert_count) {vert_count = lines[i * 2].src + 1;}
if(lines[i * 2].dst >= vert_count) {vert_count = lines[i * 2].dst + 1;}
lines[i * 2 + 1].src = lines[i * 2].dst;
lines[i * 2 + 1].dst = lines[i * 2].src;
}
std::sort(lines, lines + input_line_count * 2, sort_input);
int edge_count = input_line_count * 2;
edge_list_index = new int[vert_count + 1];
edge_dst = new int[edge_count];
edge_weight = new int[edge_count];
distance = new int[vert_count];
int curr_vert = 0;
edge_list_index[0] = 0;
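// Build CSR adjacency from the sorted, bidirectional edge list: edge_list_index[v] is the offset of vertex v's first edge, and the while loop also fills offsets for vertices with no outgoing edges.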
for(int i = 0;i < edge_count;++i){
while(curr_vert < lines[i].src){++curr_vert; edge_list_index[curr_vert] = i;}
edge_dst[i] = lines[i].dst;
edge_weight[i] = 1;
}
edge_list_index[vert_count] = edge_count;
for(int i = 0;i < vert_count;++i){distance[i] = 2147483647;}
distance[0] = 0;
int * gpu_edge_list_index, * gpu_edge_dst, * gpu_edge_weight, * gpu_distance;
hipMalloc((void **)&gpu_edge_list_index, sizeof(int) * (vert_count + 1));
hipMemcpy(gpu_edge_list_index, edge_list_index, sizeof(int) * (vert_count + 1), hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_edge_dst, sizeof(int) * edge_count);
hipMemcpy(gpu_edge_dst, edge_dst, sizeof(int) * edge_count, hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_edge_weight, sizeof(int) * edge_count);
hipMemcpy(gpu_edge_weight, edge_weight, sizeof(int) * edge_count, hipMemcpyHostToDevice);
hipMalloc((void **)&gpu_distance, sizeof(int) * vert_count);
hipMemcpy(gpu_distance, distance, sizeof(int) * vert_count, hipMemcpyHostToDevice);
sssp_heap_node max_node;
max_node.vert = 135792468;
max_node.curr_dist = 135792468;
int batch_count = vert_count * 2 / CONFIG_BATCH_SIZE;
if(batch_count < 3) {batch_count = 3;}
Heap_With_Aux < sssp_heap_node, int > cpu_heap(batch_count, CONFIG_BATCH_SIZE, max_node, 0);
Heap_With_Aux < sssp_heap_node, int > * gpu_heap;
hipMalloc((void **)&gpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >));
hipMemcpy(gpu_heap, &cpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >), hipMemcpyHostToDevice);
sssp_heap_node init_node;
init_node.vert = 0;
init_node.curr_dist = 0;
sssp_heap_node * gpu_init_node;
hipMalloc((void **)&gpu_init_node, sizeof(sssp_heap_node));
hipMemcpy(gpu_init_node, &init_node, sizeof(sssp_heap_node), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( insertInitNode), dim3(1), dim3(1), 1024, 0, gpu_heap, gpu_init_node);
int * gpu_term_sig;
hipMalloc((void **)&gpu_term_sig, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
hipMemset(gpu_term_sig, 0, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
printf("Preparation complete\n");
struct timespec start_time, end_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
int iteration = 0;
int heap_size, pb_size, aux_size;
do{
hipLaunchKernelGGL(( ssspKernel), dim3(CONFIG_THREAD_GROUP_NUM), dim3(CONFIG_THREAD_NUM), 32768, 0, gpu_heap, gpu_edge_list_index, gpu_edge_dst, gpu_edge_weight, gpu_distance, gpu_term_sig, CONFIG_THREAD_GROUP_NUM);
++iteration;
if(iteration % 100 == 0){printf("%d\n",iteration);}
hipMemcpy(&cpu_heap, gpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >), hipMemcpyDeviceToHost);
heap_size = cpu_heap.curr_aux_buf_size + cpu_heap.heap.itemCount();
} while(heap_size > 0);
clock_gettime(CLOCK_MONOTONIC, &end_time);
printf("Finished in %d iterations\n", iteration);
int64_t duration = timespecDiff(&end_time, &start_time);
printf("Microseconds: %lld\n", duration / 1000);
hipMemcpy(distance, gpu_distance, sizeof(int) * vert_count, hipMemcpyDeviceToHost);
for(int i = 0;i < vert_count;++i){
fprintf(fout, "%d %d\n", i, distance[i]);
}
return 0;
}
| bc8c7b2d669a296bd70256c523683e0718402a89.cu | #include "sssp_with_aux.cuh"
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <algorithm>
#define MAX_INT 2147483647
struct input_line {
int src, dst;
};
//From StackOverflow
int64_t timespecDiff(struct timespec *timeA_p, struct timespec *timeB_p)
{
return ((timeA_p->tv_sec * 1000000000) + timeA_p->tv_nsec) -
((timeB_p->tv_sec * 1000000000) + timeB_p->tv_nsec);
}
bool sort_input(const input_line &a, const input_line &b) {return a.src < b.src || (a.src == b.src && a.dst < b.dst);}
int main(int argc,char ** argv){
if(argc != 3){
printf("Usage: sssp [graph filename] [number of lines]\n");
return 0;
}
int * edge_list_index;
int * edge_dst;
int * edge_weight;
int * distance;
int vert_count = 0;
FILE * fin = fopen(argv[1],"r");
FILE * fout = fopen("output.txt", "w");
int input_line_count;
sscanf(argv[2], " %d", &input_line_count);
input_line * lines = new input_line[input_line_count * 2];
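	// Read the edge list; each input edge is also stored reversed so the graph is treated as undirected,
	// and vert_count tracks the highest vertex id + 1.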
for(int i = 0;i < input_line_count;++i){
fscanf(fin, " %d %d", &(lines[i * 2].src), &(lines[i * 2].dst));
if(lines[i * 2].src >= vert_count) {vert_count = lines[i * 2].src + 1;}
if(lines[i * 2].dst >= vert_count) {vert_count = lines[i * 2].dst + 1;}
lines[i * 2 + 1].src = lines[i * 2].dst;
lines[i * 2 + 1].dst = lines[i * 2].src;
}
std::sort(lines, lines + input_line_count * 2, sort_input);
int edge_count = input_line_count * 2;
edge_list_index = new int[vert_count + 1];
edge_dst = new int[edge_count];
edge_weight = new int[edge_count];
distance = new int[vert_count];
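	// Pack the sorted edges into CSR form: edge_list_index[v] is the offset of v's first edge; every edge gets unit weight.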
int curr_vert = 0;
edge_list_index[0] = 0;
for(int i = 0;i < edge_count;++i){
while(curr_vert < lines[i].src){++curr_vert; edge_list_index[curr_vert] = i;}
edge_dst[i] = lines[i].dst;
edge_weight[i] = 1;
}
edge_list_index[vert_count] = edge_count;
for(int i = 0;i < vert_count;++i){distance[i] = 2147483647;}
distance[0] = 0;
int * gpu_edge_list_index, * gpu_edge_dst, * gpu_edge_weight, * gpu_distance;
cudaMalloc((void **)&gpu_edge_list_index, sizeof(int) * (vert_count + 1));
cudaMemcpy(gpu_edge_list_index, edge_list_index, sizeof(int) * (vert_count + 1), cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_edge_dst, sizeof(int) * edge_count);
cudaMemcpy(gpu_edge_dst, edge_dst, sizeof(int) * edge_count, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_edge_weight, sizeof(int) * edge_count);
cudaMemcpy(gpu_edge_weight, edge_weight, sizeof(int) * edge_count, cudaMemcpyHostToDevice);
cudaMalloc((void **)&gpu_distance, sizeof(int) * vert_count);
cudaMemcpy(gpu_distance, distance, sizeof(int) * vert_count, cudaMemcpyHostToDevice);
sssp_heap_node max_node;
max_node.vert = 135792468;
max_node.curr_dist = 135792468;
int batch_count = vert_count * 2 / CONFIG_BATCH_SIZE;
if(batch_count < 3) {batch_count = 3;}
Heap_With_Aux < sssp_heap_node, int > cpu_heap(batch_count, CONFIG_BATCH_SIZE, max_node, 0);
Heap_With_Aux < sssp_heap_node, int > * gpu_heap;
cudaMalloc((void **)&gpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >));
cudaMemcpy(gpu_heap, &cpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >), cudaMemcpyHostToDevice);
sssp_heap_node init_node;
init_node.vert = 0;
init_node.curr_dist = 0;
sssp_heap_node * gpu_init_node;
cudaMalloc((void **)&gpu_init_node, sizeof(sssp_heap_node));
cudaMemcpy(gpu_init_node, &init_node, sizeof(sssp_heap_node), cudaMemcpyHostToDevice);
insertInitNode<<<1, 1, 1024>>>(gpu_heap, gpu_init_node);
int * gpu_term_sig;
cudaMalloc((void **)&gpu_term_sig, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
cudaMemset(gpu_term_sig, 0, sizeof(int) * CONFIG_THREAD_GROUP_NUM);
printf("Preparation complete\n");
struct timespec start_time, end_time;
clock_gettime(CLOCK_MONOTONIC, &start_time);
int iteration = 0;
int heap_size, pb_size, aux_size;
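	// Relaunch the SSSP kernel until the device heap and its auxiliary buffer are both empty.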
do{
ssspKernel<<<CONFIG_THREAD_GROUP_NUM, CONFIG_THREAD_NUM, 32768>>>(gpu_heap, gpu_edge_list_index, gpu_edge_dst, gpu_edge_weight, gpu_distance, gpu_term_sig, CONFIG_THREAD_GROUP_NUM);
++iteration;
if(iteration % 100 == 0){printf("%d\n",iteration);}
cudaMemcpy(&cpu_heap, gpu_heap, sizeof(Heap_With_Aux < sssp_heap_node, int >), cudaMemcpyDeviceToHost);
heap_size = cpu_heap.curr_aux_buf_size + cpu_heap.heap.itemCount();
} while(heap_size > 0);
clock_gettime(CLOCK_MONOTONIC, &end_time);
printf("Finished in %d iterations\n", iteration);
int64_t duration = timespecDiff(&end_time, &start_time);
printf("Microseconds: %lld\n", duration / 1000);
cudaMemcpy(distance, gpu_distance, sizeof(int) * vert_count, cudaMemcpyDeviceToHost);
for(int i = 0;i < vert_count;++i){
fprintf(fout, "%d %d\n", i, distance[i]);
}
return 0;
}
|
cac3e7ce2376af7a30cf1bc11d6c95779989831c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
struct MaxFloat
{
__device__ __forceinline__ float operator()(float max, float v) const
{
return fmaxf(max, v);
}
};
struct SumFloat
{
__device__ __forceinline__ float operator()(float sum, float v) const
{
return sum + v;
}
};
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(float v)
: max_k(v)
{}
__device__ __forceinline__ float operator()(float sum, float v) const
{
return sum + expf(v - max_k);
}
const float max_k;
};
struct NoFinal
{
__device__ __forceinline__ float operator()(float v) const
{
return v;
}
};
struct LSMFinal
{
__device__ __forceinline__ LSMFinal(float m)
: max_k(m)
{}
__device__ __forceinline__ float operator()(float v) const
{
return max_k + logf(v);
}
const float max_k;
};
template <typename Reduction, typename Finalize>
__device__ __forceinline__ float
blockReduce(float* smem, float val,
const Reduction& r,
float defaultVal,
const Finalize& f)
{
  // To avoid RaW races from chaining blockReduce calls together, we
// need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
float warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if ((threadIdx.x / 32) == 0)
{
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32)
{
#pragma unroll
for (int i = 0; i < 32; ++i)
{
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
float blockVal = defaultVal;
if (threadIdx.x == 0)
{
for (int i = 0; i < blockDim.x / 32; ++i)
{
blockVal = r(blockVal, smem[i]);
}
smem[0] = f(blockVal);
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <typename Reduction>
__device__ __forceinline__ float
blockReduce(float* smem, float val,
const Reduction& r,
float defaultVal)
{
return blockReduce<Reduction, NoFinal>(smem, val, r, defaultVal, NoFinal());
}
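// Each thread reduces a strided slice of the row, loading ILP elements per iteration so the loads can overlap.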
template <typename Reduction, int ILP>
__device__ __forceinline__ float
ilpReduce(float* data,
int size,
const Reduction& r,
float defaultVal)
{
float threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP)
{
float tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
tmp[j] = data[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
threadVal = r(threadVal, tmp[j]);
}
}
// Epilogue
for (; offset < size; offset += blockDim.x)
{
threadVal = r(threadVal, data[offset]);
}
return threadVal;
}
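// One block processes one row: find the row max, compute log(sum(exp(x - max))), then write x - (max + log sum) as the log-softmax output.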
template <int ILP>
__global__ void
cunn_LogSoftMax_updateOutput_kernel(float *output, float *input, int classes)
{
extern __shared__ float buffer[];
input += blockIdx.x * classes;
output += blockIdx.x * classes;
float threadMax =
ilpReduce<MaxFloat, ILP>(input, classes, MaxFloat(), -FLT_MAX);
float max_k =
blockReduce<MaxFloat>(buffer, threadMax, MaxFloat(), -FLT_MAX);
float threadExp =
ilpReduce<SumExpFloat, ILP>(input, classes, SumExpFloat(max_k), 0.0f);
float logsum_k =
blockReduce<SumFloat, LSMFinal>(
buffer, threadExp, SumFloat(), 0.0f, LSMFinal(max_k));
// Output LSM (hand ILP)
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP)
{
float tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmp[j] = input[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
output[offset + j * blockDim.x] = tmp[j] - logsum_k;
}
}
for (; offset < classes; offset += blockDim.x)
{
output[offset] = input[offset] - logsum_k;
}
}
template <int ILP>
__global__ void
cunn_LogSoftMax_updateGradInput_kernel(float *gradInput,
float *output,
float *gradOutput,
int classes)
{
extern __shared__ float buffer[];
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
float threadSum =
ilpReduce<SumFloat, 4>(gradOutput, classes, SumFloat(), 0.0f);
float sum_k =
blockReduce<SumFloat>(buffer, threadSum, SumFloat(), 0.0f);
// Update gradInput (hand ILP)
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP)
{
float tmpGradOutput[ILP];
float tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
gradInput[offset + j * blockDim.x] =
tmpGradOutput[j] - __expf(tmpOutput[j]) * sum_k;
}
}
for (; offset < classes; offset += blockDim.x)
{
gradInput[offset] =
gradOutput[offset] - __expf(output[offset]) * sum_k;
}
}
void THNN_CudaLogSoftMax_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, output, input);
int batchSize = 1;
int classSize = 0;
if (THCudaTensor_nDimension(state, input) == 1)
{
classSize = THCudaTensor_size(state, input, 0);
}
else if (THCudaTensor_nDimension(state, input) == 2)
{
batchSize = THCudaTensor_size(state, input, 0);
classSize = THCudaTensor_size(state, input, 1);
}
else
{
THError("vector or matrix expected");
}
dim3 grid(batchSize);
dim3 block(1024);
hipLaunchKernelGGL(( cunn_LogSoftMax_updateOutput_kernel<2>)
, dim3(grid), dim3(block), block.x * sizeof(float), THCState_getCurrentStream(state),
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
classSize
);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess)
{
THError(hipGetErrorString(errcode));
}
THCudaTensor_free(state, input);
}
void THNN_CudaLogSoftMax_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
output = THCudaTensor_newContiguous(state, output);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, output);
int batchSize = 1;
int classSize = 0;
if (THCudaTensor_nDimension(state, gradInput) == 1)
{
classSize = THCudaTensor_size(state, gradInput, 0);
}
else if (THCudaTensor_nDimension(state, gradInput) == 2)
{
batchSize = THCudaTensor_size(state, gradInput, 0);
classSize = THCudaTensor_size(state, gradInput, 1);
}
else
{
THError("vector or matrix expected");
}
dim3 grid(batchSize);
dim3 block(1024);
hipLaunchKernelGGL(( cunn_LogSoftMax_updateGradInput_kernel<2>)
, dim3(grid), dim3(block), block.x * sizeof(float), THCState_getCurrentStream(state),
THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
classSize
);
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess)
{
THError(hipGetErrorString(errcode));
}
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, output);
}
| cac3e7ce2376af7a30cf1bc11d6c95779989831c.cu | #include "THCUNN.h"
struct MaxFloat
{
__device__ __forceinline__ float operator()(float max, float v) const
{
return fmaxf(max, v);
}
};
struct SumFloat
{
__device__ __forceinline__ float operator()(float sum, float v) const
{
return sum + v;
}
};
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(float v)
: max_k(v)
{}
__device__ __forceinline__ float operator()(float sum, float v) const
{
return sum + expf(v - max_k);
}
const float max_k;
};
struct NoFinal
{
__device__ __forceinline__ float operator()(float v) const
{
return v;
}
};
struct LSMFinal
{
__device__ __forceinline__ LSMFinal(float m)
: max_k(m)
{}
__device__ __forceinline__ float operator()(float v) const
{
return max_k + logf(v);
}
const float max_k;
};
template <typename Reduction, typename Finalize>
__device__ __forceinline__ float
blockReduce(float* smem, float val,
const Reduction& r,
float defaultVal,
const Finalize& f)
{
  // To avoid RaW races from chaining blockReduce calls together, we
// need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
float warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
if ((threadIdx.x / 32) == 0)
{
int lane = threadIdx.x % 32;
if (lane < blockDim.x / 32)
{
#pragma unroll
for (int i = 0; i < 32; ++i)
{
warpVal = r(warpVal, smem[lane * 32 + i]);
}
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
float blockVal = defaultVal;
if (threadIdx.x == 0)
{
for (int i = 0; i < blockDim.x / 32; ++i)
{
blockVal = r(blockVal, smem[i]);
}
smem[0] = f(blockVal);
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <typename Reduction>
__device__ __forceinline__ float
blockReduce(float* smem, float val,
const Reduction& r,
float defaultVal)
{
return blockReduce<Reduction, NoFinal>(smem, val, r, defaultVal, NoFinal());
}
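// Each thread reduces a strided slice of the row, loading ILP elements per iteration so the loads can overlap.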
template <typename Reduction, int ILP>
__device__ __forceinline__ float
ilpReduce(float* data,
int size,
const Reduction& r,
float defaultVal)
{
float threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP)
{
float tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
tmp[j] = data[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
threadVal = r(threadVal, tmp[j]);
}
}
// Epilogue
for (; offset < size; offset += blockDim.x)
{
threadVal = r(threadVal, data[offset]);
}
return threadVal;
}
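// One block processes one row: find the row max, compute log(sum(exp(x - max))), then write x - (max + log sum) as the log-softmax output.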
template <int ILP>
__global__ void
cunn_LogSoftMax_updateOutput_kernel(float *output, float *input, int classes)
{
extern __shared__ float buffer[];
input += blockIdx.x * classes;
output += blockIdx.x * classes;
float threadMax =
ilpReduce<MaxFloat, ILP>(input, classes, MaxFloat(), -FLT_MAX);
float max_k =
blockReduce<MaxFloat>(buffer, threadMax, MaxFloat(), -FLT_MAX);
float threadExp =
ilpReduce<SumExpFloat, ILP>(input, classes, SumExpFloat(max_k), 0.0f);
float logsum_k =
blockReduce<SumFloat, LSMFinal>(
buffer, threadExp, SumFloat(), 0.0f, LSMFinal(max_k));
// Output LSM (hand ILP)
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP)
{
float tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmp[j] = input[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
output[offset + j * blockDim.x] = tmp[j] - logsum_k;
}
}
for (; offset < classes; offset += blockDim.x)
{
output[offset] = input[offset] - logsum_k;
}
}
template <int ILP>
__global__ void
cunn_LogSoftMax_updateGradInput_kernel(float *gradInput,
float *output,
float *gradOutput,
int classes)
{
extern __shared__ float buffer[];
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
float threadSum =
ilpReduce<SumFloat, 4>(gradOutput, classes, SumFloat(), 0.0f);
float sum_k =
blockReduce<SumFloat>(buffer, threadSum, SumFloat(), 0.0f);
// Update gradInput (hand ILP)
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP)
{
float tmpGradOutput[ILP];
float tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
{
gradInput[offset + j * blockDim.x] =
tmpGradOutput[j] - __expf(tmpOutput[j]) * sum_k;
}
}
for (; offset < classes; offset += blockDim.x)
{
gradInput[offset] =
gradOutput[offset] - __expf(output[offset]) * sum_k;
}
}
void THNN_CudaLogSoftMax_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, output, input);
int batchSize = 1;
int classSize = 0;
if (THCudaTensor_nDimension(state, input) == 1)
{
classSize = THCudaTensor_size(state, input, 0);
}
else if (THCudaTensor_nDimension(state, input) == 2)
{
batchSize = THCudaTensor_size(state, input, 0);
classSize = THCudaTensor_size(state, input, 1);
}
else
{
THError("vector or matrix expected");
}
dim3 grid(batchSize);
dim3 block(1024);
cunn_LogSoftMax_updateOutput_kernel<2>
<<<grid, block, block.x * sizeof(float), THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, output),
THCudaTensor_data(state, input),
classSize
);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
THError(cudaGetErrorString(errcode));
}
THCudaTensor_free(state, input);
}
void THNN_CudaLogSoftMax_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 3, output, gradOutput, gradInput));
output = THCudaTensor_newContiguous(state, output);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
THCudaTensor_resizeAs(state, gradInput, output);
int batchSize = 1;
int classSize = 0;
if (THCudaTensor_nDimension(state, gradInput) == 1)
{
classSize = THCudaTensor_size(state, gradInput, 0);
}
else if (THCudaTensor_nDimension(state, gradInput) == 2)
{
batchSize = THCudaTensor_size(state, gradInput, 0);
classSize = THCudaTensor_size(state, gradInput, 1);
}
else
{
THError("vector or matrix expected");
}
dim3 grid(batchSize);
dim3 block(1024);
cunn_LogSoftMax_updateGradInput_kernel<2>
<<<grid, block, block.x * sizeof(float), THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, gradInput),
THCudaTensor_data(state, output),
THCudaTensor_data(state, gradOutput),
classSize
);
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess)
{
THError(cudaGetErrorString(errcode));
}
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, output);
}
|
a20d947166568faf5b328b09693c013825b83e7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pbf_predict.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
namespace pbf {
namespace cuda {
namespace {
// updated = a + b * c
__global__ void multiplyAddCUDA(dom_dim* updated, const dom_dim* a, dom_dim b, scalar_t c, uint32_t num)
{
uint32_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= num) return;
dom_dim src = a[gid];
dom_dim dst;
dst.x = fmaf(b.x, c, src.x);
dst.y = fmaf(b.y, c, src.y);
dst.z = fmaf(b.z, c, src.z);
updated[gid] = dst;
}
// updated = a + b * c
__global__ void multiplyAddCUDA(dom_dim* updated, const dom_dim* a, const dom_dim* b, scalar_t c, uint32_t num)
{
uint32_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= num) return;
dom_dim _a = a[gid];
dom_dim _b = b[gid];
dom_dim dst;
dst.x = fmaf(_b.x, c, _a.x);
dst.y = fmaf(_b.y, c, _a.y);
dst.z = fmaf(_b.z, c, _a.z);
updated[gid] = dst;
}
} // end of unnamed ns
void applyExternalForce(
dom_dim* interim_velocity,
const dom_dim* velocity,
dom_dim ext_force,
scalar_t time_step,
int num_particle
)
{
if (num_particle > 0) {
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
hipLaunchKernelGGL(( multiplyAddCUDA) , dim3(num_block), dim3(num_thread) , 0, 0, interim_velocity, velocity, ext_force, time_step, num_particle);
}
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
void predictPosition(
dom_dim* interim_position,
const dom_dim* position,
const dom_dim* velocity,
scalar_t time_step,
int num_particle)
{
if (num_particle > 0) {
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
multiplyAddCUDA << < num_block, num_thread >> >(interim_position, position, velocity, time_step, num_particle);
}
#ifdef _DEBUG
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
| a20d947166568faf5b328b09693c013825b83e7f.cu | #include "pbf_predict.h"
#include "../../util/pbf_cuda_util.h"
#include "../../interaction/cuda/pbf_grid.h"
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
#include <device_launch_parameters.h>
namespace pbf {
namespace cuda {
namespace {
// updated = a + b * c
__global__ void multiplyAddCUDA(dom_dim* updated, const dom_dim* a, dom_dim b, scalar_t c, uint32_t num)
{
uint32_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= num) return;
dom_dim src = a[gid];
dom_dim dst;
dst.x = fmaf(b.x, c, src.x);
dst.y = fmaf(b.y, c, src.y);
dst.z = fmaf(b.z, c, src.z);
updated[gid] = dst;
}
// updated = a + b * c
__global__ void multiplyAddCUDA(dom_dim* updated, const dom_dim* a, const dom_dim* b, scalar_t c, uint32_t num)
{
uint32_t gid = threadIdx.x + blockIdx.x * blockDim.x;
if (gid >= num) return;
dom_dim _a = a[gid];
dom_dim _b = b[gid];
dom_dim dst;
dst.x = fmaf(_b.x, c, _a.x);
dst.y = fmaf(_b.y, c, _a.y);
dst.z = fmaf(_b.z, c, _a.z);
updated[gid] = dst;
}
} // end of unnamed ns
void applyExternalForce(
dom_dim* interim_velocity,
const dom_dim* velocity,
dom_dim ext_force,
scalar_t time_step,
int num_particle
)
{
if (num_particle > 0) {
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
multiplyAddCUDA <<< num_block, num_thread >>>(interim_velocity, velocity, ext_force, time_step, num_particle);
}
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
void predictPosition(
dom_dim* interim_position,
const dom_dim* position,
const dom_dim* velocity,
scalar_t time_step,
int num_particle)
{
if (num_particle > 0) {
uint32_t num_thread, num_block;
computeGridSize(num_particle, 128, num_block, num_thread);
multiplyAddCUDA << < num_block, num_thread >> >(interim_position, position, velocity, time_step, num_particle);
}
#ifdef _DEBUG
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
#endif
}
} // end of cuda ns
} // end of pbf ns
|
7fd7c036baeed6d802b229fce1061ce440280ab5.hip | // !!! This is a file automatically generated by hipify!!!
#define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28, 0, 1);
static Layer l_conv1 = Layer(3, 28, 24, 1, 8);
static Layer l_conv2 = Layer(1, 24, 24, 8, 8);
static Layer l_conv3 = Layer(3, 24, 24, 8, 8);
static Layer l_conv4 = Layer(5, 24, 24, 8, 8);
static Layer l_maxpool = Layer(3, 24, 24, 8, 8);
static Layer l_conv5 = Layer(1, 24, 24, 8, 8);
static Layer l_conv6 = Layer(1, 24, 24, 8, 8);
static Layer l_conv7 = Layer(1, 24, 24, 8, 8);
static Layer l_FC = Layer(24, 24, 1, 32, 10);
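// Inception-style block: conv1 feeds four parallel paths (1x1 conv; 3x3 conv -> 1x1 conv; 5x5 conv -> 1x1 conv; 3x3 maxpool -> 1x1 conv)
// whose 8-channel outputs are concatenated into 32 channels for the fully connected layer.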
static float *concat_matrix, *slice_1, *slice_2, *slice_3, *slice_4, *sum_matrix;
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
hipError_t err = hipInit(0);
if (err != hipSuccess) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
hipMalloc((void **)&concat_matrix, sizeof(float) * 24 * 24 * 32);
hipMalloc((void **)&slice_1, sizeof(float) * 24 * 24 * 8);
hipMalloc((void **)&slice_2, sizeof(float) * 24 * 24 * 8);
hipMalloc((void **)&slice_3, sizeof(float) * 24 * 24 * 8);
hipMalloc((void **)&slice_4, sizeof(float) * 24 * 24 * 8);
hipMalloc((void **)&sum_matrix, sizeof(float) * 24 * 24 * 8);
loaddata();
//test();
learn();
test();
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
//fprintf(stdout, "%f\n", data[14][14]);
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_conv1.clear();
l_conv2.clear();
l_conv3.clear();
l_conv4.clear();
l_conv5.clear();
l_conv6.clear();
l_conv7.clear();
l_maxpool.clear();
l_FC.clear();
// for (int i = 0; i < 18; i++){
// fprintf(stdout, "%f ", input[i][i]);
// }
l_input.setOutput((float *)input);
//l_input.Out();
// Conv1
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.preact, l_input.output, l_conv1.weight, l_conv1.kernel_size,
l_conv1.in_size, l_conv1.out_size, l_conv1.in_channel, l_conv1.out_channel, false);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.preact, l_conv1.bias, l_conv1.out_size, l_conv1.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.preact, l_conv1.output, l_conv1.out_size * l_conv1.out_size * l_conv1.out_channel);
// Conv2 Path #1
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.preact, l_conv1.output, l_conv2.weight, l_conv2.kernel_size,
l_conv2.in_size, l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.preact, l_conv2.bias, l_conv2.out_size, l_conv2.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.preact, l_conv2.output, l_conv2.out_size * l_conv2.out_size * l_conv2.out_channel);
// Conv3 + Conv5 Path #2
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.preact, l_conv1.output, l_conv3.weight, l_conv3.kernel_size,
l_conv3.in_size, l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.preact, l_conv3.bias, l_conv3.out_size, l_conv3.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.preact, l_conv3.output, l_conv3.out_size * l_conv3.out_size * l_conv3.out_channel);
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.preact, l_conv3.output, l_conv5.weight, l_conv5.kernel_size,
l_conv5.in_size, l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.preact, l_conv5.bias, l_conv5.out_size, l_conv5.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.preact, l_conv5.output, l_conv5.out_size * l_conv5.out_size * l_conv5.out_channel);
// Conv4 + Conv6 Path #3
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.preact, l_conv1.output, l_conv4.weight, l_conv4.kernel_size,
l_conv4.in_size, l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.preact, l_conv4.bias, l_conv4.out_size, l_conv4.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.preact, l_conv4.output, l_conv4.out_size * l_conv4.out_size * l_conv4.out_channel);
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.preact, l_conv4.output, l_conv6.weight, l_conv6.kernel_size,
l_conv6.in_size, l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.preact, l_conv6.bias, l_conv6.out_size, l_conv6.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.preact, l_conv6.output, l_conv6.out_size * l_conv6.out_size * l_conv6.out_channel);
// maxpooling + Conv7 Path #4
hipLaunchKernelGGL(( fp_maxpool), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_maxpool.output, l_conv1.output, l_maxpool.kernel_size, l_maxpool.in_size, l_maxpool.out_size, l_maxpool.out_channel, true);
hipLaunchKernelGGL(( fp_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.preact, l_maxpool.output, l_conv7.weight, l_conv7.kernel_size,
l_conv7.in_size, l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, true);
hipLaunchKernelGGL(( fp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.preact, l_conv7.bias, l_conv7.out_size, l_conv7.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.preact, l_conv7.output, l_conv7.out_size * l_conv7.out_size * l_conv7.out_channel);
// concat
hipLaunchKernelGGL(( concat), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, concat_matrix, l_conv2.output, l_conv5.output, l_conv6.output, l_conv7.output,
l_conv2.out_size, l_conv2.out_channel, l_conv5.out_channel, l_conv6.out_channel, l_conv7.out_channel);
// FC
hipLaunchKernelGGL(( fp_preact_fc), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, concat_matrix, l_FC.preact, l_FC.weight, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
hipLaunchKernelGGL(( fp_bias_fc), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.preact, l_FC.bias, l_FC.out_channel);
hipLaunchKernelGGL(( apply_step_function), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.preact, l_FC.output, l_FC.out_size * l_FC.out_size * l_FC.out_channel);
//l_FC.Out();
return 0;
}
// Back propagation to update weights
static double back_pass()
{
// FC
hipLaunchKernelGGL(( bp_weight_fc), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.d_weight, l_FC.d_preact, l_maxpool.output, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
hipLaunchKernelGGL(( bp_bias_fc), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.bias, l_FC.d_preact, l_FC.out_channel);
hipLaunchKernelGGL(( bp_output_fc), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.d_output, l_FC.d_preact, l_FC.weight, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
//l_FC.dOut();
// parallel block
//bp_four_parallel<<<4,1>>>(&sum_matrix, l_conv2, l_conv3, l_conv4, l_maxpool, l_conv5, l_conv6, l_conv7, &slice_1, &slice_2, &slice_3, &slice_4, l_conv1.output);
// decat
	hipLaunchKernelGGL(( decat), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, l_FC.d_output, slice_1, slice_2, slice_3, slice_4,
l_FC.in_size, l_conv2.out_channel, l_conv3.out_channel, l_conv4.out_channel, l_maxpool.out_channel);
// Conv2 Path #1
	hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.d_output, l_conv2.weight, slice_1, l_conv2.in_size, l_conv2.kernel_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.d_preact, l_conv2.d_output, l_conv2.preact, l_conv2.out_size, l_conv2.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.d_weight, l_conv2.d_preact, l_conv2.output, l_conv2.kernel_size, l_conv2.in_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.bias, l_conv2.d_preact, l_conv2.out_size, l_conv2.out_channel);
// Conv3 + Conv5 Path #2
	hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.d_output, l_conv5.weight, slice_2, l_conv5.in_size, l_conv5.kernel_size,
l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.d_preact, l_conv5.d_output, l_conv5.preact, l_conv5.out_size, l_conv5.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.d_weight, l_conv5.d_preact, l_conv5.output, l_conv5.kernel_size, l_conv5.in_size,
l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.bias, l_conv5.d_preact, l_conv5.out_size, l_conv5.out_channel);
hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.d_output, l_conv3.weight, l_conv5.d_preact, l_conv3.in_size, l_conv3.kernel_size,
l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.d_preact, l_conv3.d_output, l_conv3.preact, l_conv3.out_size, l_conv3.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.d_weight, l_conv3.d_preact, l_conv3.output, l_conv3.kernel_size, l_conv3.in_size,
l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.bias, l_conv3.d_preact, l_conv3.out_size, l_conv3.out_channel);
// Conv4 + Conv6 Path #3
	hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.d_output, l_conv6.weight, slice_3, l_conv6.in_size, l_conv6.kernel_size,
l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.d_preact, l_conv6.d_output, l_conv6.preact, l_conv6.out_size, l_conv6.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.d_weight, l_conv6.d_preact, l_conv6.output, l_conv6.kernel_size, l_conv6.in_size,
l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.bias, l_conv6.d_preact, l_conv6.out_size, l_conv6.out_channel);
hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.d_output, l_conv4.weight, l_conv6.d_preact, l_conv4.in_size, l_conv4.kernel_size,
l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.d_preact, l_conv4.d_output, l_conv4.preact, l_conv4.out_size, l_conv4.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.d_weight, l_conv4.d_preact, l_conv4.output, l_conv4.kernel_size, l_conv4.in_size,
l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.bias, l_conv4.d_preact, l_conv4.out_size, l_conv4.out_channel);
// maxpooling + Conv7 Path #4
	hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.d_output, l_conv7.weight, slice_4, l_conv7.in_size, l_conv7.kernel_size,
l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.d_preact, l_conv7.d_output, l_conv7.preact, l_conv7.out_size, l_conv7.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.d_weight, l_conv7.d_preact, l_conv7.output, l_conv7.kernel_size, l_conv7.in_size,
l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.bias, l_conv7.d_preact, l_conv7.out_size, l_conv7.out_channel);
hipLaunchKernelGGL(( bp_maxpool), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_maxpool.d_preact, l_maxpool.output, l_conv1.output, l_conv7.d_output, l_maxpool.kernel_size,
l_maxpool.in_size, l_maxpool.out_size, l_maxpool.out_channel, true);
// calculate gradient for conv1
int numElem = l_conv2.in_size * l_conv2.in_size * l_conv2.in_channel;
	hipLaunchKernelGGL(( sumGrad), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, sum_matrix, l_conv2.d_preact, l_conv3.d_preact, l_conv4.d_preact, l_maxpool.d_preact, numElem);
// Conv1
//bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.d_output, l_conv1.weight, l_conv2.d_preact, l_conv1.in_size, l_conv2.kernel_size,
	hipLaunchKernelGGL(( bp_output_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.d_output, l_conv1.weight, sum_matrix, l_conv1.in_size, l_conv2.kernel_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true, true);
hipLaunchKernelGGL(( bp_preact_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.d_preact, l_conv1.d_output, l_conv1.preact, l_conv1.out_size, l_conv1.out_channel);
hipLaunchKernelGGL(( bp_weight_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.d_weight, l_conv1.d_preact, l_conv1.output, l_conv1.kernel_size, l_conv1.in_size,
l_conv1.out_size, l_conv1.in_channel, l_conv1.out_channel, false);
hipLaunchKernelGGL(( bp_bias_conv), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.bias, l_conv1.d_preact, l_conv1.out_size, l_conv1.out_channel);
//l_conv1.dOut();
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_FC.weight, l_FC.d_weight, l_FC.M * l_FC.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv1.weight, l_conv1.d_weight, l_conv1.M * l_conv1.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv2.weight, l_conv2.d_weight, l_conv2.M * l_conv2.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv3.weight, l_conv3.d_weight, l_conv3.M * l_conv3.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv4.weight, l_conv4.d_weight, l_conv4.M * l_conv4.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv5.weight, l_conv5.d_weight, l_conv5.M * l_conv5.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv6.weight, l_conv6.d_weight, l_conv6.M * l_conv6.N);
hipLaunchKernelGGL(( apply_grad), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, l_conv7.weight, l_conv7.d_weight, l_conv7.M * l_conv7.N);
return 0;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static hipblasHandle_t blas;
hipblasCreate(&blas);
clock_t start, end;
float err;
int iter = ITERATION;
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
start = clock();
for (int i = 0; i < BATCH_SIZE; ++i) {
float tmp_err;
int index = rand() % train_cnt;
time_taken += forward_pass(train_set[index].data);
l_FC.bp_clear();
l_maxpool.bp_clear();
l_conv1.bp_clear();
l_conv2.bp_clear();
l_conv3.bp_clear();
l_conv4.bp_clear();
l_conv5.bp_clear();
l_conv6.bp_clear();
l_conv7.bp_clear();
// Euclid distance of train_set[i]
//l_FC.Out();
hipLaunchKernelGGL(( calcLoss), dim3(10), dim3(1), 0, 0, l_FC.d_preact, l_FC.output, train_set[index].label, 10);
hipblasSnrm2(blas, 10, l_FC.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
//printf("jfhgodsufg\n");
err /= BATCH_SIZE;
end = clock();
time_taken += ((double)end - start) / CLOCKS_PER_SEC;
fprintf(stdout, "training loss: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < 0) { // threshold
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
fprintf(stdout, "\nAverage FPS - %lf\n", (ITERATION * BATCH_SIZE) / time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
hipMemcpy(res, l_FC.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Test Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
| 7fd7c036baeed6d802b229fce1061ce440280ab5.cu | #define USE_MNIST_LOADER
#define MNIST_DOUBLE
#include "mnist.h"
#include "layer.h"
#include <cuda.h>
#include <cstdio>
#include <time.h>
static mnist_data *train_set, *test_set;
static unsigned int train_cnt, test_cnt;
// Define layers of CNN
static Layer l_input = Layer(0, 0, 28, 0, 1);
static Layer l_conv1 = Layer(3, 28, 24, 1, 8);
static Layer l_conv2 = Layer(1, 24, 24, 8, 8);
static Layer l_conv3 = Layer(3, 24, 24, 8, 8);
static Layer l_conv4 = Layer(5, 24, 24, 8, 8);
static Layer l_maxpool = Layer(3, 24, 24, 8, 8);
static Layer l_conv5 = Layer(1, 24, 24, 8, 8);
static Layer l_conv6 = Layer(1, 24, 24, 8, 8);
static Layer l_conv7 = Layer(1, 24, 24, 8, 8);
static Layer l_FC = Layer(24, 24, 1, 32, 10);
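// Inception-style block: conv1 feeds four parallel paths (1x1 conv; 3x3 conv -> 1x1 conv; 5x5 conv -> 1x1 conv; 3x3 maxpool -> 1x1 conv)
// whose 8-channel outputs are concatenated into 32 channels for the fully connected layer.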
static float *concat_matrix, *slice_1, *slice_2, *slice_3, *slice_4, *sum_matrix;
static void learn();
static unsigned int classify(double data[28][28]);
static void test();
static double forward_pass(double data[28][28]);
static double back_pass();
static inline void loaddata()
{
mnist_load("data/train-images.idx3-ubyte", "data/train-labels.idx1-ubyte",
&train_set, &train_cnt);
mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte",
&test_set, &test_cnt);
}
int main(int argc, const char **argv)
{
srand(time(NULL));
CUresult err = cuInit(0);
if (err != CUDA_SUCCESS) {
fprintf(stderr, "CUDA initialisation failed with error code - %d\n", err);
return 1;
}
cudaMalloc((void **)&concat_matrix, sizeof(float) * 24 * 24 * 32);
cudaMalloc((void **)&slice_1, sizeof(float) * 24 * 24 * 8);
cudaMalloc((void **)&slice_2, sizeof(float) * 24 * 24 * 8);
cudaMalloc((void **)&slice_3, sizeof(float) * 24 * 24 * 8);
cudaMalloc((void **)&slice_4, sizeof(float) * 24 * 24 * 8);
cudaMalloc((void **)&sum_matrix, sizeof(float) * 24 * 24 * 8);
loaddata();
//test();
learn();
test();
return 0;
}
// Forward propagation of a single row in dataset
static double forward_pass(double data[28][28])
{
float input[28][28];
//fprintf(stdout, "%f\n", data[14][14]);
for (int i = 0; i < 28; ++i) {
for (int j = 0; j < 28; ++j) {
input[i][j] = data[i][j];
}
}
l_input.clear();
l_conv1.clear();
l_conv2.clear();
l_conv3.clear();
l_conv4.clear();
l_conv5.clear();
l_conv6.clear();
l_conv7.clear();
l_maxpool.clear();
l_FC.clear();
// for (int i = 0; i < 18; i++){
// fprintf(stdout, "%f ", input[i][i]);
// }
l_input.setOutput((float *)input);
//l_input.Out();
// Conv1
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.preact, l_input.output, l_conv1.weight, l_conv1.kernel_size,
l_conv1.in_size, l_conv1.out_size, l_conv1.in_channel, l_conv1.out_channel, false);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.preact, l_conv1.bias, l_conv1.out_size, l_conv1.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.preact, l_conv1.output, l_conv1.out_size * l_conv1.out_size * l_conv1.out_channel);
// Conv2 Path #1
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.preact, l_conv1.output, l_conv2.weight, l_conv2.kernel_size,
l_conv2.in_size, l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.preact, l_conv2.bias, l_conv2.out_size, l_conv2.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.preact, l_conv2.output, l_conv2.out_size * l_conv2.out_size * l_conv2.out_channel);
// Conv3 + Conv5 Path #2
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.preact, l_conv1.output, l_conv3.weight, l_conv3.kernel_size,
l_conv3.in_size, l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.preact, l_conv3.bias, l_conv3.out_size, l_conv3.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.preact, l_conv3.output, l_conv3.out_size * l_conv3.out_size * l_conv3.out_channel);
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.preact, l_conv3.output, l_conv5.weight, l_conv5.kernel_size,
l_conv5.in_size, l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.preact, l_conv5.bias, l_conv5.out_size, l_conv5.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.preact, l_conv5.output, l_conv5.out_size * l_conv5.out_size * l_conv5.out_channel);
// Conv4 + Conv6 Path #3
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.preact, l_conv1.output, l_conv4.weight, l_conv4.kernel_size,
l_conv4.in_size, l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.preact, l_conv4.bias, l_conv4.out_size, l_conv4.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.preact, l_conv4.output, l_conv4.out_size * l_conv4.out_size * l_conv4.out_channel);
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.preact, l_conv4.output, l_conv6.weight, l_conv6.kernel_size,
l_conv6.in_size, l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.preact, l_conv6.bias, l_conv6.out_size, l_conv6.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.preact, l_conv6.output, l_conv6.out_size * l_conv6.out_size * l_conv6.out_channel);
// maxpooling + Conv7 Path #4
fp_maxpool<<<GRID_SIZE, BLOCK_SIZE>>>(l_maxpool.output, l_conv1.output, l_maxpool.kernel_size, l_maxpool.in_size, l_maxpool.out_size, l_maxpool.out_channel, true);
fp_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.preact, l_maxpool.output, l_conv7.weight, l_conv7.kernel_size,
l_conv7.in_size, l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, true);
fp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.preact, l_conv7.bias, l_conv7.out_size, l_conv7.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.preact, l_conv7.output, l_conv7.out_size * l_conv7.out_size * l_conv7.out_channel);
// concat
concat<<<GRID_SIZE,BLOCK_SIZE>>>(concat_matrix, l_conv2.output, l_conv5.output, l_conv6.output, l_conv7.output,
l_conv2.out_size, l_conv2.out_channel, l_conv5.out_channel, l_conv6.out_channel, l_conv7.out_channel);
// FC
fp_preact_fc<<<GRID_SIZE, BLOCK_SIZE>>>(concat_matrix, l_FC.preact, l_FC.weight, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
fp_bias_fc<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.preact, l_FC.bias, l_FC.out_channel);
apply_step_function<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.preact, l_FC.output, l_FC.out_size * l_FC.out_size * l_FC.out_channel);
//l_FC.Out();
return 0;
}
// Back propagation to update weights
static double back_pass()
{
// FC
bp_weight_fc<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.d_weight, l_FC.d_preact, l_maxpool.output, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
bp_bias_fc<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.bias, l_FC.d_preact, l_FC.out_channel);
bp_output_fc<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.d_output, l_FC.d_preact, l_FC.weight, l_FC.in_size, l_FC.in_channel, l_FC.out_channel);
//l_FC.dOut();
// parallel block
//bp_four_parallel<<<4,1>>>(&sum_matrix, l_conv2, l_conv3, l_conv4, l_maxpool, l_conv5, l_conv6, l_conv7, &slice_1, &slice_2, &slice_3, &slice_4, l_conv1.output);
// decat
	decat<<<GRID_SIZE,BLOCK_SIZE>>>(l_FC.d_output, slice_1, slice_2, slice_3, slice_4,
l_FC.in_size, l_conv2.out_channel, l_conv3.out_channel, l_conv4.out_channel, l_maxpool.out_channel);
// Conv2 Path #1
	bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.d_output, l_conv2.weight, slice_1, l_conv2.in_size, l_conv2.kernel_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.d_preact, l_conv2.d_output, l_conv2.preact, l_conv2.out_size, l_conv2.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.d_weight, l_conv2.d_preact, l_conv2.output, l_conv2.kernel_size, l_conv2.in_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.bias, l_conv2.d_preact, l_conv2.out_size, l_conv2.out_channel);
// Conv3 + Conv5 Path #2
	bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.d_output, l_conv5.weight, slice_2, l_conv5.in_size, l_conv5.kernel_size,
l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.d_preact, l_conv5.d_output, l_conv5.preact, l_conv5.out_size, l_conv5.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.d_weight, l_conv5.d_preact, l_conv5.output, l_conv5.kernel_size, l_conv5.in_size,
l_conv5.out_size, l_conv5.in_channel, l_conv5.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.bias, l_conv5.d_preact, l_conv5.out_size, l_conv5.out_channel);
bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.d_output, l_conv3.weight, l_conv5.d_preact, l_conv3.in_size, l_conv3.kernel_size,
l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.d_preact, l_conv3.d_output, l_conv3.preact, l_conv3.out_size, l_conv3.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.d_weight, l_conv3.d_preact, l_conv3.output, l_conv3.kernel_size, l_conv3.in_size,
l_conv3.out_size, l_conv3.in_channel, l_conv3.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.bias, l_conv3.d_preact, l_conv3.out_size, l_conv3.out_channel);
// Conv4 + Conv6 Path #3
	bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.d_output, l_conv6.weight, slice_3, l_conv6.in_size, l_conv6.kernel_size,
l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.d_preact, l_conv6.d_output, l_conv6.preact, l_conv6.out_size, l_conv6.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.d_weight, l_conv6.d_preact, l_conv6.output, l_conv6.kernel_size, l_conv6.in_size,
l_conv6.out_size, l_conv6.in_channel, l_conv6.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.bias, l_conv6.d_preact, l_conv6.out_size, l_conv6.out_channel);
bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.d_output, l_conv4.weight, l_conv6.d_preact, l_conv4.in_size, l_conv4.kernel_size,
l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.d_preact, l_conv4.d_output, l_conv4.preact, l_conv4.out_size, l_conv4.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.d_weight, l_conv4.d_preact, l_conv4.output, l_conv4.kernel_size, l_conv4.in_size,
l_conv4.out_size, l_conv4.in_channel, l_conv4.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.bias, l_conv4.d_preact, l_conv4.out_size, l_conv4.out_channel);
// maxpooling + Conv7 Path #4
	bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.d_output, l_conv7.weight, slice_4, l_conv7.in_size, l_conv7.kernel_size,
l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.d_preact, l_conv7.d_output, l_conv7.preact, l_conv7.out_size, l_conv7.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.d_weight, l_conv7.d_preact, l_conv7.output, l_conv7.kernel_size, l_conv7.in_size,
l_conv7.out_size, l_conv7.in_channel, l_conv7.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.bias, l_conv7.d_preact, l_conv7.out_size, l_conv7.out_channel);
bp_maxpool<<<GRID_SIZE, BLOCK_SIZE>>>(l_maxpool.d_preact, l_maxpool.output, l_conv1.output, l_conv7.d_output, l_maxpool.kernel_size,
l_maxpool.in_size, l_maxpool.out_size, l_maxpool.out_channel, true);
// calculate gradient for conv1
int numElem = l_conv2.in_size * l_conv2.in_size * l_conv2.in_channel;
	sumGrad<<<GRID_SIZE,BLOCK_SIZE>>>(sum_matrix, l_conv2.d_preact, l_conv3.d_preact, l_conv4.d_preact, l_maxpool.d_preact, numElem);
// Conv1
//bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.d_output, l_conv1.weight, l_conv2.d_preact, l_conv1.in_size, l_conv2.kernel_size,
	bp_output_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.d_output, l_conv1.weight, sum_matrix, l_conv1.in_size, l_conv2.kernel_size,
l_conv2.out_size, l_conv2.in_channel, l_conv2.out_channel, true, true);
bp_preact_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.d_preact, l_conv1.d_output, l_conv1.preact, l_conv1.out_size, l_conv1.out_channel);
bp_weight_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.d_weight, l_conv1.d_preact, l_conv1.output, l_conv1.kernel_size, l_conv1.in_size,
l_conv1.out_size, l_conv1.in_channel, l_conv1.out_channel, false);
bp_bias_conv<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.bias, l_conv1.d_preact, l_conv1.out_size, l_conv1.out_channel);
//l_conv1.dOut();
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_FC.weight, l_FC.d_weight, l_FC.M * l_FC.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv1.weight, l_conv1.d_weight, l_conv1.M * l_conv1.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv2.weight, l_conv2.d_weight, l_conv2.M * l_conv2.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv3.weight, l_conv3.d_weight, l_conv3.M * l_conv3.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv4.weight, l_conv4.d_weight, l_conv4.M * l_conv4.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv5.weight, l_conv5.d_weight, l_conv5.M * l_conv5.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv6.weight, l_conv6.d_weight, l_conv6.M * l_conv6.N);
apply_grad<<<GRID_SIZE, BLOCK_SIZE>>>(l_conv7.weight, l_conv7.d_weight, l_conv7.M * l_conv7.N);
return 0;
}
// Unfold the input layer
static void unfold_input(double input[28][28], double unfolded[24*24][5*5])
{
int a = 0;
(void)unfold_input;
for (int i = 0; i < 2; ++i)
for (int j = 0; j < 2; ++j) {
int b = 0;
for (int x = i; x < i + 2; ++x)
for (int y = j; y < j+2; ++y)
unfolded[a][b++] = input[x][y];
a++;
}
}
static void learn()
{
static cublasHandle_t blas;
cublasCreate(&blas);
clock_t start, end;
float err;
int iter = ITERATION;
double time_taken = 0.0;
fprintf(stdout ,"Learning\n");
while (iter < 0 || iter-- > 0) {
err = 0.0f;
start = clock();
for (int i = 0; i < BATCH_SIZE; ++i) {
float tmp_err;
int index = rand() % train_cnt;
time_taken += forward_pass(train_set[index].data);
l_FC.bp_clear();
l_maxpool.bp_clear();
l_conv1.bp_clear();
l_conv2.bp_clear();
l_conv3.bp_clear();
l_conv4.bp_clear();
l_conv5.bp_clear();
l_conv6.bp_clear();
l_conv7.bp_clear();
// Euclid distance of train_set[i]
//l_FC.Out();
calcLoss<<<10, 1>>>(l_FC.d_preact, l_FC.output, train_set[index].label, 10);
cublasSnrm2(blas, 10, l_FC.d_preact, 1, &tmp_err);
err += tmp_err;
time_taken += back_pass();
}
//printf("jfhgodsufg\n");
err /= BATCH_SIZE;
end = clock();
time_taken += ((double)end - start) / CLOCKS_PER_SEC;
fprintf(stdout, "training loss: %e, time_on_gpu: %lf\n", err, time_taken);
if (err < 0) { // threshold
fprintf(stdout, "Training complete, error less than threshold\n\n");
break;
}
}
fprintf(stdout, "\n Time - %lf\n", time_taken);
fprintf(stdout, "\nAverage FPS - %lf\n", (ITERATION * BATCH_SIZE) / time_taken);
}
// Returns label of given data (0-9)
static unsigned int classify(double data[28][28])
{
float res[10];
forward_pass(data);
unsigned int max = 0;
cudaMemcpy(res, l_FC.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (res[max] < res[i]) {
max = i;
}
}
return max;
}
// Perform forward propagation of test data
static void test()
{
int error = 0;
for (int i = 0; i < test_cnt; ++i) {
if (classify(test_set[i].data) != test_set[i].label) {
++error;
}
}
fprintf(stdout, "Test Error Rate: %.2lf%%\n",
double(error) / double(test_cnt) * 100.0);
}
|
f6d1ecdd79cf3d1d89889635eb9b023df4704275.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include "ScanHost.cu.h"
template<class OP, class T>
int testGeneric( unsigned int num_threads,
unsigned int block_size,
T* h_in,
int* flags_h,
T* h_out
) {
unsigned int mem_size = num_threads * sizeof(T);
// allocate device memory
T* d_in;
T* d_out;
int* flags_d;
hipMalloc((void**)&d_in , mem_size);
hipMalloc((void**)&d_out, mem_size);
hipMalloc((void**)&flags_d, num_threads*sizeof(int));
// copy host memory to device
hipMemcpy(d_in, h_in, mem_size, hipMemcpyHostToDevice);
hipMemcpy(flags_d, flags_h, num_threads*sizeof(int), hipMemcpyHostToDevice);
{ // execute kernel
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
sgmScanInc< OP,T > ( block_size, num_threads, d_in, flags_d, d_out );
//scanInc< OP,T > ( block_size, num_threads, d_in, d_out );
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("GPU segmented inclusive scan runs in: %lu microsecs\n", elapsed);
}
// copy result from device back to host
hipMemcpy(h_out, d_out, mem_size, hipMemcpyDeviceToHost);
// cleanup memory
hipFree(d_in );
hipFree(d_out);
hipFree(flags_d);
return 1;
}
int mainTest(int argc, char** argv) {
const unsigned int num_threads = 8353455;
const unsigned int block_size = 512;
unsigned int mem_size = num_threads * sizeof(int);
int* h_in = (int*) malloc(mem_size);
int* h_out = (int*) malloc(mem_size);
int* flags_h = (int*) malloc(num_threads*sizeof(int));
int sgm_size = 123;
{ // init segments and flags
for(unsigned int i=0; i<num_threads; i++) {
h_in [i] = 1;
flags_h[i] = (i % sgm_size == 0) ? 1 : 0;
}
}
testGeneric<Add<int>,int>( num_threads, block_size, h_in, flags_h, h_out );
{ // validation
bool success = true;
int accum = 0;
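// Expected result of a segmented inclusive scan over an all-ones input: element i equals its 1-based position within its 123-element segment.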
for(int i=0; i<num_threads; i++) {
if (i % sgm_size == 0) accum = 1;
else accum += 1;
//accum += 1;
if ( accum != h_out[i] ) {
success = false;
printf("Violation: %.1d should be %.1d\n", h_out[i], accum);
}
}
if(success) printf("\n VALID RESULT!\n");
else printf("\nINVALID RESULT!\n");
}
// cleanup memory
free(h_in );
free(h_out);
free(flags_h);
return 0;
}
#include "MsspProblem.cu.h"
#include "SparseMatVctMult.cu.h"
int main(int argc, char** argv) {
const unsigned int mssp_list_size = 8353455;
const unsigned int matrix_row_num = 11033;
const unsigned int vct_size = 2076;
const unsigned int block_size = 256;
MsspProblem(block_size, mssp_list_size);
SparseMatVctMult(block_size, matrix_row_num, vct_size);
}
| f6d1ecdd79cf3d1d89889635eb9b023df4704275.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h> // gettimeofday()/timeval used in the timing code below
#include "ScanHost.cu.h"
template<class OP, class T>
int testGeneric( unsigned int num_threads,
unsigned int block_size,
T* h_in,
int* flags_h,
T* h_out
) {
unsigned int mem_size = num_threads * sizeof(T);
// allocate device memory
T* d_in;
T* d_out;
int* flags_d;
cudaMalloc((void**)&d_in , mem_size);
cudaMalloc((void**)&d_out, mem_size);
cudaMalloc((void**)&flags_d, num_threads*sizeof(int));
// copy host memory to device
cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(flags_d, flags_h, num_threads*sizeof(int), cudaMemcpyHostToDevice);
{ // execute kernel
unsigned long int elapsed;
struct timeval t_start, t_end, t_diff;
gettimeofday(&t_start, NULL);
sgmScanInc< OP,T > ( block_size, num_threads, d_in, flags_d, d_out );
//scanInc< OP,T > ( block_size, num_threads, d_in, d_out );
gettimeofday(&t_end, NULL);
timeval_subtract(&t_diff, &t_end, &t_start);
elapsed = (t_diff.tv_sec*1e6+t_diff.tv_usec);
printf("GPU segmented inclusive scan runs in: %lu microsecs\n", elapsed);
}
// copy result from device back to host
cudaMemcpy(h_out, d_out, mem_size, cudaMemcpyDeviceToHost);
// cleanup memory
cudaFree(d_in );
cudaFree(d_out);
cudaFree(flags_d);
return 1;
}
int mainTest(int argc, char** argv) {
const unsigned int num_threads = 8353455;
const unsigned int block_size = 512;
unsigned int mem_size = num_threads * sizeof(int);
int* h_in = (int*) malloc(mem_size);
int* h_out = (int*) malloc(mem_size);
int* flags_h = (int*) malloc(num_threads*sizeof(int));
int sgm_size = 123;
{ // init segments and flags
for(unsigned int i=0; i<num_threads; i++) {
h_in [i] = 1;
flags_h[i] = (i % sgm_size == 0) ? 1 : 0;
}
}
testGeneric<Add<int>,int>( num_threads, block_size, h_in, flags_h, h_out );
{ // validation
bool success = true;
int accum = 0;
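// Expected result of a segmented inclusive scan over an all-ones input: element i equals its 1-based position within its 123-element segment.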
for(int i=0; i<num_threads; i++) {
if (i % sgm_size == 0) accum = 1;
else accum += 1;
//accum += 1;
if ( accum != h_out[i] ) {
success = false;
printf("Violation: %.1d should be %.1d\n", h_out[i], accum);
}
}
if(success) printf("\n VALID RESULT!\n");
else printf("\nINVALID RESULT!\n");
}
// cleanup memory
free(h_in );
free(h_out);
free(flags_h);
return 0;
}
#include "MsspProblem.cu.h"
#include "SparseMatVctMult.cu.h"
int main(int argc, char** argv) {
const unsigned int mssp_list_size = 8353455;
const unsigned int matrix_row_num = 11033;
const unsigned int vct_size = 2076;
const unsigned int block_size = 256;
MsspProblem(block_size, mssp_list_size);
SparseMatVctMult(block_size, matrix_row_num, vct_size);
}
|
c9e7265def6778532f1263d64a20f73e0ef8dac5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -no-opaque-pointers -fcuda-is-device -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
// RUN: -emit-llvm -o - %s | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: define {{.*}}@_ZN1AC2Ev(%struct.A* noundef nonnull align 8 dereferenceable(8) %this)
// CHECK: store %struct.A* %this, %struct.A** %this.addr.ascast
// CHECK: %this1 = load %struct.A*, %struct.A** %this.addr.ascast
// CHECK: %[[VTFIELD:.*]] = bitcast %struct.A* %this1 to i32 (...)* addrspace(1)**
// CHECK: store i32 (...)* addrspace(1)* bitcast{{.*}} @_ZTV1A{{.*}}, i32 (...)* addrspace(1)** %[[VTFIELD]]
struct A {
__device__ virtual void vf() {}
};
__global__ void kern() {
A a;
}
| c9e7265def6778532f1263d64a20f73e0ef8dac5.cu | // RUN: %clang_cc1 -no-opaque-pointers -fcuda-is-device -triple amdgcn-amd-amdhsa -target-cpu gfx906 \
// RUN: -emit-llvm -o - %s | FileCheck %s
#include "Inputs/cuda.h"
// CHECK-LABEL: define {{.*}}@_ZN1AC2Ev(%struct.A* noundef nonnull align 8 dereferenceable(8) %this)
// CHECK: store %struct.A* %this, %struct.A** %this.addr.ascast
// CHECK: %this1 = load %struct.A*, %struct.A** %this.addr.ascast
// CHECK: %[[VTFIELD:.*]] = bitcast %struct.A* %this1 to i32 (...)* addrspace(1)**
// CHECK: store i32 (...)* addrspace(1)* bitcast{{.*}} @_ZTV1A{{.*}}, i32 (...)* addrspace(1)** %[[VTFIELD]]
struct A {
__device__ virtual void vf() {}
};
__global__ void kern() {
A a;
}
|
c49d7f31f18a7464eb54117a3f0ed74d9f24e4b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "radix.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Radix {
int* dev_tempData;
int* dev_inputData;
int* dev_boolData;
int* dev_notBoolData;
int* dev_scanData;
int* dev_outputData;
// Returns the position of the most significant bit
int getMSB(int x)
{
int bit = 1 << 31;
for (int i = 31; i >= 0; i--, bit >>= 1)
{
if (x & bit)
return i + 1;
}
return 0;
}
// Returns the maximum of the array
int getMax(int n, const int* a)
{
int maximum = a[0];
for (int i = 1; i < n; i++)
{
maximum = ::max(maximum, a[i]);
}
return maximum;
}
// Maps an array to 2 arrays that contain only 1s and 0s.
// _bools_ is just the logic NOT of _notBools_
__global__ void kernMapTo2Bools(int n, int bit, int* bools, int* notBools, const int* idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
bool b = idata[index] & bit;
bools[index] = b;
notBools[index] = !b;
}
// Computes the temp array _temps_ which stores address for writing true keys
__global__ void kernComputeAddressOfTrueKeys(int n, int* temps, const int* notBools, const int* scanData)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
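// scanData is the exclusive scan of notBools, so notBools[n-1] + scanData[n-1] is the number of keys with a 0 bit (totalFalses); keys with a 1 bit are scattered after them.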
int totalFalses = notBools[n - 1] + scanData[n - 1];
temps[index] = index - scanData[index] + totalFalses;
}
// Scatters based on address _temps_
__global__ void kernRadixScatter(int n, int* odata, const int* temps, const int* bools, const int* scanData, const int* idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
int newIdx = bools[index] ? temps[index] : scanData[index];
odata[newIdx] = idata[index];
}
/**
* Performs radix sort on idata, storing the result into odata.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to sort.
*/
void sort(int n, int* odata, const int* idata)
{
int depth = ilog2ceil(n);
int size = 1 << depth; // array sizes are rounded up to the next power of two
int maximum = getMax(n, idata);
int highestBit = getMSB(maximum);
dim3 threadsPerBlock(blockSize);
dim3 blocksPerGrid((n + blockSize - 1) / blockSize);
dim3 scanBlocksPerGrid((n + blockSize - 1) / blockSize);
hipMalloc((void**)&dev_inputData, n * sizeof(int));
hipMalloc((void**)&dev_boolData, n * sizeof(int));
hipMalloc((void**)&dev_notBoolData, n * sizeof(int));
hipMalloc((void**)&dev_scanData, size * sizeof(int));
hipMalloc((void**)&dev_tempData, n * sizeof(int));
hipMalloc((void**)&dev_outputData, n * sizeof(int));
hipLaunchKernelGGL(( Common::kernInitializeArray), dim3(scanBlocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_scanData, 0);
hipMemcpy(dev_inputData, idata, n * sizeof(int), hipMemcpyKind::hipMemcpyHostToDevice);
// Do one radix-sort pass per bit, up to the most significant bit of the maximum value
for (int i = 0, bit = 1; i < highestBit; i++, bit <<= 1)
{
// Step 1: Compute the bool array and notBool array
hipLaunchKernelGGL(( kernMapTo2Bools), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, bit, dev_boolData, dev_notBoolData, dev_inputData);
// Step 2: Exclusive scan array
hipMemcpy(dev_scanData, dev_notBoolData, n * sizeof(int), hipMemcpyKind::hipMemcpyDeviceToDevice);
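// Work-efficient (Blelloch) exclusive scan: the up-sweep builds partial sums in place over the power-of-two-sized buffer, the last element is zeroed, and the down-sweep distributes the sums back down.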
for (int d = 0; d < depth; d++)
{
hipLaunchKernelGGL(( Efficient::kernUpSweep), dim3(scanBlocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_scanData, d);
}
hipMemset(dev_scanData + size - 1, 0, sizeof(int));
for (int d = depth - 1; d >= 0; d--)
{
hipLaunchKernelGGL(( Efficient::kernDownSweep), dim3(scanBlocksPerGrid), dim3(threadsPerBlock), 0, 0, size, dev_scanData, d);
}
// Step 3: Compute temp array _dev_tempData_
hipLaunchKernelGGL(( kernComputeAddressOfTrueKeys), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_tempData, dev_notBoolData, dev_scanData);
// Step 4: Scatter
hipLaunchKernelGGL(( kernRadixScatter), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_outputData, dev_tempData, dev_boolData, dev_scanData, dev_inputData);
// Swap for next round of radix sort
std::swap(dev_outputData, dev_inputData);
}
hipMemcpy(odata, dev_inputData, n * sizeof(int), hipMemcpyKind::hipMemcpyDeviceToHost);
hipFree(dev_inputData);
hipFree(dev_boolData);
hipFree(dev_notBoolData);
hipFree(dev_scanData);
hipFree(dev_tempData);
hipFree(dev_outputData);
}
}
}
| c49d7f31f18a7464eb54117a3f0ed74d9f24e4b9.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "radix.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Radix {
int* dev_tempData;
int* dev_inputData;
int* dev_boolData;
int* dev_notBoolData;
int* dev_scanData;
int* dev_outputData;
// Returns the position of the most significant bit
int getMSB(int x)
{
int bit = 1 << 31;
for (int i = 31; i >= 0; i--, bit >>= 1)
{
if (x & bit)
return i + 1;
}
return 0;
}
// Returns the maximum of the array
int getMax(int n, const int* a)
{
int maximum = a[0];
for (int i = 1; i < n; i++)
{
maximum = std::max(maximum, a[i]);
}
return maximum;
}
// Maps an array to 2 arrays that contain only 1s and 0s.
// _bools_ is just the logic NOT of _notBools_
__global__ void kernMapTo2Bools(int n, int bit, int* bools, int* notBools, const int* idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
bool b = idata[index] & bit;
bools[index] = b;
notBools[index] = !b;
}
// Computes the temp array _temps_ which stores address for writing true keys
__global__ void kernComputeAddressOfTrueKeys(int n, int* temps, const int* notBools, const int* scanData)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
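// scanData is the exclusive scan of notBools, so notBools[n-1] + scanData[n-1] is the number of keys with a 0 bit (totalFalses); keys with a 1 bit are scattered after them.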
int totalFalses = notBools[n - 1] + scanData[n - 1];
temps[index] = index - scanData[index] + totalFalses;
}
// Scatters based on address _temps_
__global__ void kernRadixScatter(int n, int* odata, const int* temps, const int* bools, const int* scanData, const int* idata)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= n)
{
return;
}
int newIdx = bools[index] ? temps[index] : scanData[index];
odata[newIdx] = idata[index];
}
/**
* Performs radix sort on idata, storing the result into odata.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to sort.
*/
void sort(int n, int* odata, const int* idata)
{
int depth = ilog2ceil(n);
int size = 1 << depth; // array sizes are rounded up to the next power of two
int maximum = getMax(n, idata);
int highestBit = getMSB(maximum);
dim3 threadsPerBlock(blockSize);
dim3 blocksPerGrid((n + blockSize - 1) / blockSize);
dim3 scanBlocksPerGrid((n + blockSize - 1) / blockSize);
cudaMalloc((void**)&dev_inputData, n * sizeof(int));
cudaMalloc((void**)&dev_boolData, n * sizeof(int));
cudaMalloc((void**)&dev_notBoolData, n * sizeof(int));
cudaMalloc((void**)&dev_scanData, size * sizeof(int));
cudaMalloc((void**)&dev_tempData, n * sizeof(int));
cudaMalloc((void**)&dev_outputData, n * sizeof(int));
Common::kernInitializeArray<<<scanBlocksPerGrid, threadsPerBlock>>>(size, dev_scanData, 0);
cudaMemcpy(dev_inputData, idata, n * sizeof(int), cudaMemcpyKind::cudaMemcpyHostToDevice);
// Do one radix-sort pass per bit, up to the most significant bit of the maximum value
for (int i = 0, bit = 1; i < highestBit; i++, bit <<= 1)
{
// Step 1: Compute the bool array and notBool array
kernMapTo2Bools<<<blocksPerGrid, threadsPerBlock>>>(n, bit, dev_boolData, dev_notBoolData, dev_inputData);
// Step 2: Exclusive scan array
cudaMemcpy(dev_scanData, dev_notBoolData, n * sizeof(int), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
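// Work-efficient (Blelloch) exclusive scan: the up-sweep builds partial sums in place over the power-of-two-sized buffer, the last element is zeroed, and the down-sweep distributes the sums back down.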
for (int d = 0; d < depth; d++)
{
Efficient::kernUpSweep<<<scanBlocksPerGrid, threadsPerBlock>>>(size, dev_scanData, d);
}
cudaMemset(dev_scanData + size - 1, 0, sizeof(int));
for (int d = depth - 1; d >= 0; d--)
{
Efficient::kernDownSweep<<<scanBlocksPerGrid, threadsPerBlock>>>(size, dev_scanData, d);
}
// Step 3: Compute temp array _dev_tempData_
kernComputeAddressOfTrueKeys<<<blocksPerGrid, threadsPerBlock>>>(n, dev_tempData, dev_notBoolData, dev_scanData);
// Step 4: Scatter
kernRadixScatter<<<blocksPerGrid, threadsPerBlock>>>(n, dev_outputData, dev_tempData, dev_boolData, dev_scanData, dev_inputData);
// Swap for next round of radix sort
std::swap(dev_outputData, dev_inputData);
}
cudaMemcpy(odata, dev_inputData, n * sizeof(int), cudaMemcpyKind::cudaMemcpyDeviceToHost);
cudaFree(dev_inputData);
cudaFree(dev_boolData);
cudaFree(dev_notBoolData);
cudaFree(dev_scanData);
cudaFree(dev_tempData);
cudaFree(dev_outputData);
}
}
}
|
fa3d266168f57f595dbd9fdece3ec6eaec341a11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
enum gpu_tests_enum
{
GLOBAL,
SHARED,
CONSTANT,
REGISTER,
STREAM,
NUM_GPU_TESTS
};
gpu_tests_enum& operator++(gpu_tests_enum& e)
{
return e = (e == NUM_GPU_TESTS) ? GLOBAL : static_cast<gpu_tests_enum>(static_cast<int>(e)+1);
}
std::string gpu_tests_strings[NUM_GPU_TESTS] = {
"Global",
"Shared",
"Constant",
"Register",
"Stream"};
// Global GPU add c[i] = a[i] + b[i]
__global__ void addGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Global GPU subtract c[i] = a[i] - b[i]
__global__ void subtractGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Global GPU multiply c[i] = a[i] * b[i]
__global__ void multGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Global GPU div c[i] = a[i] / b[i]
__global__ void divGlob(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Global GPU mod c[i] = a[i] % b[i]
__global__ void modGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add in register c[i] = a[i] + b[i]
__device__ void addReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA + tempB;
c[thread_idx] = tempResult;
}
// Device GPU subtract in register c[i] = a[i] - b[i]
__device__ void subtractReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA - tempB;
c[thread_idx] = tempResult;
}
// Device GPU multiply in register c[i] = a[i] * b[i]
__device__ void multReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA * tempB;
c[thread_idx] = tempResult;
}
// Device GPU div in register c[i] = a[i] / b[i]
__device__ void divReg(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA / tempB;
c[thread_idx] = tempResult;
}
// Device GPU mod in register c[i] = a[i] % b[i]
__device__ void modReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA % tempB;
c[thread_idx] = tempResult;
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
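// Note: the shared arrays are indexed by the global thread id and the launch sizes the allocation for all threads, so this kernel effectively assumes a single block.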
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA by sharedB and store in multDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract b from a and store in subDest
subtract(a, b, subDest);
// Multiply a by b and store in multDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 register math operations
__global__ void executeRegisterMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
addReg(a, b, addDest);
// Subtract b from a and store in subDest
subtractReg(a, b, subDest);
// Multiply a by b and store in multDest
multReg(a, b, multDest);
// Divide a by b and store in divDest
divReg(a, b, divDest);
// Mod a by b and store in modDest
modReg(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
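// VAL_A and VAL_B live in __constant__ memory and are known at compile time, so this kernel mostly measures launch overhead and the global-memory stores.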
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract VAL_B from VAL_A and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply VAL_A by VAL_B and store in multDest
multDest[tid] = VAL_A * VAL_B;
// Divide VAL_A by VAL_B and store in divDest
divDest[tid] = VAL_A / VAL_B; // VAL_B is chosen to not be 0.
// Mod VAL_A by VAL_B and store in modDest
modDest[tid] = VAL_A % VAL_B; // VAL_B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers using the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes a streams test, which is similar to the GPU tests below except that here the
// host-to-device copies, kernel launches, and device-to-host copies are queued asynchronously on a CUDA stream.
// The data is filled with random numbers using the same seed as the CPU tests.
void executeStreamTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipStream_t stream;
hipStreamCreate(&stream);
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Copy the inputs asynchronously and call the global version of each math kernel on the stream.
// Work queued on the stream executes in order without blocking the host, so the copies and
// kernels can overlap with host-side work until the synchronize below.
hipMemcpyAsync(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice, stream);
// Asynchronously add and then copy memory to host.
hipLaunchKernelGGL(( addGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_add_dest);
hipMemcpyAsync(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously subtract and then copy memory to host.
hipLaunchKernelGGL(( subtractGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_sub_dest);
hipMemcpyAsync(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously multiply and then copy memory to host.
hipLaunchKernelGGL(( multGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_mult_dest);
hipMemcpyAsync(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously divide and then copy memory to host.
hipLaunchKernelGGL(( divGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_div_dest);
hipMemcpyAsync(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
// Asynchronously modulous and then copy memory to host.
hipLaunchKernelGGL(( modGlob), dim3(numBlocks), dim3(blockSize), 0, stream, gpu_a, gpu_b, gpu_mod_dest);
hipMemcpyAsync(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost, stream);
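// Block until every copy and kernel queued on the stream has completed before the host reads the result arrays.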
hipStreamSynchronize(stream);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
hipStreamDestroy(stream);
}
// Executes each of the gpu tests by creating local memory, copying it to global memory, and then performing
// all 5 math operations on the data.
// The data is filled with random numbers using the same seed as the CPU tests.
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks, const gpu_tests_enum testType)
{
// The stream test works differently enough that it requires a different method since its calls will all be async.
if (testType == STREAM)
{
executeStreamTest(totalThreads, blockSize, numBlocks);
return;
}
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
hipMalloc((void**)&gpu_a, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_b, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
hipMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
hipMemcpy(gpu_a, a, totalThreads * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, totalThreads * sizeof(int), hipMemcpyHostToDevice);
switch (testType)
{
case GLOBAL:
// Executes global memory operations.
hipLaunchKernelGGL(( executeGlobalMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
break;
case SHARED:
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
hipLaunchKernelGGL(( executeSharedMathOperations), dim3(numBlocks), dim3(blockSize), 3 * totalThreads * sizeof(int), 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case CONSTANT:
// constant doesn't actually take in gpu_a and gpu_b since it uses constant memory. However the random generation is left in so timing can be compared.
hipLaunchKernelGGL(( executeConstantMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case REGISTER:
// Executes global memory operations by saving the value into local registers first.
hipLaunchKernelGGL(( executeRegisterMathOperations), dim3(numBlocks), dim3(blockSize), 0, 0, gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
default:
std::cout << "Unknown test type " << testType << "!" << std::endl;
break;
}
hipMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), hipMemcpyDeviceToHost);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_add_dest);
hipFree(gpu_sub_dest);
hipFree(gpu_mult_dest);
hipFree(gpu_div_dest);
hipFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
for (auto testType = GLOBAL; testType < NUM_GPU_TESTS; ++testType)
{
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks, testType);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << gpu_tests_strings[testType] + " Memory execution took: " << totalTime.count() << " seconds." << std::endl;
}
return 0;
}
| fa3d266168f57f595dbd9fdece3ec6eaec341a11.cu | //Based on the work of Andrew Krepps
#include <iostream>
#include <random>
#include <chrono>
#include <stdio.h>
__constant__ static const int VAL_A = 1;
__constant__ static const int VAL_B = 3;
enum gpu_tests_enum
{
GLOBAL,
SHARED,
CONSTANT,
REGISTER,
STREAM,
NUM_GPU_TESTS
};
gpu_tests_enum& operator++(gpu_tests_enum& e)
{
return e = (e == NUM_GPU_TESTS) ? GLOBAL : static_cast<gpu_tests_enum>(static_cast<int>(e)+1);
}
std::string gpu_tests_strings[NUM_GPU_TESTS] = {
"Global",
"Shared",
"Constant",
"Register",
"Stream"};
// Global GPU add c[i] = a[i] + b[i]
__global__ void addGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Global GPU subtract c[i] = a[i] - b[i]
__global__ void subtractGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Global GPU multiply c[i] = a[i] * b[i]
__global__ void multGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Global GPU div c[i] = a[i] / b[i]
__global__ void divGlob(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Global GPU mod c[i] = a[i] % b[i]
__global__ void modGlob(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add c[i] = a[i] + b[i]
__device__ void add(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] + b[thread_idx];
}
// Device GPU subtract c[i] = a[i] - b[i]
__device__ void subtract(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] - b[thread_idx];
}
// Device GPU multiply c[i] = a[i] * b[i]
__device__ void mult(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] * b[thread_idx];
}
// Device GPU div c[i] = a[i] / b[i]
__device__ void div(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] / b[thread_idx];
}
// Device GPU mod c[i] = a[i] % b[i]
__device__ void mod(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
c[thread_idx] = a[thread_idx] % b[thread_idx];
}
// Device GPU add in register c[i] = a[i] + b[i]
__device__ void addReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA + tempB;
c[thread_idx] = tempResult;
}
// Device GPU subtract in register c[i] = a[i] - b[i]
__device__ void subtractReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA - tempB;
c[thread_idx] = tempResult;
}
// Device GPU multiply in register c[i] = a[i] * b[i]
__device__ void multReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA * tempB;
c[thread_idx] = tempResult;
}
// Device GPU div in register c[i] = a[i] / b[i]
__device__ void divReg(int *a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA / tempB;
c[thread_idx] = tempResult;
}
// Device GPU mod in register c[i] = a[i] % b[i]
__device__ void modReg(int * a, int * b, int * c)
{
const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tempA = a[thread_idx];
int tempB = b[thread_idx];
int tempResult = tempA % tempB;
c[thread_idx] = tempResult;
}
// Executes all 5 shared math operations
__global__ void executeSharedMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
extern __shared__ int sharedMem[];
// Use offsets in the shared mem to create arrays.
int * sharedA = &sharedMem[0];
int * sharedB = &sharedMem[size];
int * sharedRet = &sharedMem[2*size];
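// Note: the shared arrays are indexed by the global thread id and the launch sizes the allocation for all threads, so this kernel effectively assumes a single block.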
sharedA[tid] = a[tid];
sharedB[tid] = b[tid];
// Add sharedA to sharedB and store in addDest
add(sharedA, sharedB, sharedRet);
addDest[tid] = sharedRet[tid];
// Subtract sharedB from sharedA and store in subDest
subtract(sharedA, sharedB, sharedRet);
subDest[tid] = sharedRet[tid];
// Multiply sharedA by sharedB and store in multDest
mult(sharedA, sharedB, sharedRet);
multDest[tid] = sharedRet[tid];
// Divide sharedA by sharedB and store in divDest
div(sharedA, sharedB, sharedRet);
divDest[tid] = sharedRet[tid];
// Mod sharedA by sharedB and store in modDest
mod(sharedA, sharedB, sharedRet);
modDest[tid] = sharedRet[tid];
}
// Executes all 5 global math operations
__global__ void executeGlobalMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
add(a, b, addDest);
// Subtract b from a and store in subDest
subtract(a, b, subDest);
// Multiply a by b and store in multDest
mult(a, b, multDest);
// Divide a by b and store in divDest
div(a, b, divDest);
// Mod a by b and store in modDest
mod(a, b, modDest);
}
// Executes all 5 register math operations
__global__ void executeRegisterMathOperations(int * a, int * b, int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
// Add a to b and store in addDest
addReg(a, b, addDest);
// Subtract b from a and store in subDest
subtractReg(a, b, subDest);
// Multiply a by b and store in multDest
multReg(a, b, multDest);
// Divide a by b and store in divDest
divReg(a, b, divDest);
// Mod a by b and store in modDest
modReg(a, b, modDest);
}
// Executes all 5 constant math operations
__global__ void executeConstantMathOperations(int * addDest, int * subDest, int * multDest, int * divDest, int * modDest, const int size)
{
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
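// VAL_A and VAL_B live in __constant__ memory and are known at compile time, so this kernel mostly measures launch overhead and the global-memory stores.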
// Add VAL_A to VAL_B and store in addDest
addDest[tid] = VAL_A + VAL_B;
// Subtract VAL_B from VAL_A and store in subDest
subDest[tid] = VAL_A - VAL_B;
// Multiply VAL_A by VAL_B and store in multDest
multDest[tid] = VAL_A * VAL_B;
// Divide VAL_A by VAL_B and store in divDest
divDest[tid] = VAL_A / VAL_B; // VAL_B is chosen to not be 0.
// Mod VAL_A by VAL_B and store in modDest
modDest[tid] = VAL_A % VAL_B; // VAL_B is chosen to not be 0.
}
// Host (Cpu) add c[i] = a[i] + b[i]
void hostAdd(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] + b[i];
}
}
// Host (Cpu) sub c[i] = a[i] - b[i]
void hostSub(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] - b[i];
}
}
// Host (Cpu) multiply c[i] = a[i] * b[i]
void hostMult(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
c[i] = a[i] * b[i];
}
}
// Host (Cpu) divide c[i] = a[i] / b[i]
void hostDiv(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
if (b[i] != 0)
{
c[i] = a[i] / b[i];
}
else
{
c[i] = 0;
}
}
}
// Host (Cpu) mod c[i] = a[i] % b[i]
void hostMod(int * a, int * b, int *c, const int size)
{
for (int i = 0; i < size; ++i)
{
// Protect against divide by 0.
// cuda code catches this error and sets result to 0 by default.
if (b[i] == 0)
{
c[i] = 0;
}
else
{
c[i] = a[i] % b[i];
}
}
}
// Executes each of the host (cpu) tests by creating local memory and executing all 5 math operations on the data.
// The data is filled with random numbers using the same seed as the GPU tests.
void executeHostTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], c[totalThreads];
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Add all of the numbers c[i] = a[i] + b[i];
hostAdd(a,b,c, totalThreads);
// Subtract all of the numbers c[i] = a[i] - b[i];
hostSub(a,b,c, totalThreads);
// Multiply all of the numbers c[i] = a[i] * b[i];
hostMult(a,b,c, totalThreads);
// Divides all of the numbers c[i] = a[i] / b[i]; if b[i] == 0, c[i] = 0
hostDiv(a,b,c, totalThreads);
// Mod all of the numbers c[i] = a[i] % b[i];
hostMod(a,b,c, totalThreads);
}
// Executes a streams test, which is similar to the GPU tests below except that here the
// host-to-device copies, kernel launches, and device-to-host copies are queued asynchronously on a CUDA stream.
// The data is filled with random numbers using the same seed as the CPU tests.
void executeStreamTest(const int totalThreads, const int blockSize, const int numBlocks)
{
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
// Copy the inputs asynchronously and call the global version of each math kernel on the stream.
// Work queued on the stream executes in order without blocking the host, so the copies and
// kernels can overlap with host-side work until the synchronize below.
cudaMemcpyAsync(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice, stream);
// Asynchronously add and then copy memory to host.
addGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_add_dest);
cudaMemcpyAsync(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously subtract and then copy memory to host.
subtractGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_sub_dest);
cudaMemcpyAsync(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously multiply and then copy memory to host.
multGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_mult_dest);
cudaMemcpyAsync(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously divide and then copy memory to host.
divGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_div_dest);
cudaMemcpyAsync(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
// Asynchronously modulous and then copy memory to host.
modGlob<<<numBlocks, blockSize, 0, stream>>>(gpu_a, gpu_b, gpu_mod_dest);
cudaMemcpyAsync(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost, stream);
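// Block until every copy and kernel queued on the stream has completed before the host reads the result arrays.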
cudaStreamSynchronize(stream);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
cudaStreamDestroy(stream);
}
// Executes each of the gpu tests by creating local memory, copying it to global memory, and then performing
// all 5 math operations on the data.
// The data is filled with random numbers using the same seed as the CPU tests.
void executeGPUTest(const int totalThreads, const int blockSize, const int numBlocks, const gpu_tests_enum testType)
{
// The stream test works differently enough that it requires a different method since its calls will all be async.
if (testType == STREAM)
{
executeStreamTest(totalThreads, blockSize, numBlocks);
return;
}
int a[totalThreads], b[totalThreads], add_dest[totalThreads], sub_dest[totalThreads], mult_dest[totalThreads], div_dest[totalThreads], mod_dest[totalThreads];
int *gpu_a, *gpu_b, *gpu_add_dest, *gpu_sub_dest, *gpu_mult_dest, *gpu_div_dest, *gpu_mod_dest;
cudaMalloc((void**)&gpu_a, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_b, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_add_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_sub_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mult_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_div_dest, totalThreads * sizeof(int));
cudaMalloc((void**)&gpu_mod_dest, totalThreads * sizeof(int));
// Create a random generator that produces random numbers from 0 to 4.
// Use a set seed so output is deterministic
unsigned seed = 12345;
std::default_random_engine gen(seed);
std::uniform_int_distribution<int> dist(0,4);
for (size_t i = 0; i < totalThreads; ++i)
{
a[i] = i;
b[i] = dist(gen);
}
cudaMemcpy(gpu_a, a, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, totalThreads * sizeof(int), cudaMemcpyHostToDevice);
switch (testType)
{
case GLOBAL:
// Executes global memory operations.
executeGlobalMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, numBlocks * blockSize);
break;
case SHARED:
// The third parameter is the size of the shared memory
// We multiply by 3 because we need to copy A and B and then also have room for the return in shared memory.
executeSharedMathOperations<<<numBlocks, blockSize, 3 * totalThreads * sizeof(int)>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case CONSTANT:
// constant doesn't actually take in gpu_a and gpu_b since it uses constant memory. However the random generation is left in so timing can be compared.
executeConstantMathOperations<<<numBlocks, blockSize>>>(gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
case REGISTER:
// Executes global memory operations by saving the value into local registers first.
executeRegisterMathOperations<<<numBlocks, blockSize>>>(gpu_a,gpu_b,gpu_add_dest,gpu_sub_dest,gpu_mult_dest,gpu_div_dest,gpu_mod_dest, totalThreads);
break;
default:
std::cout << "Unknown test type " << testType << "!" << std::endl;
break;
}
cudaMemcpy(add_dest, gpu_add_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(sub_dest, gpu_sub_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mult_dest, gpu_mult_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(div_dest, gpu_div_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(mod_dest, gpu_mod_dest, totalThreads*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_add_dest);
cudaFree(gpu_sub_dest);
cudaFree(gpu_mult_dest);
cudaFree(gpu_div_dest);
cudaFree(gpu_mod_dest);
}
void printArray(const int * const arr, const int xSize, const int ySize)
{
for (size_t i = 0; i < xSize; ++i)
{
for(size_t j = 0; j < ySize; ++j)
{
std::cout << arr[i * ySize + j] << " ";
}
std::cout << '\n';
}
std::cout << std::flush;
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 256;
int blockSize = 256;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
auto startTime = std::chrono::system_clock::now();
executeHostTest(totalThreads, blockSize, numBlocks);
auto endTime = std::chrono::system_clock::now();
std::chrono::duration<double> totalTime = endTime-startTime;
std::cout << "Host execution took: " << totalTime.count() << " seconds." << std::endl;
for (auto testType = GLOBAL; testType < NUM_GPU_TESTS; ++testType)
{
startTime = std::chrono::system_clock::now();
executeGPUTest(totalThreads, blockSize, numBlocks, testType);
endTime = std::chrono::system_clock::now();
totalTime = endTime-startTime;
std::cout << gpu_tests_strings[testType] + " Memory execution took: " << totalTime.count() << " seconds." << std::endl;
}
return 0;
}
|
67eae237f22ed609285a113b17d2703fb929313d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Ftype>
__global__ void LRNFillScale(const int nthreads, const Ftype* const in,
const int num, const int channels, const int height,
const int width, const int size, const float alpha_over_size,
const float k, Ftype* const scale) {
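// Each thread owns one (n, h, w) position and slides a window of _size_ channels along the channel axis, maintaining scale[c] = k + (alpha/size) * sum of squared inputs over that window.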
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Ftype* const in_off = in + offset;
Ftype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
float accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Ftype>
__global__ void LRNComputeOutput(const int nthreads, const Ftype* const in,
const Ftype* const scale, const float negative_beta, Ftype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(static_cast<float>(scale[index]), negative_beta);
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::CrossChannelForward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
// First, compute scale
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* scale_data = scale_.template mutable_gpu_data<Ftype>();
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = num_ * height_ * width_;
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
n_threads, bottom_data, num_, channels_, height_, width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNComputeOutput<Ftype>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
n_threads, bottom_data, scale_data, -beta_, top[0]->mutable_gpu_data<Ftype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
template <typename Btype>
__global__ void LRNComputeDiff(const int nthreads,
const Btype* const bottom_data, const Btype* const top_data,
const Btype* const scale, const Btype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const float negative_beta,
const float cache_ratio, Btype* const bottom_diff) {
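// Across-channel LRN gradient: bottom_diff = top_diff * scale^(-beta) - cache_ratio * bottom_data * (windowed sum of top_diff * top_data / scale), where the caller passes cache_ratio = 2 * alpha * beta / size.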
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Btype* const bottom_off = bottom_data + offset;
const Btype* const top_off = top_data + offset;
const Btype* const scale_off = scale + offset;
const Btype* const top_diff_off = top_diff + offset;
Btype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
float accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(static_cast<float>(scale_off[(head - post_pad) * step]), negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(static_cast<float>(scale_off[(head - post_pad) * step]), negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::CrossChannelBackward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
int n_threads = num_ * height_ * width_;
hipStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LRNComputeDiff<Btype>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream,
n_threads, bottom[0]->gpu_data<Btype>(), top[0]->gpu_data<Btype>(),
scale_.template gpu_data<Btype>(), top[0]->gpu_diff<Btype>(),
num_, channels_, height_, width_,
size_, -beta_, 2. * alpha_ * beta_ / size_,
bottom[0]->mutable_gpu_diff<Btype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(hipStreamSynchronize(stream));
}
INSTANTIATE_LAYER_GPU_FW_MEMBER_FB(LRNLayer, CrossChannelForward_gpu);
INSTANTIATE_LAYER_GPU_BW_MEMBER_FB(LRNLayer, CrossChannelBackward_gpu);
INSTANTIATE_LAYER_GPU_FUNCS_FB(LRNLayer);
} // namespace caffe
| 67eae237f22ed609285a113b17d2703fb929313d.cu | #include <vector>
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Ftype>
__global__ void LRNFillScale(const int nthreads, const Ftype* const in,
const int num, const int channels, const int height,
const int width, const int size, const float alpha_over_size,
const float k, Ftype* const scale) {
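// Each thread owns one (n, h, w) position and slides a window of _size_ channels along the channel axis, maintaining scale[c] = k + (alpha/size) * sum of squared inputs over that window.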
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Ftype* const in_off = in + offset;
Ftype* const scale_off = scale + offset;
int head = 0;
const int pre_pad = (size - 1) / 2;
const int post_pad = size - pre_pad - 1;
float accum_scale = 0;
// fill the scale at [n, :, h, w]
// accumulate values
while (head < post_pad && head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_scale += in_off[head * step] * in_off[head * step];
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_scale -= in_off[(head - size) * step]
* in_off[(head - size) * step];
}
scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size;
++head;
}
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom,
const vector<Blob*>& top) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelForward_gpu(bottom, top);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelForward(bottom, top);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Ftype>
__global__ void LRNComputeOutput(const int nthreads, const Ftype* const in,
const Ftype* const scale, const float negative_beta, Ftype* const out) {
CUDA_KERNEL_LOOP(index, nthreads) {
out[index] = in[index] * pow(static_cast<float>(scale[index]), negative_beta);
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::CrossChannelForward_gpu(
const vector<Blob*>& bottom, const vector<Blob*>& top) {
// First, compute scale
const Ftype* bottom_data = bottom[0]->gpu_data<Ftype>();
Ftype* scale_data = scale_.template mutable_gpu_data<Ftype>();
// We will launch one kernel for each pixel location, and have the kernel
// go through all the channels.
int n_threads = num_ * height_ * width_;
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
n_threads, bottom_data, num_, channels_, height_, width_, size_,
alpha_ / size_, k_, scale_data);
CUDA_POST_KERNEL_CHECK;
n_threads = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeOutput<Ftype><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
n_threads, bottom_data, scale_data, -beta_, top[0]->mutable_gpu_data<Ftype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top,
const vector<bool>& propagate_down, const vector<Blob*>& bottom) {
switch (this->layer_param_.lrn_param().norm_region()) {
case LRNParameter_NormRegion_ACROSS_CHANNELS:
CrossChannelBackward_gpu(top, propagate_down, bottom);
break;
case LRNParameter_NormRegion_WITHIN_CHANNEL:
WithinChannelBackward(top, propagate_down, bottom);
break;
default:
LOG(FATAL) << "Unknown normalization region.";
}
}
template <typename Btype>
__global__ void LRNComputeDiff(const int nthreads,
const Btype* const bottom_data, const Btype* const top_data,
const Btype* const scale, const Btype* const top_diff,
const int num, const int channels, const int height,
const int width, const int size, const float negative_beta,
const float cache_ratio, Btype* const bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local offset
const int w = index % width;
const int h = (index / width) % height;
const int n = index / width / height;
const int offset = (n * channels * height + h) * width + w;
const int step = height * width;
const Btype* const bottom_off = bottom_data + offset;
const Btype* const top_off = top_data + offset;
const Btype* const scale_off = scale + offset;
const Btype* const top_diff_off = top_diff + offset;
Btype* const bottom_diff_off = bottom_diff + offset;
int head = 0;
const int pre_pad = size - (size + 1) / 2;
const int post_pad = size - pre_pad - 1;
float accum_ratio = 0;
// accumulate values
while (head < post_pad && head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
++head;
}
// both add and subtract
while (head < channels) {
accum_ratio += top_diff_off[head * step] * top_off[head * step] /
scale_off[head * step];
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(static_cast<float>(scale_off[(head - post_pad) * step]), negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
// subtract only
while (head < channels + post_pad) {
if (head - size >= 0) {
accum_ratio -= top_diff_off[(head - size) * step] *
top_off[(head - size) * step] / scale_off[(head - size) * step];
}
bottom_diff_off[(head - post_pad) * step] =
top_diff_off[(head - post_pad) * step]
* pow(static_cast<float>(scale_off[(head - post_pad) * step]), negative_beta)
- cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio;
++head;
}
}
}
template <typename Ftype, typename Btype>
void LRNLayer<Ftype, Btype>::CrossChannelBackward_gpu(
const vector<Blob*>& top, const vector<bool>& propagate_down,
const vector<Blob*>& bottom) {
int n_threads = num_ * height_ * width_;
cudaStream_t stream = Caffe::thread_stream();
// NOLINT_NEXT_LINE(whitespace/operators)
LRNComputeDiff<Btype><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, stream>>>(
n_threads, bottom[0]->gpu_data<Btype>(), top[0]->gpu_data<Btype>(),
scale_.template gpu_data<Btype>(), top[0]->gpu_diff<Btype>(),
num_, channels_, height_, width_,
size_, -beta_, 2. * alpha_ * beta_ / size_,
bottom[0]->mutable_gpu_diff<Btype>());
CUDA_POST_KERNEL_CHECK;
CUDA_CHECK(cudaStreamSynchronize(stream));
}
INSTANTIATE_LAYER_GPU_FW_MEMBER_FB(LRNLayer, CrossChannelForward_gpu);
INSTANTIATE_LAYER_GPU_BW_MEMBER_FB(LRNLayer, CrossChannelBackward_gpu);
INSTANTIATE_LAYER_GPU_FUNCS_FB(LRNLayer);
} // namespace caffe
|
8a84f56cb77e1b1b81dfc6010d98d3f0210cfac4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <numeric>
#include <vector>
#include <complex>
#include <random>
#include <cstdlib>
#include <cstdio>
#include <hipfft.h>
#include <cufftMp.h>
#include <mpi.h>
#include <nvshmem.h>
#include "../common/error_checks.hpp"
#include "../common/scaling.cuh"
#include "../common/generate_random.hpp"
#include "../iterators/box_iterator.hpp"
/**
* This sample illustrates a basic use of cuFFTMp using the built-in, optimized data distributions.
*
* It assumes the CPU data is initially distributed according to CUFFT_XT_FORMAT_INPLACE, a.k.a. X-Slabs.
* Given a global array of size X * Y * Z, every MPI rank owns approximately (X / ngpus) * Y * Z entries.
* More precisely,
* - The first (X % ngpus) MPI ranks each own (X / ngpus + 1) planes of size Y * Z,
* - The remaining MPI ranks each own (X / ngpus) planes of size Y * Z
*
* The CPU data is then copied on GPU and a forward transform is applied.
*
* After that transform, GPU data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED, a.k.a. Y-Slabs.
* Given a global array of size X * Y * Z, every MPI rank owns approximately X * (Y / ngpus) * Z entries.
* More precisely,
* - The first (Y % ngpus) MPI ranks each own (Y / ngpus + 1) planes of size X * Z,
* - The remaining MPI ranks each own (Y / ngpus) planes of size X * Z
*
* A scaling kernel is then applied to the distributed GPU data (distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED).
* This kernel prints some elements to illustrate the CUFFT_XT_FORMAT_INPLACE_SHUFFLED data distribution and
* normalizes entries by (nx * ny * nz)
*
* Finally, a backward transform is applied.
* After this, data is again distributed according to CUFFT_XT_FORMAT_INPLACE, same as the input data.
*
* Data is finally copied back to CPU and compared to the input data. They should be almost identical.
*/
void run_c2c_fwd_inv(size_t nx, size_t ny, size_t nz, std::vector<std::complex<float>>& cpu_data, int rank, int size, MPI_Comm comm) {
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE
hipComplex* gpu_data = (hipComplex*)nvshmem_malloc(cpu_data.size() * sizeof(hipComplex));
CUDA_CHECK(hipMemcpy(gpu_data, cpu_data.data(), cpu_data.size() * sizeof(hipComplex), hipMemcpyDefault));
hipfftHandle plan = 0;
hipStream_t stream = nullptr;
CUDA_CHECK(hipStreamCreate(&stream));
CUFFT_CHECK(hipfftCreate(&plan));
CUFFT_CHECK(cufftMpAttachComm(plan, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(hipfftSetStream(plan, stream));
CUFFT_CHECK(cufftXtSetSubformatDefault(plan, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED));
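// Plan data layouts: X-slabs (CUFFT_XT_FORMAT_INPLACE) on input, Y-slabs
// (CUFFT_XT_FORMAT_INPLACE_SHUFFLED) after the forward transform.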
size_t workspace;
CUFFT_CHECK(hipfftMakePlan3d(plan, nx, ny, nz, HIPFFT_C2C, &workspace));
// Run C2C Fwd
CUFFT_CHECK(hipfftExecC2C(plan, gpu_data, gpu_data, HIPFFT_FORWARD));
// Data is now distributed as Y-Slabs
// We run a kernel on the distributed data, using the BoxIterator's for convenience
auto[begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, HIPFFT_C2C,
rank, size, nx, ny, nz, gpu_data);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
hipLaunchKernelGGL(( scaling_kernel), dim3(num_blocks), dim3(num_threads), 0, stream, begin_d, end_d, rank, size, nx, ny, nz);
// Run C2C Bwd
CUFFT_CHECK(hipfftExecC2C(plan, gpu_data, gpu_data, HIPFFT_BACKWARD));
// Copy back and free
// Data is distributed as X-Slabs again
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipMemcpy(cpu_data.data(), gpu_data, cpu_data.size() * sizeof(hipComplex), hipMemcpyDefault));
nvshmem_free(gpu_data);
CUFFT_CHECK(hipfftDestroy(plan));
CUDA_CHECK(hipStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(hipGetDeviceCount(&ndevices));
CUDA_CHECK(hipSetDevice(rank % ndevices));
nvshmemx_init_attr_t attr;
MPI_Comm comm = MPI_COMM_WORLD;
attr.mpi_comm = (void*)&comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
size_t nx = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
size_t ny = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
size_t nz = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
// We start with X-Slabs
// Ranks 0 ... (nx % size - 1) have 1 more element in the X dimension
// and every rank own all elements in the Y and Z dimensions.
int ranks_cutoff = nx % size;
size_t my_nx = (nx / size) + (rank < ranks_cutoff ? 1 : 0);
size_t my_ny = ny;
size_t my_nz = nz;
printf("Hello from rank %d/%d using GPU %d transform of size %zu x %zu x %zu, local size %zu x %zu x %zu\n", rank, size, rank % ndevices, nx, ny, nz, my_nx, my_ny, my_nz);
// Generate local, distributed, data
std::vector<std::complex<float>> data(my_nx * my_ny * my_nz);
generate_random(data, rank);
std::vector<std::complex<float>> ref = data;
// Run Forward and Inverse FFT
run_c2c_fwd_inv(nx, ny, nz, data, rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, HIPFFT_C2C, rank, size, nx, ny, nz));
nvshmem_finalize();
MPI_Finalize();
return assess_error(error);
}
| 8a84f56cb77e1b1b81dfc6010d98d3f0210cfac4.cu | #include <numeric>
#include <vector>
#include <complex>
#include <random>
#include <cstdlib>
#include <cstdio>
#include <cufft.h>
#include <cufftMp.h>
#include <mpi.h>
#include <nvshmem.h>
#include "../common/error_checks.hpp"
#include "../common/scaling.cuh"
#include "../common/generate_random.hpp"
#include "../iterators/box_iterator.hpp"
/**
* This sample illustrates a basic use of cuFFTMp using the built-in, optimized data distributions.
*
* It assumes the CPU data is initially distributed according to CUFFT_XT_FORMAT_INPLACE, a.k.a. X-Slabs.
* Given a global array of size X * Y * Z, every MPI rank owns approximately (X / ngpus) * Y * Z entries.
* More precisely,
* - The first (X % ngpus) MPI ranks each own (X / ngpus + 1) planes of size Y * Z,
* - The remaining MPI ranks each own (X / ngpus) planes of size Y * Z
*
* The CPU data is then copied on GPU and a forward transform is applied.
*
* After that transform, GPU data is distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED, a.k.a. Y-Slabs.
* Given a global array of size X * Y * Z, every MPI rank owns approximately X * (Y / ngpus) * Z entries.
* More precisely,
* - The first (Y % ngpus) MPI ranks each own (Y / ngpus + 1) planes of size X * Z,
* - The remaining MPI ranks each own (Y / ngpus) planes of size X * Z
*
* A scaling kernel is then applied to the distributed GPU data (distributed according to CUFFT_XT_FORMAT_INPLACE_SHUFFLED).
* This kernel prints some elements to illustrate the CUFFT_XT_FORMAT_INPLACE_SHUFFLED data distribution and
* normalizes entries by (nx * ny * nz)
*
* Finally, a backward transform is applied.
* After this, data is again distributed according to CUFFT_XT_FORMAT_INPLACE, same as the input data.
*
* Data is finally copied back to CPU and compared to the input data. They should be almost identical.
*/
void run_c2c_fwd_inv(size_t nx, size_t ny, size_t nz, std::vector<std::complex<float>>& cpu_data, int rank, int size, MPI_Comm comm) {
// Allocate GPU memory, copy CPU data to GPU
// Data is initially distributed according to CUFFT_XT_FORMAT_INPLACE
cuComplex* gpu_data = (cuComplex*)nvshmem_malloc(cpu_data.size() * sizeof(cuComplex));
CUDA_CHECK(cudaMemcpy(gpu_data, cpu_data.data(), cpu_data.size() * sizeof(cuComplex), cudaMemcpyDefault));
cufftHandle plan = 0;
cudaStream_t stream = nullptr;
CUDA_CHECK(cudaStreamCreate(&stream));
CUFFT_CHECK(cufftCreate(&plan));
CUFFT_CHECK(cufftMpAttachComm(plan, CUFFT_COMM_MPI, &comm));
CUFFT_CHECK(cufftSetStream(plan, stream));
CUFFT_CHECK(cufftXtSetSubformatDefault(plan, CUFFT_XT_FORMAT_INPLACE, CUFFT_XT_FORMAT_INPLACE_SHUFFLED));
size_t workspace;
CUFFT_CHECK(cufftMakePlan3d(plan, nx, ny, nz, CUFFT_C2C, &workspace));
// Run C2C Fwd
CUFFT_CHECK(cufftExecC2C(plan, gpu_data, gpu_data, CUFFT_FORWARD));
// Data is now distributed as Y-Slabs
// We run a kernel on the distributed data, using the BoxIterator's for convenience
auto[begin_d, end_d] = BoxIterators(CUFFT_XT_FORMAT_INPLACE_SHUFFLED, CUFFT_C2C,
rank, size, nx, ny, nz, gpu_data);
const size_t num_elements = std::distance(begin_d, end_d);
const size_t num_threads = 128;
const size_t num_blocks = (num_elements + num_threads - 1) / num_threads;
scaling_kernel<<<num_blocks, num_threads, 0, stream>>>(begin_d, end_d, rank, size, nx, ny, nz);
// Run C2C Bwd
CUFFT_CHECK(cufftExecC2C(plan, gpu_data, gpu_data, CUFFT_INVERSE));
// Copy back and free
// Data is distributed as X-Slabs again
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaMemcpy(cpu_data.data(), gpu_data, cpu_data.size() * sizeof(cuComplex), cudaMemcpyDefault));
nvshmem_free(gpu_data);
CUFFT_CHECK(cufftDestroy(plan));
CUDA_CHECK(cudaStreamDestroy(stream));
};
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int rank, size;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
int ndevices;
CUDA_CHECK(cudaGetDeviceCount(&ndevices));
CUDA_CHECK(cudaSetDevice(rank % ndevices));
nvshmemx_init_attr_t attr;
MPI_Comm comm = MPI_COMM_WORLD;
attr.mpi_comm = (void*)&comm;
nvshmemx_init_attr(NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
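// NVSHMEM is bootstrapped on top of the MPI communicator; the GPU buffers handed to
// cuFFTMp in this sample come from the NVSHMEM symmetric heap (nvshmem_malloc in run_c2c_fwd_inv).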
size_t nx = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
size_t ny = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
size_t nz = (argc >= 2 ? atoi(argv[1]) : 8*size); // any value >= size is OK
// We start with X-Slabs
// Ranks 0 ... (nx % size - 1) have 1 more element in the X dimension
// and every rank own all elements in the Y and Z dimensions.
int ranks_cutoff = nx % size;
size_t my_nx = (nx / size) + (rank < ranks_cutoff ? 1 : 0);
size_t my_ny = ny;
size_t my_nz = nz;
printf("Hello from rank %d/%d using GPU %d transform of size %zu x %zu x %zu, local size %zu x %zu x %zu\n", rank, size, rank % ndevices, nx, ny, nz, my_nx, my_ny, my_nz);
// Generate local, distributed, data
std::vector<std::complex<float>> data(my_nx * my_ny * my_nz);
generate_random(data, rank);
std::vector<std::complex<float>> ref = data;
// Run Forward and Inverse FFT
run_c2c_fwd_inv(nx, ny, nz, data, rank, size, MPI_COMM_WORLD);
// Compute error
double error = compute_error(ref, data, buildBox3D(CUFFT_XT_FORMAT_INPLACE, CUFFT_C2C, rank, size, nx, ny, nz));
nvshmem_init();
MPI_Finalize();
return assess_error(error);
}
|
fc0c78decf5fae97a7dfaea7a326255c5f0664c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <torch/extension.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include "box_convolution.h" // for `enum class Parameter`
#define BLOCK_SIZE 256
#define NUM_THREADS 1024
using std::min;
using std::max;
namespace gpu {
template <typename T, size_t N>
using CudaAcsr = const at::PackedTensorAccessor32<T, N, torch::RestrictPtrTraits>;
// TODO switch to square blocks
template <bool normalize, bool exact, typename scalar_t>
__global__ void boxConvUpdateGradInputKernel(
CudaAcsr<scalar_t,3> gradOutputInt, scalar_t * __restrict__ tmpArray,
const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt ,
const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt ,
const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac,
const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac,
const scalar_t * __restrict__ area, const int nParams) {
int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id;
const int32_t h = gradOutputInt.size(1) - 1;
const int32_t w = gradOutputInt.size(2) - 1;
const int32_t y = id % w; id /= w;
const int32_t x = id % h; id /= h;
const int32_t paramIdx = id % nParams;
// `id` is now the current plane number
auto gradOutputIntPlane = gradOutputInt[id];
if (id < gradOutputInt.size(0)) {
const int32_t xMinCurr = xMinInt[paramIdx];
const int32_t xMaxCurr = xMaxInt[paramIdx];
const int32_t yMinCurr = yMinInt[paramIdx];
const int32_t yMaxCurr = yMaxInt[paramIdx];
const int t = max(0, min(x+xMinCurr, h));
const int b = max(0, min(x+xMaxCurr, h));
const int l = max(0, min(y+yMinCurr, w));
const int r = max(0, min(y+yMaxCurr, w));
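// gradOutputIntPlane is a 2D integral image of grad_output, so the four-corner
// difference below gives the sum over the clamped box [t, b] x [l, r] in O(1).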
scalar_t outValue;
outValue =
gradOutputIntPlane[b][r]
- gradOutputIntPlane[t][r]
- gradOutputIntPlane[b][l]
+ gradOutputIntPlane[t][l];
if (exact) {
const scalar_t xMinCurrFrac = xMinFrac[paramIdx];
const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx];
const scalar_t yMinCurrFrac = yMinFrac[paramIdx];
const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx];
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
// -- xMax border
outValue +=
( gradOutputIntPlane[bAdv][r]
- gradOutputIntPlane[b ][r]
- gradOutputIntPlane[bAdv][l]
+ gradOutputIntPlane[b ][l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntPlane[b][rAdv]
- gradOutputIntPlane[b][r ]
- gradOutputIntPlane[t][rAdv]
+ gradOutputIntPlane[t][r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntPlane[t ][r]
- gradOutputIntPlane[tAdv][r]
- gradOutputIntPlane[t ][l]
+ gradOutputIntPlane[tAdv][l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntPlane[b][l ]
- gradOutputIntPlane[b][lAdv]
- gradOutputIntPlane[t][l ]
+ gradOutputIntPlane[t][lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr >= h or
y+yMaxCurr >= w or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[b+1][r+1]
- gradOutputIntPlane[b ][r+1]
- gradOutputIntPlane[b+1][r ]
+ gradOutputIntPlane[b ][r ]));
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr > h or
y+yMaxCurr >= w or
x+xMinCurr <= 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[tAdv+1][r+1]
- gradOutputIntPlane[tAdv+1][r ]
- gradOutputIntPlane[tAdv ][r+1]
+ gradOutputIntPlane[tAdv ][r ]));
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr >= h or
y+yMinCurr > w or
x+xMaxCurr < 0 or
y+yMinCurr <= 0 or
b == bAdv or
l == lAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[b+1][lAdv+1]
- gradOutputIntPlane[b ][lAdv+1]
- gradOutputIntPlane[b+1][lAdv ]
+ gradOutputIntPlane[b ][lAdv ]));
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr > h or
y+yMinCurr > w or
x+xMinCurr <= 0 or
y+yMinCurr <= 0 or
t == tAdv or
l == lAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[tAdv+1][lAdv+1]
- gradOutputIntPlane[tAdv+1][lAdv ]
- gradOutputIntPlane[tAdv ][lAdv+1]
+ gradOutputIntPlane[tAdv ][lAdv ]));
}
*tmpArray = outValue * (normalize ? area[paramIdx] : static_cast<scalar_t>(1));
}
}
template <bool normalize, bool exact>
void boxConvUpdateGradInput(
at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt ,
at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac,
at::Tensor & area, at::Tensor & grad_output_integrated, at::Tensor & tmpArray) {
// TODO use square blocks as in `boxConvUpdateOutput`?
const int threadsNeeded = tmpArray.numel();
int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvUpdateGradInput", ([&] {
auto gradOutputIntFlattened = grad_output_integrated.view(
{-1, grad_output_integrated.size(-2), grad_output_integrated.size(-1)});
auto gradOutputIntAcsr =
gradOutputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
hipLaunchKernelGGL(( boxConvUpdateGradInputKernel <normalize, exact>)
, dim3(numBlocks), dim3(NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradOutputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(),
normalize ? area.data_ptr<scalar_t>() : nullptr, xMinInt.numel());
THCudaCheck(hipGetLastError());
}));
}
// explicitly instantiate
template void boxConvUpdateGradInput<true, true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<false, true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<true, false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<false, false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
// TODO overload for exact/truncated mode
// TODO accept only three pairs of parameter arrays, not four (one is always redundant)
template <Parameter parameter, bool exact, typename scalar_t>
__global__ void boxConvAccGradParametersKernel(
CudaAcsr<scalar_t,3> inputInt, scalar_t * __restrict__ tmpArray,
const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt ,
const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt ,
const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac,
const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac,
const int nParams) {
int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id;
const int32_t h = inputInt.size(1) - 1;
const int32_t w = inputInt.size(2) - 1;
const int32_t y = id % w + 1; id /= w;
const int32_t x = id % h + 1; id /= h;
const int32_t paramIdx = id % nParams; id /= nParams;
// `id` is now the current absolute input plane number
auto inputIntPlane = inputInt[id];
if (id < inputInt.size(0)) {
const int32_t xMinCurr = xMinInt[paramIdx];
const int32_t xMaxCurr = xMaxInt[paramIdx];
const int32_t yMinCurr = yMinInt[paramIdx];
const int32_t yMaxCurr = yMaxInt[paramIdx];
// TODO only define these if `exact == true`
const scalar_t xMinCurrFrac = xMinFrac[paramIdx];
const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx];
const scalar_t yMinCurrFrac = yMinFrac[paramIdx];
const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx];
int valid;
int cornerX, cornerY;
scalar_t delta = 0;
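// delta accumulates d(box sum)/d(edge position): a one-pixel strip of the integral image
// along the moving border, plus fractional corner terms in exact mode. The sign applied
// below reflects whether growing the parameter grows or shrinks the box.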
if (parameter == Parameter::xMin) {
if (exact) {
// TODO maybe use `input` instead of `inputInt`
valid =
not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMinCurr < 1);
cornerX = max(0,min(h-1,x+xMinCurr-1));
cornerY = max(0,min(w-1,y+yMinCurr-1));
const scalar_t tlCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMinCurr < 1);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t trCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += trCorner * yMaxCurrFrac;
delta += tlCorner * yMinCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMinCurr , w))];
delta *= (x+xMinCurr >= 1) & (x+xMinCurr <= h);
*tmpArray = -delta;
}
else if (parameter == Parameter::xMax) {
if (exact) {
valid =
not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t blCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t brCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += brCorner * yMaxCurrFrac;
delta += blCorner * yMinCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))];
delta *= (x+xMaxCurr >= 0) & (x+xMaxCurr < h);
*tmpArray = delta;
}
else if (parameter == Parameter::yMin) {
if (exact) {
valid =
not (y+yMinCurr < 1) & not (x+xMinCurr < 1) & not (x+xMinCurr > h);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t tlCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMinCurr < 1) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t blCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += tlCorner * xMinCurrFrac;
delta += blCorner * xMaxCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr -1, w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr -1, w))];
delta *= (y+yMinCurr >= 1) & (y+yMinCurr <= w);
*tmpArray = -delta;
}
else if (parameter == Parameter::yMax) {
if (exact) {
valid =
not (y+yMaxCurr >= w) & not (x+xMinCurr < 1) & not (x+xMinCurr > h);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t trCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr >= w) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t brCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += trCorner * xMinCurrFrac;
delta += brCorner * xMaxCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr +1, w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr +1, w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))];
delta *= (y+yMaxCurr >= 0) & (y+yMaxCurr < w);
*tmpArray = delta;
}
}
}
template <bool exact>
void boxConvAccGradParameters(
// tmpArray size: {batchSize, nInputPlanes, numFilters, h, w}
at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt ,
at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac,
at::Tensor & input_integrated, at::Tensor & tmpArray, Parameter parameter) {
// TODO switch to square blocks?
const int threadsNeeded = tmpArray.numel();
int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvAccGradParameters", ([&] {
auto inputIntFlattened = input_integrated.view(
{-1, input_integrated.size(-2), input_integrated.size(-1)});
auto inputIntAcsr =
inputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
switch (parameter) {
case Parameter::xMin:
hipLaunchKernelGGL(( boxConvAccGradParametersKernel <Parameter::xMin, exact>)
, dim3(numBlocks), dim3(NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::xMax:
hipLaunchKernelGGL(( boxConvAccGradParametersKernel <Parameter::xMax, exact>)
, dim3(numBlocks), dim3(NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::yMin:
hipLaunchKernelGGL(( boxConvAccGradParametersKernel <Parameter::yMin, exact>)
, dim3(numBlocks), dim3(NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::yMax:
hipLaunchKernelGGL(( boxConvAccGradParametersKernel <Parameter::yMax, exact>)
, dim3(numBlocks), dim3(NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
}
THCudaCheck(hipGetLastError());
}));
}
// explicitly instantiate
template void boxConvAccGradParameters<true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, Parameter);
template void boxConvAccGradParameters<false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, Parameter);
}
| fc0c78decf5fae97a7dfaea7a326255c5f0664c8.cu | #include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include "box_convolution.h" // for `enum class Parameter`
#define BLOCK_SIZE 256
#define NUM_THREADS 1024
using std::min;
using std::max;
namespace gpu {
template <typename T, size_t N>
using CudaAcsr = const at::PackedTensorAccessor32<T, N, torch::RestrictPtrTraits>;
// TODO switch to square blocks
template <bool normalize, bool exact, typename scalar_t>
__global__ void boxConvUpdateGradInputKernel(
CudaAcsr<scalar_t,3> gradOutputInt, scalar_t * __restrict__ tmpArray,
const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt ,
const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt ,
const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac,
const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac,
const scalar_t * __restrict__ area, const int nParams) {
int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id;
const int32_t h = gradOutputInt.size(1) - 1;
const int32_t w = gradOutputInt.size(2) - 1;
const int32_t y = id % w; id /= w;
const int32_t x = id % h; id /= h;
const int32_t paramIdx = id % nParams;
// `id` is now the current plane number
auto gradOutputIntPlane = gradOutputInt[id];
if (id < gradOutputInt.size(0)) {
const int32_t xMinCurr = xMinInt[paramIdx];
const int32_t xMaxCurr = xMaxInt[paramIdx];
const int32_t yMinCurr = yMinInt[paramIdx];
const int32_t yMaxCurr = yMaxInt[paramIdx];
const int t = max(0, min(x+xMinCurr, h));
const int b = max(0, min(x+xMaxCurr, h));
const int l = max(0, min(y+yMinCurr, w));
const int r = max(0, min(y+yMaxCurr, w));
scalar_t outValue;
outValue =
gradOutputIntPlane[b][r]
- gradOutputIntPlane[t][r]
- gradOutputIntPlane[b][l]
+ gradOutputIntPlane[t][l];
if (exact) {
const scalar_t xMinCurrFrac = xMinFrac[paramIdx];
const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx];
const scalar_t yMinCurrFrac = yMinFrac[paramIdx];
const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx];
const int tAdv = x+xMinCurr-1 < h ? max(0, min(t-1, h)) : t;
const int bAdv = x+xMaxCurr >= 0 ? max(0, min(b+1, h)) : b;
const int lAdv = y+yMinCurr-1 < w ? max(0, min(l-1, w)) : l;
const int rAdv = y+yMaxCurr >= 0 ? max(0, min(r+1, w)) : r;
// -- xMax border
outValue +=
( gradOutputIntPlane[bAdv][r]
- gradOutputIntPlane[b ][r]
- gradOutputIntPlane[bAdv][l]
+ gradOutputIntPlane[b ][l]
) * xMaxCurrFrac;
// -- yMax border
outValue +=
( gradOutputIntPlane[b][rAdv]
- gradOutputIntPlane[b][r ]
- gradOutputIntPlane[t][rAdv]
+ gradOutputIntPlane[t][r ]
) * yMaxCurrFrac;
// -- xMin border
outValue +=
( gradOutputIntPlane[t ][r]
- gradOutputIntPlane[tAdv][r]
- gradOutputIntPlane[t ][l]
+ gradOutputIntPlane[tAdv][l]
) * xMinCurrFrac;
// -- yMin border
outValue +=
( gradOutputIntPlane[b][l ]
- gradOutputIntPlane[b][lAdv]
- gradOutputIntPlane[t][l ]
+ gradOutputIntPlane[t][lAdv]
) * yMinCurrFrac;
// -- corner pixels
outValue +=
xMaxCurrFrac*yMaxCurrFrac * (
(x+xMaxCurr >= h or
y+yMaxCurr >= w or
x+xMaxCurr < 0 or
y+yMaxCurr < 0 or
b == bAdv or
r == rAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[b+1][r+1]
- gradOutputIntPlane[b ][r+1]
- gradOutputIntPlane[b+1][r ]
+ gradOutputIntPlane[b ][r ]));
outValue +=
xMinCurrFrac*yMaxCurrFrac * (
(x+xMinCurr > h or
y+yMaxCurr >= w or
x+xMinCurr <= 0 or
y+yMaxCurr < 0 or
t == tAdv or
r == rAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[tAdv+1][r+1]
- gradOutputIntPlane[tAdv+1][r ]
- gradOutputIntPlane[tAdv ][r+1]
+ gradOutputIntPlane[tAdv ][r ]));
outValue +=
xMaxCurrFrac*yMinCurrFrac * (
(x+xMaxCurr >= h or
y+yMinCurr > w or
x+xMaxCurr < 0 or
y+yMinCurr <= 0 or
b == bAdv or
l == lAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[b+1][lAdv+1]
- gradOutputIntPlane[b ][lAdv+1]
- gradOutputIntPlane[b+1][lAdv ]
+ gradOutputIntPlane[b ][lAdv ]));
outValue +=
xMinCurrFrac*yMinCurrFrac * (
(x+xMinCurr > h or
y+yMinCurr > w or
x+xMinCurr <= 0 or
y+yMinCurr <= 0 or
t == tAdv or
l == lAdv) ? static_cast<scalar_t>(0) :
( gradOutputIntPlane[tAdv+1][lAdv+1]
- gradOutputIntPlane[tAdv+1][lAdv ]
- gradOutputIntPlane[tAdv ][lAdv+1]
+ gradOutputIntPlane[tAdv ][lAdv ]));
}
*tmpArray = outValue * (normalize ? area[paramIdx] : static_cast<scalar_t>(1));
}
}
template <bool normalize, bool exact>
void boxConvUpdateGradInput(
at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt ,
at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac,
at::Tensor & area, at::Tensor & grad_output_integrated, at::Tensor & tmpArray) {
// TODO use square blocks as in `boxConvUpdateOutput`?
const int threadsNeeded = tmpArray.numel();
int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvUpdateGradInput", ([&] {
auto gradOutputIntFlattened = grad_output_integrated.view(
{-1, grad_output_integrated.size(-2), grad_output_integrated.size(-1)});
auto gradOutputIntAcsr =
gradOutputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
boxConvUpdateGradInputKernel <normalize, exact>
<<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> (
gradOutputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(),
normalize ? area.data_ptr<scalar_t>() : nullptr, xMinInt.numel());
THCudaCheck(cudaGetLastError());
}));
}
// explicitly instantiate
template void boxConvUpdateGradInput<true, true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<false, true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<true, false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
template void boxConvUpdateGradInput<false, false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &);
// TODO overload for exact/truncated mode
// TODO accept only three pairs of parameter arrays, not four (one is always redundant)
template <Parameter parameter, bool exact, typename scalar_t>
__global__ void boxConvAccGradParametersKernel(
CudaAcsr<scalar_t,3> inputInt, scalar_t * __restrict__ tmpArray,
const int32_t * __restrict__ xMinInt , const int32_t * __restrict__ xMaxInt ,
const int32_t * __restrict__ yMinInt , const int32_t * __restrict__ yMaxInt ,
const scalar_t * __restrict__ xMinFrac, const scalar_t * __restrict__ xMaxFrac,
const scalar_t * __restrict__ yMinFrac, const scalar_t * __restrict__ yMaxFrac,
const int nParams) {
int32_t id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id;
const int32_t h = inputInt.size(1) - 1;
const int32_t w = inputInt.size(2) - 1;
const int32_t y = id % w + 1; id /= w;
const int32_t x = id % h + 1; id /= h;
const int32_t paramIdx = id % nParams; id /= nParams;
// `id` is now the current absolute input plane number
auto inputIntPlane = inputInt[id];
if (id < inputInt.size(0)) {
const int32_t xMinCurr = xMinInt[paramIdx];
const int32_t xMaxCurr = xMaxInt[paramIdx];
const int32_t yMinCurr = yMinInt[paramIdx];
const int32_t yMaxCurr = yMaxInt[paramIdx];
// TODO only define these if `exact == true`
const scalar_t xMinCurrFrac = xMinFrac[paramIdx];
const scalar_t xMaxCurrFrac = xMaxFrac[paramIdx];
const scalar_t yMinCurrFrac = yMinFrac[paramIdx];
const scalar_t yMaxCurrFrac = yMaxFrac[paramIdx];
int valid;
int cornerX, cornerY;
scalar_t delta = 0;
if (parameter == Parameter::xMin) {
if (exact) {
// TODO maybe use `input` instead of `inputInt`
valid =
not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMinCurr < 1);
cornerX = max(0,min(h-1,x+xMinCurr-1));
cornerY = max(0,min(w-1,y+yMinCurr-1));
const scalar_t tlCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMinCurr < 1);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t trCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += trCorner * yMaxCurrFrac;
delta += tlCorner * yMinCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr -1, h))][max(0,min(y+yMinCurr , w))];
delta *= (x+xMinCurr >= 1) & (x+xMinCurr <= h);
*tmpArray = -delta;
}
else if (parameter == Parameter::xMax) {
if (exact) {
valid =
not (y+yMinCurr < 1) & not (y+yMinCurr > w) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t blCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr < 0) & not (y+yMaxCurr >= w) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t brCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += brCorner * yMaxCurrFrac;
delta += blCorner * yMinCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr +1, h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))];
delta *= (x+xMaxCurr >= 0) & (x+xMaxCurr < h);
*tmpArray = delta;
}
else if (parameter == Parameter::yMin) {
if (exact) {
valid =
not (y+yMinCurr < 1) & not (x+xMinCurr < 1) & not (x+xMinCurr > h);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t tlCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMinCurr < 1) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMinCurr -1));
const scalar_t blCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += tlCorner * xMinCurrFrac;
delta += blCorner * xMaxCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMinCurr -1, w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr , w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMinCurr -1, w))];
delta *= (y+yMinCurr >= 1) & (y+yMinCurr <= w);
*tmpArray = -delta;
}
else if (parameter == Parameter::yMax) {
if (exact) {
valid =
not (y+yMaxCurr >= w) & not (x+xMinCurr < 1) & not (x+xMinCurr > h);
cornerX = max(0,min(h-1,x+xMinCurr -1));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t trCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
valid =
not (y+yMaxCurr >= w) & not (x+xMaxCurr < 0) & not (x+xMaxCurr >= h);
cornerX = max(0,min(h-1,x+xMaxCurr ));
cornerY = max(0,min(w-1,y+yMaxCurr ));
const scalar_t brCorner = valid *
( inputIntPlane[cornerX+1][cornerY+1]
- inputIntPlane[cornerX ][cornerY+1]
- inputIntPlane[cornerX+1][cornerY ]
+ inputIntPlane[cornerX ][cornerY ]);
delta += trCorner * xMinCurrFrac;
delta += brCorner * xMaxCurrFrac;
} // if (exact)
delta += inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr +1, w))];
delta -= inputIntPlane
[max(0,min(x+xMaxCurr , h))][max(0,min(y+yMaxCurr , w))];
delta -= inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr +1, w))];
delta += inputIntPlane
[max(0,min(x+xMinCurr , h))][max(0,min(y+yMaxCurr , w))];
delta *= (y+yMaxCurr >= 0) & (y+yMaxCurr < w);
*tmpArray = delta;
}
}
}
template <bool exact>
void boxConvAccGradParameters(
// tmpArray size: {batchSize, nInputPlanes, numFilters, h, w}
at::Tensor & xMinInt , at::Tensor & xMaxInt , at::Tensor & yMinInt , at::Tensor & yMaxInt ,
at::Tensor & xMinFrac, at::Tensor & xMaxFrac, at::Tensor & yMinFrac, at::Tensor & yMaxFrac,
at::Tensor & input_integrated, at::Tensor & tmpArray, Parameter parameter) {
// TODO switch to square blocks?
const int threadsNeeded = tmpArray.numel();
int numBlocks = (threadsNeeded + NUM_THREADS - 1) / NUM_THREADS;
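// One thread per element of tmpArray ({batchSize, nInputPlanes, numFilters, h, w});
// the switch below selects which box edge's gradient the kernel computes.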
AT_DISPATCH_FLOATING_TYPES_AND_HALF(tmpArray.scalar_type(), "gpu::boxConvAccGradParameters", ([&] {
auto inputIntFlattened = input_integrated.view(
{-1, input_integrated.size(-2), input_integrated.size(-1)});
auto inputIntAcsr =
inputIntFlattened.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>();
switch (parameter) {
case Parameter::xMin:
boxConvAccGradParametersKernel <Parameter::xMin, exact>
<<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> (
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::xMax:
boxConvAccGradParametersKernel <Parameter::xMax, exact>
<<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> (
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::yMin:
boxConvAccGradParametersKernel <Parameter::yMin, exact>
<<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> (
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
case Parameter::yMax:
boxConvAccGradParametersKernel <Parameter::yMax, exact>
<<<numBlocks, NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>> (
inputIntAcsr, tmpArray.data_ptr<scalar_t>(),
xMinInt.data_ptr<int32_t>(), xMaxInt.data_ptr<int32_t>(),
yMinInt.data_ptr<int32_t>(), yMaxInt.data_ptr<int32_t>(),
xMinFrac.data_ptr<scalar_t>(), xMaxFrac.data_ptr<scalar_t>(),
yMinFrac.data_ptr<scalar_t>(), yMaxFrac.data_ptr<scalar_t>(), xMinInt.numel()); break;
}
THCudaCheck(cudaGetLastError());
}));
}
// explicitly instantiate
template void boxConvAccGradParameters<true>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, Parameter);
template void boxConvAccGradParameters<false>(
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &,
at::Tensor &, at::Tensor &, Parameter);
}
|
31eb36854f0a9a53149f7303619d1e72ba1a2ffd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Accelerated Computing for Deep Learning
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;//Build out the kernel.
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
void matrixMulCPU( int * a, int * b, int * c )// run to create a solution matrix
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu;// Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
hipMallocManaged (&a, size);
hipMallocManaged (&b, size);
hipMallocManaged (&c_cpu, size);
hipMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
// Assign `threads_per_block` and `number_of_blocks` 2D values
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block of threads
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
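// Ceil-style grid: the extra block covers the remainder when N is not a multiple of 16;
// out-of-range threads are discarded by the bounds check inside the kernel.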
hipLaunchKernelGGL(( matrixMulGPU) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, a, b, c_gpu );
hipDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
hipFree(a); hipFree(b);
hipFree( c_cpu ); hipFree( c_gpu );
}
| 31eb36854f0a9a53149f7303619d1e72ba1a2ffd.cu | //Accelerated Computing for Deep Learning
#include <stdio.h>
#define N 64
__global__ void matrixMulGPU( int * a, int * b, int * c )
{
int val = 0;
int row = blockIdx.x * blockDim.x + threadIdx.x;//Build out the kernel.
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < N && col < N)
{
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
void matrixMulCPU( int * a, int * b, int * c )// run to create a solution matrix
{
int val = 0;
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
val = 0;
for ( int k = 0; k < N; ++k )
val += a[row * N + k] * b[k * N + col];
c[row * N + col] = val;
}
}
int main()
{
int *a, *b, *c_cpu, *c_gpu;// Allocate a solution matrix for both the CPU and the GPU operations
int size = N * N * sizeof (int); // Number of bytes of an N x N matrix
// Allocate memory
cudaMallocManaged (&a, size);
cudaMallocManaged (&b, size);
cudaMallocManaged (&c_cpu, size);
cudaMallocManaged (&c_gpu, size);
// Initialize memory; create 2D matrices
for( int row = 0; row < N; ++row )
for( int col = 0; col < N; ++col )
{
a[row*N + col] = row;
b[row*N + col] = col+2;
c_cpu[row*N + col] = 0;
c_gpu[row*N + col] = 0;
}
// Assign `threads_per_block` and `number_of_blocks` 2D values
dim3 threads_per_block (16, 16, 1); // A 16 x 16 block of threads
dim3 number_of_blocks ((N / threads_per_block.x) + 1, (N / threads_per_block.y) + 1, 1);
matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu );
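// Kernel launches are asynchronous: synchronize before the host reads the managed buffers.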
cudaDeviceSynchronize();
// Call the CPU version to check our work
matrixMulCPU( a, b, c_cpu );
// Compare the two answers to make sure they are equal
bool error = false;
for( int row = 0; row < N && !error; ++row )
for( int col = 0; col < N && !error; ++col )
if (c_cpu[row * N + col] != c_gpu[row * N + col])
{
printf("FOUND ERROR at c[%d][%d]\n", row, col);
error = true;
break;
}
if (!error)
printf("Success!\n");
// Free all our allocated memory
cudaFree(a); cudaFree(b);
cudaFree( c_cpu ); cudaFree( c_gpu );
}
|
7196bb9ebbe65450f699057d117a6b370ae88948.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/flow_warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/benchmark.hpp"
#include <iostream>
#include <fstream>
#define CUDART_NAN_F __int_as_float(0x7fffffff)
namespace caffe {
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
//#define DISPLAY_TIMINGS
#define RA_TILE 32
#define RA_ROWS 8
template <typename Dtype>
__global__ void flow_warp_rearrange_kernel(const Dtype* in, Dtype* out, int num, int channels, int cblocks, int width, int height, int widthheight)
{
__shared__ float buffer[RA_TILE][RA_TILE+1];
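// Transpose NCHW -> NHWC through a 32x32 shared-memory tile; the +1 padding column
// avoids shared-memory bank conflicts.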
int n = blockIdx.x/cblocks;
if(n>=num) return;
int c0 = (blockIdx.x%cblocks)*RA_TILE;
int x0 = blockIdx.y*RA_TILE;
int y = blockIdx.z;
int xoff=threadIdx.x;
int coff=threadIdx.y;
int x=x0+xoff;
if(x<width)
for(int i=coff; i<RA_TILE && c0+i<channels; i+=RA_ROWS)
buffer[i][xoff] = in[((n*channels + c0 + i)*height + y)*width + x];
__syncthreads();
coff = threadIdx.x;
xoff = threadIdx.y;
int c = c0 + coff;
if(c<channels)
for(int j=xoff; j<RA_TILE && x0+j<width; j+=RA_ROWS)
out[((n*height + y)*width + x0+j)*channels + c] = buffer[coff][j];
}
#define FW_THREADS 32
#define FW_TILE_X FW_THREADS
#define FW_TILE_C FW_THREADS
template <typename Dtype>
__global__ void flow_warp_kernel_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight, float fillValue)
{
int y = blockIdx.y;
int n = blockIdx.z;
__shared__ float x2_buf[FW_TILE_X], y2_buf[FW_TILE_X];
__shared__ float buffer[FW_TILE_C][FW_TILE_X+1];
int x;
int c;
x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(threadIdx.y==0 && x<width)
{
x2_buf[threadIdx.x] = float(x) + flow[((2*n )*height + y)*width + x];
y2_buf[threadIdx.x] = float(y) + flow[((2*n+1)*height + y)*width + x];
}
__syncthreads();
float x2 = x2_buf[threadIdx.y];
float y2 = y2_buf[threadIdx.y];
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
int off_TL = ((n*height + iy2_T)*width + ix2_L)*channels;
int off_TR = ((n*height + iy2_T)*width + ix2_R)*channels;
int off_BL = ((n*height + iy2_B)*width + ix2_L)*channels;
int off_BR = ((n*height + iy2_B)*width + ix2_R)*channels;
float alpha = x2-ix2_L;
float beta = y2-iy2_T;
float coeffTL = (1-alpha)*(1-beta);
float coeffTR = alpha*(1-beta);
float coeffBL = (1-alpha)*beta;
float coeffBR = alpha*beta;
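// Bilinear interpolation weights for the four neighbours of the warped source position (x2, y2).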
for(int cb=0; cb<cblocks; cb++)
{
__syncthreads();
buffer[threadIdx.y][threadIdx.x] = fillValue;
__syncthreads();
c = cb*FW_TILE_C + threadIdx.x;
if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels)
buffer[threadIdx.y][threadIdx.x] = // buffer [x][c]
coeffTL * image[off_TL + c] +
coeffTR * image[off_TR + c] +
coeffBL * image[off_BL + c] +
coeffBR * image[off_BR + c];
__syncthreads();
c = cb*FW_TILE_C + threadIdx.y;
x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(c<channels && x<width)
warped[((n*channels+c)*height + y)*width + x] = buffer[threadIdx.x][threadIdx.y];
}
}
template <typename Dtype>
__global__ void flow_warp_kernel_no_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int width, int wblocks, int height, int widthheight)
{
int x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(x>=width)
return;
int y = blockIdx.y;
int n = blockIdx.z;
float x2 = float(x) + flow[((2*n )*height + y)*width + x];
float y2 = float(y) + flow[((2*n+1)*height + y)*width + x];
if(x2>=0.f && y2>=0.f && x2<width && y2<height)
{
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
float alpha = x2-ix2_L;
float beta = y2-iy2_T;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels+c)*height;
int off_TL = (ch_off + iy2_T)*width + ix2_L;
int off_TR = (ch_off + iy2_T)*width + ix2_R;
int off_BL = (ch_off + iy2_B)*width + ix2_L;
int off_BR = (ch_off + iy2_B)*width + ix2_R;
float coeffTL = (1-alpha)*(1-beta);
float coeffTR = alpha*(1-beta);
float coeffBL = (1-alpha)*beta;
float coeffBR = alpha*beta;
warped[(ch_off + y)*width + x] =
coeffTL * image[off_TL] +
coeffTR * image[off_TR] +
coeffBL * image[off_BL] +
coeffBR * image[off_BR];
}
}
}
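// Backward pass: scatters the incoming gradient into image_diff with atomicAdd using the
// bilinear weights, and builds flow_diff from horizontal/vertical finite differences of
// the sampled image values.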
template <typename Dtype>
__global__ void flow_warp_backward_kernel_no_smem(
const Dtype* image_data, float* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff,
int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
int x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(x>=width)
return;
int y = blockIdx.y;
int n = blockIdx.z;
float x2 = float(x) + flow_data[((2*n )*height + y)*width + x];
float y2 = float(y) + flow_data[((2*n+1)*height + y)*width + x];
if(x2>=0.f && y2>=0.f && x2<width && y2<height)
{
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
float alpha=x2-ix2_L;
float beta=y2-iy2_T;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float warped_diff_value = warped_diff[(ch_off + y)*width + x];
atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_L], warped_diff_value * (1-alpha)*(1-beta));
atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_R], warped_diff_value * alpha*(1-beta));
atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_L], warped_diff_value * (1-alpha)*beta);
atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_R], warped_diff_value * alpha*beta);
}
float gamma = iy2_B - y2;
float bot_diff = 0;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float temp = 0;
temp += gamma * (image_data[(ch_off + iy2_T)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_L]);
temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_B)*width + ix2_L]);
bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
}
flow_diff[(2*n*height + y)*width + x] = bot_diff;
gamma = ix2_R - x2;
bot_diff = 0;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float temp = 0;
temp += gamma * (image_data[(ch_off + iy2_B)*width + ix2_L] - image_data[(ch_off + iy2_T)*width + ix2_L]);
temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_R]);
bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
}
flow_diff[((2*n+1)*height + y)*width + x] = bot_diff;
}
}
template <typename Dtype>
__global__ void flow_warp_backward_kernel_smem(const Dtype* trans_image_data, Dtype* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
// int y = blockIdx.y;
// int n = blockIdx.z;
// __shared__ float x2_buf[FW_TILE_X], y2_buf[FW_TILE_X];
// __shared__ float buffer[FW_TILE_C][FW_TILE_X+1];
// int x;
// int c;
// x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(threadIdx.y==0 && x<width)
// {
// x2_buf[threadIdx.x] = float(x) + flow[((2*n )*height + y)*width + x];
// y2_buf[threadIdx.x] = float(y) + flow[((2*n+1)*height + y)*width + x];
// }
// __syncthreads();
// float x2 = x2_buf[threadIdx.y];
// float y2 = y2_buf[threadIdx.y];
// int ix2_L = int(x2);
// int iy2_T = int(y2);
// int ix2_R = min(ix2_L+1, width-1);
// int iy2_B = min(iy2_T+1, height-1);
// int off_TL = ((n*height + iy2_T)*width + ix2_L)*channels;
// int off_TR = ((n*height + iy2_T)*width + ix2_R)*channels;
// int off_BL = ((n*height + iy2_B)*width + ix2_L)*channels;
// int off_BR = ((n*height + iy2_B)*width + ix2_R)*channels;
// float alpha = x2-ix2_L;
// float beta = y2-iy2_T;
// float coeffTL = (1-alpha)*(1-beta);
// float coeffTR = alpha*(1-beta);
// float coeffBL = (1-alpha)*beta;
// float coeffBR = alpha*beta;
// for(int cb=0; cb<cblocks; cb++)
// {
// __syncthreads();
// buffer[threadIdx.y][threadIdx.x] = 0;
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.y;
// x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(c<channels && x<width)
// buffer[threadIdx.y][threadIdx.x] = warped_diff[((n*channels + c)*height + y)*width + x]; // buffer[c][x]
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.x;
// float wd = buffer[threadIdx.x][threadIdx.y];
// if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels && x<width)
// {
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_T)*width + ix2_L], wd * coeffTL);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_T)*width + ix2_R], wd * coeffTR);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_B)*width + ix2_L], wd * coeffBL);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_B)*width + ix2_R], wd * coeffBR);
// float gamma = iy2_B - y2;
// c = cb*FW_TILE_C + threadIdx.x;
// float imgTR = trans_image_data[((n*height + iy2_T)*width + ix2_R)*channels + c];
// float imgTL = trans_image_data[((n*height + iy2_T)*width + ix2_L)*channels + c];
// float imgBR = trans_image_data[((n*height + iy2_B)*width + ix2_R)*channels + c];
// float imgBL = trans_image_data[((n*height + iy2_B)*width + ix2_L)*channels + c];
// float temp = 0;
// temp += gamma * (imgTR - imgTL);
// temp += (1-gamma) * (imgBR - imgBL);
// temp *= buffer[threadIdx.x][threadIdx.y]; // warped_diff[((n*channels + c)*height + y)*width + x]
// atomicAdd(&flow_diff[(2*n*height + y)*width + x], wd * coeffBR);
// }
// for(int c=0; c<channels; c++)
// {
// float temp = 0;
// temp += gamma * (imgTR - imgTL);
// temp += (1-gamma) * (imgBR - imgBL);
// bot_diff += warped_diff[((n*channels + c)*height + y)*width + x] * temp;
// }
// flow_diff[(2*n*height + y)*width + x] = bot_diff;
// gamma = ix2_R - x2;
// bot_diff = 0;
// for(int c=0; c<channels; c++)
// {
// float temp = 0;
// temp += gamma * (image_data[((n*channels + c)*height + iy2_B)*width + ix2_L] - image_data[((n*channels + c)*height + iy2_T)*width + ix2_L]);
// temp += (1-gamma) * (image_data[((n*channels + c)*height + iy2_B)*width + ix2_R] - image_data[((n*channels + c)*height + iy2_T)*width + ix2_R]);
// bot_diff += warped_diff[((n*channels + c)*height + y)*width + x] * temp;
// }
// flow_diff[((2*n+1)*height + y)*width + x] = bot_diff;
// int c = cb*FW_TILE_C + threadIdx.x;
// if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels)
// buffer[threadIdx.y][threadIdx.x] = // buffer [x][c]
// coeffTL * image[off_TL + c] +
// coeffTR * image[off_TR + c] +
// coeffBL * image[off_BL + c] +
// coeffBR * image[off_BR + c];
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.y;
// int x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(c<channels && x<width)
// warped[((n*channels+c)*height + y)*width + x] = buffer[threadIdx.x][threadIdx.y];
// }
}
template <typename Dtype>
void FlowWarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
int width = top[0]->width();
int height = top[0]->height();
int channels = top[0]->channels();
int num = top[0]->num();
const int wh_size = width * height;
//const int whc_size = width * height * channels;
Dtype* warped_data = top[0]->mutable_gpu_data(); // dest
const Dtype* image_data = bottom[0]->gpu_data(); // source image
Dtype* trans_image_data = transposed_image_.mutable_gpu_data(); // source image
const Dtype* flow_data = bottom[1]->gpu_data(); // source flow
int nan = 0xFFE00000;
float nanf = *(reinterpret_cast<float*>(&nan));
Dtype fillValue = this->layer_param().flow_warp_param().fill_value() == FlowWarpParameter_FillParameter_ZERO ? 0 : nanf;
hipMemset(warped_data, fillValue, width*height*channels*num*sizeof(float));
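// Note: hipMemset writes a byte pattern, so this pre-fill is only exact for the zero case;
// the warp kernel below writes fillValue per element for samples outside the image anyway.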
#ifdef DISPLAY_TIMINGS
caffe::Timer t1;
t1.Start();
#endif
dim3 rearrangeThreads(RA_TILE,RA_ROWS,1);
int cblocks = ((channels-1)/RA_TILE+1);
dim3 rearrangeBlocks(cblocks*num, (width-1)/RA_TILE+1, height);
hipLaunchKernelGGL(( flow_warp_rearrange_kernel<Dtype>), dim3(rearrangeBlocks), dim3(rearrangeThreads), 0, 0,
image_data,
trans_image_data,
num,
channels,
cblocks,
width,
height,
wh_size
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t1.Stop();
LOG(INFO) << "rearrange time " << t1.MilliSeconds() << "ms";
#endif
// if(channels>8)
{
#ifdef DISPLAY_TIMINGS
caffe::Timer t2;
t2.Start();
#endif
int wblocks = ((width-1)/FW_TILE_X+1);
int cblocks = ((channels-1)/FW_TILE_C+1);
dim3 warpThreads(FW_TILE_X,FW_TILE_C);
dim3 warpBlocks(wblocks, height, num);
hipLaunchKernelGGL(( flow_warp_kernel_smem<Dtype>), dim3(warpBlocks), dim3(warpThreads), 0, 0,
trans_image_data,
flow_data,
warped_data,
num,
channels,
cblocks,
width,
wblocks,
height,
wh_size,
fillValue
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t2.Stop();
LOG(INFO) << "warp time 1a: " << t2.MilliSeconds() << "ms";
#endif
}
// else
// {
//#ifdef DISPLAY_TIMINGS
// caffe::Timer t2a;
// t2a.Start();
//#endif
// int wblocks = ((width-1)/FW_TILE_X+1);
// dim3 warpThreads(FW_TILE_X);
// dim3 warpBlocks(wblocks, height, num);
// flow_warp_kernel_no_smem<Dtype><<<warpBlocks, warpThreads>>>(
// image_data,
// flow_data,
// warped_data,
// num,
// channels,
// width,
// wblocks,
// height,
// wh_size
// );
// CUDA_POST_KERNEL_CHECK;
//#ifdef DISPLAY_TIMINGS
// t2a.Stop();
// LOG(INFO) << "warp time 1b: " << t2a.MilliSeconds() << "ms";
//#endif
// }
}
template <typename Dtype>
void FlowWarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
int width = top[0]->width();
int height = top[0]->height();
int channels = top[0]->channels();
int num = top[0]->num();
const int wh_size = width * height;
//const int whc_size = width * height * channels;
const Dtype* warped_data = top[0]->gpu_data(); // dest
const Dtype* warped_diff = top[0]->gpu_diff(); // dest
const Dtype* image_data = bottom[0]->gpu_data(); // source image
Dtype* image_diff = bottom[0]->mutable_gpu_diff(); // source image
const Dtype* flow_data = bottom[1]->gpu_data(); // source flow
Dtype* flow_diff = bottom[1]->mutable_gpu_diff(); // source flow
hipMemset(image_diff, 0, width*height*channels*num*sizeof(float));
hipMemset(flow_diff, 0, width*height*2*num*sizeof(float));
//Backward_cpu(top, propagate_down, bottom);
//return;
#ifdef DISPLAY_TIMINGS
caffe::Timer t3a;
t3a.Start();
#endif
int wblocks = ((width-1)/FW_TILE_X+1);
int cblocks = ((channels-1)/FW_TILE_C+1);
dim3 warpThreads(FW_TILE_X,1);
dim3 warpBlocks(wblocks, height, num);
hipLaunchKernelGGL(( flow_warp_backward_kernel_no_smem<Dtype>), dim3(warpBlocks), dim3(warpThreads), 0, 0,
image_data,
(float*)image_diff,
flow_data,
flow_diff,
warped_diff,
num,
channels,
cblocks,
width,
wblocks,
height,
wh_size
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t3a.Stop();
LOG(INFO) << "backward time 1a: " << t3a.MilliSeconds() << "ms";
#endif
if(!propagate_down[0]) caffe_gpu_memset(bottom[0]->count()*sizeof(Dtype), 0, image_diff);
if(!propagate_down[1]) caffe_gpu_memset(bottom[1]->count()*sizeof(Dtype), 0, flow_diff);
// {
// printf("gpu flow u:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->data_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow v:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->data_at(0, 1, y, x));
// }
// printf("\n");
// }
// printf("gpu image:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[0]->data_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow diff u:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->diff_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow diff v:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->diff_at(0, 1, y, x));
// }
// printf("\n");
// }
// printf("gpu image diff:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[0]->diff_at(0, 0, y, x));
// }
// printf("\n");
// }
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(FlowWarpLayer);
} // namespace caffe
//caffe::Timer t3;
//t3.Start();
//int topcount = width*height*channels*num;
//WarpData<Dtype><<<CAFFE_GET_BLOCKS(topcount), CAFFE_CUDA_NUM_THREADS>>>(
// topcount,
// num, channels, height, width, image_data, topcount,
// height, width, warped_data, flow_data);
//t3.Stop();
//LOG(INFO) << "warp time 2: " << t3.MilliSeconds() << "ms";
//template <typename Dtype>
//__global__ void WarpData(const int nthreads, const int num, const int channels, const int height, const int width, const Dtype* src_data, const int src_count,
// const int dest_height, const int dest_width, Dtype* dest_data, const Dtype* flow) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// int x = (index % width); //w-pos
// int y = ((index / width) % height); //h-pos
// int cn = (index / width / height); // channel*num
// int n = cn / channels; //num
// // === Warping:
// float xpos = (float)(x) + flow[width*(height*(2*n+0) + y) + x];
// float ypos = (float)(y) + flow[width*(height*(2*n+1) + y) + x];
// if (xpos > 0.f && xpos <= width-1.01f && ypos > 0.f && ypos <= height-1.01f) {
// // Get interpolated sample
// float tlx = floor(xpos);
// float tly = floor(ypos);
// int srcIdxOff = width*(height*cn + tly) + tlx;
// float sampleTL = src_data[srcIdxOff];
// float sampleTR = src_data[min(srcIdxOff+1,src_count)];
// float sampleBL = src_data[min(srcIdxOff+width,src_count)];
// float sampleBR = src_data[min(srcIdxOff+1+width,src_count)];
// float xdist = xpos - tlx;
// float ydist = ypos - tly;
// float sample = (1-xdist)*(1-ydist)*sampleTL
// + ( xdist)*( ydist)*sampleBR
// + (1-xdist)*( ydist)*sampleBL
// + ( xdist)*(1-ydist)*sampleTR;
// dest_data[index] = sample;
// }
// }
//}
//volatile float TL = image[((n*channels + c)*height + iy2_T)*width + ix2_L];
//volatile float TR = image[((n*channels + c)*height + iy2_T)*width + ix2_R];
//volatile float BL = image[((n*channels + c)*height + iy2_B)*width + ix2_L];
//volatile float BR = image[((n*channels + c)*height + iy2_B)*width + ix2_R];
//threadIdx.y;
// if(threadIdx.y == 0)
// {
// x2[xoff] = float(x) + flow[((2*n )*height + y)*width + x];
// y2[xoff] = float(y) + flow[((2*n+1)*height + y)*width + x];
// }
// __syncthreads();
// __syncthreads();
// if(x2>=0 && y2>=0 && x2<width && y2<height)
// {
// int ix2_L = int(x2);
// int iy2_T = int(y2);
// int ix2_R = min(ix2_L+1, width-1);
// int iy2_B = min(iy2_T+1, height-1);
// float alpha=x2-ix2_L;
// float beta=y2-iy2_T;
// for(int c=threadIdx.x; c<channels; c+=blockDim.x)
// {
// float TL = image[((n*height + iy2_T)*width + ix2_L)*channels + c];
// float TR = image[((n*height + iy2_T)*width + ix2_R)*channels + c];
// float BL = image[((n*height + iy2_B)*width + ix2_L)*channels + c];
// float BR = image[((n*height + iy2_B)*width + ix2_R)*channels + c];
// //warped[((n*height+y)*width + x)*channels + c] =
// warped[((n*channels+c)*height + y)*width + x] =
// (1-alpha)*(1-beta)*TL +
// alpha*(1-beta)*TR +
// (1-alpha)*beta*BL +
// alpha*beta*BR;
// }
// }
// else
// {
// for(int c=threadIdx.x; c<channels; c+=blockDim.x)
// warped[((n*channels+c)*height + y)*width + x] = 0;
// }
// }
| 7196bb9ebbe65450f699057d117a6b370ae88948.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/flow_warp_layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/benchmark.hpp"
#include <iostream>
#include <fstream>
#define CUDART_NAN_F __int_as_float(0x7fffffff)
namespace caffe {
#define min(a,b) ((a<b)?(a):(b))
#define max(a,b) ((a>b)?(a):(b))
//#define DISPLAY_TIMINGS
#define RA_TILE 32
#define RA_ROWS 8
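// Rearranges the source image from NCHW to NHWC layout through a RA_TILE x (RA_TILE+1)
// shared-memory tile (the +1 padding avoids bank conflicts), so the warp kernel below
// can read all channels of a sampled pixel contiguously.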
template <typename Dtype>
__global__ void flow_warp_rearrange_kernel(const Dtype* in, Dtype* out, int num, int channels, int cblocks, int width, int height, int widthheight)
{
__shared__ float buffer[RA_TILE][RA_TILE+1];
int n = blockIdx.x/cblocks;
if(n>=num) return;
int c0 = (blockIdx.x%cblocks)*RA_TILE;
int x0 = blockIdx.y*RA_TILE;
int y = blockIdx.z;
int xoff=threadIdx.x;
int coff=threadIdx.y;
int x=x0+xoff;
if(x<width)
for(int i=coff; i<RA_TILE && c0+i<channels; i+=RA_ROWS)
buffer[i][xoff] = in[((n*channels + c0 + i)*height + y)*width + x];
__syncthreads();
coff = threadIdx.x;
xoff = threadIdx.y;
int c = c0 + coff;
if(c<channels)
for(int j=xoff; j<RA_TILE && x0+j<width; j+=RA_ROWS)
out[((n*height + y)*width + x0+j)*channels + c] = buffer[coff][j];
}
#define FW_THREADS 32
#define FW_TILE_X FW_THREADS
#define FW_TILE_C FW_THREADS
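// Bilinearly warps the NHWC-transposed image by the per-pixel flow field, staging one
// FW_TILE_C x FW_TILE_X tile of results in shared memory, and writes the output back
// in NCHW layout; samples that land outside the image receive fillValue.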
template <typename Dtype>
__global__ void flow_warp_kernel_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight, float fillValue)
{
int y = blockIdx.y;
int n = blockIdx.z;
__shared__ float x2_buf[FW_TILE_X], y2_buf[FW_TILE_X];
__shared__ float buffer[FW_TILE_C][FW_TILE_X+1];
int x;
int c;
x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(threadIdx.y==0 && x<width)
{
x2_buf[threadIdx.x] = float(x) + flow[((2*n )*height + y)*width + x];
y2_buf[threadIdx.x] = float(y) + flow[((2*n+1)*height + y)*width + x];
}
__syncthreads();
float x2 = x2_buf[threadIdx.y];
float y2 = y2_buf[threadIdx.y];
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
int off_TL = ((n*height + iy2_T)*width + ix2_L)*channels;
int off_TR = ((n*height + iy2_T)*width + ix2_R)*channels;
int off_BL = ((n*height + iy2_B)*width + ix2_L)*channels;
int off_BR = ((n*height + iy2_B)*width + ix2_R)*channels;
float alpha = x2-ix2_L;
float beta = y2-iy2_T;
float coeffTL = (1-alpha)*(1-beta);
float coeffTR = alpha*(1-beta);
float coeffBL = (1-alpha)*beta;
float coeffBR = alpha*beta;
for(int cb=0; cb<cblocks; cb++)
{
__syncthreads();
buffer[threadIdx.y][threadIdx.x] = fillValue;
__syncthreads();
c = cb*FW_TILE_C + threadIdx.x;
if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels)
buffer[threadIdx.y][threadIdx.x] = // buffer [x][c]
coeffTL * image[off_TL + c] +
coeffTR * image[off_TR + c] +
coeffBL * image[off_BL + c] +
coeffBR * image[off_BR + c];
__syncthreads();
c = cb*FW_TILE_C + threadIdx.y;
x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(c<channels && x<width)
warped[((n*channels+c)*height + y)*width + x] = buffer[threadIdx.x][threadIdx.y];
}
}
template <typename Dtype>
__global__ void flow_warp_kernel_no_smem(const Dtype* image, const Dtype* flow, Dtype* warped, int num, int channels, int width, int wblocks, int height, int widthheight)
{
int x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(x>=width)
return;
int y = blockIdx.y;
int n = blockIdx.z;
float x2 = float(x) + flow[((2*n )*height + y)*width + x];
float y2 = float(y) + flow[((2*n+1)*height + y)*width + x];
if(x2>=0.f && y2>=0.f && x2<width && y2<height)
{
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
float alpha = x2-ix2_L;
float beta = y2-iy2_T;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels+c)*height;
int off_TL = (ch_off + iy2_T)*width + ix2_L;
int off_TR = (ch_off + iy2_T)*width + ix2_R;
int off_BL = (ch_off + iy2_B)*width + ix2_L;
int off_BR = (ch_off + iy2_B)*width + ix2_R;
float coeffTL = (1-alpha)*(1-beta);
float coeffTR = alpha*(1-beta);
float coeffBL = (1-alpha)*beta;
float coeffBR = alpha*beta;
warped[(ch_off + y)*width + x] =
coeffTL * image[off_TL] +
coeffTR * image[off_TR] +
coeffBL * image[off_BL] +
coeffBR * image[off_BR];
}
}
}
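// Backward pass: scatters the incoming gradient into image_diff with atomicAdd using the
// bilinear weights, and builds flow_diff from horizontal/vertical finite differences of
// the sampled image values.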
template <typename Dtype>
__global__ void flow_warp_backward_kernel_no_smem(
const Dtype* image_data, float* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff,
int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
int x = blockIdx.x*FW_TILE_X + threadIdx.x;
if(x>=width)
return;
int y = blockIdx.y;
int n = blockIdx.z;
float x2 = float(x) + flow_data[((2*n )*height + y)*width + x];
float y2 = float(y) + flow_data[((2*n+1)*height + y)*width + x];
if(x2>=0.f && y2>=0.f && x2<width && y2<height)
{
int ix2_L = int(x2);
int iy2_T = int(y2);
int ix2_R = min(ix2_L+1, width-1);
int iy2_B = min(iy2_T+1, height-1);
float alpha=x2-ix2_L;
float beta=y2-iy2_T;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float warped_diff_value = warped_diff[(ch_off + y)*width + x];
atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_L], warped_diff_value * (1-alpha)*(1-beta));
atomicAdd(&image_diff[(ch_off + iy2_T)*width + ix2_R], warped_diff_value * alpha*(1-beta));
atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_L], warped_diff_value * (1-alpha)*beta);
atomicAdd(&image_diff[(ch_off + iy2_B)*width + ix2_R], warped_diff_value * alpha*beta);
}
float gamma = iy2_B - y2;
float bot_diff = 0;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float temp = 0;
temp += gamma * (image_data[(ch_off + iy2_T)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_L]);
temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_B)*width + ix2_L]);
bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
}
flow_diff[(2*n*height + y)*width + x] = bot_diff;
gamma = ix2_R - x2;
bot_diff = 0;
for(int c=0; c<channels; c++)
{
int ch_off = (n*channels + c)*height;
float temp = 0;
temp += gamma * (image_data[(ch_off + iy2_B)*width + ix2_L] - image_data[(ch_off + iy2_T)*width + ix2_L]);
temp += (1-gamma) * (image_data[(ch_off + iy2_B)*width + ix2_R] - image_data[(ch_off + iy2_T)*width + ix2_R]);
bot_diff += warped_diff[(ch_off + y)*width + x] * temp;
}
flow_diff[((2*n+1)*height + y)*width + x] = bot_diff;
}
}
template <typename Dtype>
__global__ void flow_warp_backward_kernel_smem(const Dtype* trans_image_data, Dtype* image_diff, const Dtype* flow_data, Dtype* flow_diff, const Dtype* warped_diff, int num, int channels, int cblocks, int width, int wblocks, int height, int widthheight)
{
// int y = blockIdx.y;
// int n = blockIdx.z;
// __shared__ float x2_buf[FW_TILE_X], y2_buf[FW_TILE_X];
// __shared__ float buffer[FW_TILE_C][FW_TILE_X+1];
// int x;
// int c;
// x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(threadIdx.y==0 && x<width)
// {
// x2_buf[threadIdx.x] = float(x) + flow[((2*n )*height + y)*width + x];
// y2_buf[threadIdx.x] = float(y) + flow[((2*n+1)*height + y)*width + x];
// }
// __syncthreads();
// float x2 = x2_buf[threadIdx.y];
// float y2 = y2_buf[threadIdx.y];
// int ix2_L = int(x2);
// int iy2_T = int(y2);
// int ix2_R = min(ix2_L+1, width-1);
// int iy2_B = min(iy2_T+1, height-1);
// int off_TL = ((n*height + iy2_T)*width + ix2_L)*channels;
// int off_TR = ((n*height + iy2_T)*width + ix2_R)*channels;
// int off_BL = ((n*height + iy2_B)*width + ix2_L)*channels;
// int off_BR = ((n*height + iy2_B)*width + ix2_R)*channels;
// float alpha = x2-ix2_L;
// float beta = y2-iy2_T;
// float coeffTL = (1-alpha)*(1-beta);
// float coeffTR = alpha*(1-beta);
// float coeffBL = (1-alpha)*beta;
// float coeffBR = alpha*beta;
// for(int cb=0; cb<cblocks; cb++)
// {
// __syncthreads();
// buffer[threadIdx.y][threadIdx.x] = 0;
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.y;
// x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(c<channels && x<width)
// buffer[threadIdx.y][threadIdx.x] = warped_diff[((n*channels + c)*height + y)*width + x]; // buffer[c][x]
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.x;
// float wd = buffer[threadIdx.x][threadIdx.y];
// if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels && x<width)
// {
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_T)*width + ix2_L], wd * coeffTL);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_T)*width + ix2_R], wd * coeffTR);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_B)*width + ix2_L], wd * coeffBL);
// atomicAdd(&image_diff[((n*channels + c)*height + iy2_B)*width + ix2_R], wd * coeffBR);
// float gamma = iy2_B - y2;
// c = cb*FW_TILE_C + threadIdx.x;
// float imgTR = trans_image_data[((n*height + iy2_T)*width + ix2_R)*channels + c];
// float imgTL = trans_image_data[((n*height + iy2_T)*width + ix2_L)*channels + c];
// float imgBR = trans_image_data[((n*height + iy2_B)*width + ix2_R)*channels + c];
// float imgBL = trans_image_data[((n*height + iy2_B)*width + ix2_L)*channels + c];
// float temp = 0;
// temp += gamma * (imgTR - imgTL);
// temp += (1-gamma) * (imgBR - imgBL);
// temp *= buffer[threadIdx.x][threadIdx.y]; // warped_diff[((n*channels + c)*height + y)*width + x]
// atomicAdd(&flow_diff[(2*n*height + y)*width + x], wd * coeffBR);
// }
// for(int c=0; c<channels; c++)
// {
// float temp = 0;
// temp += gamma * (imgTR - imgTL);
// temp += (1-gamma) * (imgBR - imgBL);
// bot_diff += warped_diff[((n*channels + c)*height + y)*width + x] * temp;
// }
// flow_diff[(2*n*height + y)*width + x] = bot_diff;
// gamma = ix2_R - x2;
// bot_diff = 0;
// for(int c=0; c<channels; c++)
// {
// float temp = 0;
// temp += gamma * (image_data[((n*channels + c)*height + iy2_B)*width + ix2_L] - image_data[((n*channels + c)*height + iy2_T)*width + ix2_L]);
// temp += (1-gamma) * (image_data[((n*channels + c)*height + iy2_B)*width + ix2_R] - image_data[((n*channels + c)*height + iy2_T)*width + ix2_R]);
// bot_diff += warped_diff[((n*channels + c)*height + y)*width + x] * temp;
// }
// flow_diff[((2*n+1)*height + y)*width + x] = bot_diff;
// int c = cb*FW_TILE_C + threadIdx.x;
// if(x2>=0 && y2>=0 && x2<width && y2<height && c<channels)
// buffer[threadIdx.y][threadIdx.x] = // buffer [x][c]
// coeffTL * image[off_TL + c] +
// coeffTR * image[off_TR + c] +
// coeffBL * image[off_BL + c] +
// coeffBR * image[off_BR + c];
// __syncthreads();
// c = cb*FW_TILE_C + threadIdx.y;
// int x = blockIdx.x*FW_TILE_X + threadIdx.x;
// if(c<channels && x<width)
// warped[((n*channels+c)*height + y)*width + x] = buffer[threadIdx.x][threadIdx.y];
// }
}
template <typename Dtype>
void FlowWarpLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
int width = top[0]->width();
int height = top[0]->height();
int channels = top[0]->channels();
int num = top[0]->num();
const int wh_size = width * height;
//const int whc_size = width * height * channels;
Dtype* warped_data = top[0]->mutable_gpu_data(); // dest
const Dtype* image_data = bottom[0]->gpu_data(); // source image
Dtype* trans_image_data = transposed_image_.mutable_gpu_data(); // source image
const Dtype* flow_data = bottom[1]->gpu_data(); // source flow
int nan = 0xFFE00000;
float nanf = *(reinterpret_cast<float*>(&nan));
Dtype fillValue = this->layer_param().flow_warp_param().fill_value() == FlowWarpParameter_FillParameter_ZERO ? 0 : nanf;
cudaMemset(warped_data, fillValue, width*height*channels*num*sizeof(float));
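// Note: cudaMemset writes a byte pattern, so this pre-fill is only exact for the zero case;
// the warp kernel below writes fillValue per element for samples outside the image anyway.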
#ifdef DISPLAY_TIMINGS
caffe::Timer t1;
t1.Start();
#endif
dim3 rearrangeThreads(RA_TILE,RA_ROWS,1);
int cblocks = ((channels-1)/RA_TILE+1);
dim3 rearrangeBlocks(cblocks*num, (width-1)/RA_TILE+1, height);
flow_warp_rearrange_kernel<Dtype><<<rearrangeBlocks, rearrangeThreads>>>(
image_data,
trans_image_data,
num,
channels,
cblocks,
width,
height,
wh_size
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t1.Stop();
LOG(INFO) << "rearrange time " << t1.MilliSeconds() << "ms";
#endif
// if(channels>8)
{
#ifdef DISPLAY_TIMINGS
caffe::Timer t2;
t2.Start();
#endif
int wblocks = ((width-1)/FW_TILE_X+1);
int cblocks = ((channels-1)/FW_TILE_C+1);
dim3 warpThreads(FW_TILE_X,FW_TILE_C);
dim3 warpBlocks(wblocks, height, num);
flow_warp_kernel_smem<Dtype><<<warpBlocks, warpThreads>>>(
trans_image_data,
flow_data,
warped_data,
num,
channels,
cblocks,
width,
wblocks,
height,
wh_size,
fillValue
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t2.Stop();
LOG(INFO) << "warp time 1a: " << t2.MilliSeconds() << "ms";
#endif
}
// else
// {
//#ifdef DISPLAY_TIMINGS
// caffe::Timer t2a;
// t2a.Start();
//#endif
// int wblocks = ((width-1)/FW_TILE_X+1);
// dim3 warpThreads(FW_TILE_X);
// dim3 warpBlocks(wblocks, height, num);
// flow_warp_kernel_no_smem<Dtype><<<warpBlocks, warpThreads>>>(
// image_data,
// flow_data,
// warped_data,
// num,
// channels,
// width,
// wblocks,
// height,
// wh_size
// );
// CUDA_POST_KERNEL_CHECK;
//#ifdef DISPLAY_TIMINGS
// t2a.Stop();
// LOG(INFO) << "warp time 1b: " << t2a.MilliSeconds() << "ms";
//#endif
// }
}
template <typename Dtype>
void FlowWarpLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
int width = top[0]->width();
int height = top[0]->height();
int channels = top[0]->channels();
int num = top[0]->num();
const int wh_size = width * height;
//const int whc_size = width * height * channels;
const Dtype* warped_data = top[0]->gpu_data(); // dest
const Dtype* warped_diff = top[0]->gpu_diff(); // dest
const Dtype* image_data = bottom[0]->gpu_data(); // source image
Dtype* image_diff = bottom[0]->mutable_gpu_diff(); // source image
const Dtype* flow_data = bottom[1]->gpu_data(); // source flow
Dtype* flow_diff = bottom[1]->mutable_gpu_diff(); // source flow
cudaMemset(image_diff, 0, width*height*channels*num*sizeof(float));
cudaMemset(flow_diff, 0, width*height*2*num*sizeof(float));
//Backward_cpu(top, propagate_down, bottom);
//return;
#ifdef DISPLAY_TIMINGS
caffe::Timer t3a;
t3a.Start();
#endif
int wblocks = ((width-1)/FW_TILE_X+1);
int cblocks = ((channels-1)/FW_TILE_C+1);
dim3 warpThreads(FW_TILE_X,1);
dim3 warpBlocks(wblocks, height, num);
flow_warp_backward_kernel_no_smem<Dtype><<<warpBlocks, warpThreads>>>(
image_data,
(float*)image_diff,
flow_data,
flow_diff,
warped_diff,
num,
channels,
cblocks,
width,
wblocks,
height,
wh_size
);
CUDA_POST_KERNEL_CHECK;
#ifdef DISPLAY_TIMINGS
t3a.Stop();
LOG(INFO) << "backward time 1a: " << t3a.MilliSeconds() << "ms";
#endif
if(!propagate_down[0]) caffe_gpu_memset(bottom[0]->count()*sizeof(Dtype), 0, image_diff);
if(!propagate_down[1]) caffe_gpu_memset(bottom[1]->count()*sizeof(Dtype), 0, flow_diff);
// {
// printf("gpu flow u:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->data_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow v:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->data_at(0, 1, y, x));
// }
// printf("\n");
// }
// printf("gpu image:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[0]->data_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow diff u:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->diff_at(0, 0, y, x));
// }
// printf("\n");
// }
// printf("gpu flow diff v:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[1]->diff_at(0, 1, y, x));
// }
// printf("\n");
// }
// printf("gpu image diff:\n");
// for(int y=0; y<height; y++)
// {
// for(int x=0; x<width; x++)
// {
// printf("%f ", bottom[0]->diff_at(0, 0, y, x));
// }
// printf("\n");
// }
// }
}
INSTANTIATE_LAYER_GPU_FUNCS(FlowWarpLayer);
} // namespace caffe
//caffe::Timer t3;
//t3.Start();
//int topcount = width*height*channels*num;
//WarpData<Dtype><<<CAFFE_GET_BLOCKS(topcount), CAFFE_CUDA_NUM_THREADS>>>(
// topcount,
// num, channels, height, width, image_data, topcount,
// height, width, warped_data, flow_data);
//t3.Stop();
//LOG(INFO) << "warp time 2: " << t3.MilliSeconds() << "ms";
//template <typename Dtype>
//__global__ void WarpData(const int nthreads, const int num, const int channels, const int height, const int width, const Dtype* src_data, const int src_count,
// const int dest_height, const int dest_width, Dtype* dest_data, const Dtype* flow) {
// CUDA_KERNEL_LOOP(index, nthreads) {
// int x = (index % width); //w-pos
// int y = ((index / width) % height); //h-pos
// int cn = (index / width / height); // channel*num
// int n = cn / channels; //num
// // === Warping:
// float xpos = (float)(x) + flow[width*(height*(2*n+0) + y) + x];
// float ypos = (float)(y) + flow[width*(height*(2*n+1) + y) + x];
// if (xpos > 0.f && xpos <= width-1.01f && ypos > 0.f && ypos <= height-1.01f) {
// // Get interpolated sample
// float tlx = floor(xpos);
// float tly = floor(ypos);
// int srcIdxOff = width*(height*cn + tly) + tlx;
// float sampleTL = src_data[srcIdxOff];
// float sampleTR = src_data[min(srcIdxOff+1,src_count)];
// float sampleBL = src_data[min(srcIdxOff+width,src_count)];
// float sampleBR = src_data[min(srcIdxOff+1+width,src_count)];
// float xdist = xpos - tlx;
// float ydist = ypos - tly;
// float sample = (1-xdist)*(1-ydist)*sampleTL
// + ( xdist)*( ydist)*sampleBR
// + (1-xdist)*( ydist)*sampleBL
// + ( xdist)*(1-ydist)*sampleTR;
// dest_data[index] = sample;
// }
// }
//}
//volatile float TL = image[((n*channels + c)*height + iy2_T)*width + ix2_L];
//volatile float TR = image[((n*channels + c)*height + iy2_T)*width + ix2_R];
//volatile float BL = image[((n*channels + c)*height + iy2_B)*width + ix2_L];
//volatile float BR = image[((n*channels + c)*height + iy2_B)*width + ix2_R];
//threadIdx.y;
// if(threadIdx.y == 0)
// {
// x2[xoff] = float(x) + flow[((2*n )*height + y)*width + x];
// y2[xoff] = float(y) + flow[((2*n+1)*height + y)*width + x];
// }
// __syncthreads();
// __syncthreads();
// if(x2>=0 && y2>=0 && x2<width && y2<height)
// {
// int ix2_L = int(x2);
// int iy2_T = int(y2);
// int ix2_R = min(ix2_L+1, width-1);
// int iy2_B = min(iy2_T+1, height-1);
// float alpha=x2-ix2_L;
// float beta=y2-iy2_T;
// for(int c=threadIdx.x; c<channels; c+=blockDim.x)
// {
// float TL = image[((n*height + iy2_T)*width + ix2_L)*channels + c];
// float TR = image[((n*height + iy2_T)*width + ix2_R)*channels + c];
// float BL = image[((n*height + iy2_B)*width + ix2_L)*channels + c];
// float BR = image[((n*height + iy2_B)*width + ix2_R)*channels + c];
// //warped[((n*height+y)*width + x)*channels + c] =
// warped[((n*channels+c)*height + y)*width + x] =
// (1-alpha)*(1-beta)*TL +
// alpha*(1-beta)*TR +
// (1-alpha)*beta*BL +
// alpha*beta*BR;
// }
// }
// else
// {
// for(int c=threadIdx.x; c<channels; c+=blockDim.x)
// warped[((n*channels+c)*height + y)*width + x] = 0;
// }
// }
|
6fad27dc26c9f3d6b6a2f449d66b30761d4faec9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2011 Kerem KAT
//
// http://dissipatedheat.com/
// Do not hesitate to contact me about usage of the code or to make comments
// about the code. Your feedback will be appreciated.
// keremkat<@>gmail<.>com
//
// Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan
// çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir.
// keremkat<@>gmail<.>com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef TEX_INVERT_CU
#define TEX_INVERT_CU
/**
\file texInvert.cu
Defines the launcher method and the kernel for the CUDA texture invert operation.
*/
#include "texInvert.h"
texture<float4, 2, hipReadModeElementType> texInvert1; /**< Texture symbol used by the kernel. */
#define BLOCK_SIZE (32) /**< Block size ( BLOCK_SIZE x BLOCK_SIZE square block ). */
/** Set to 1 to measure GPU time. */
#define ENABLE_TIMING_CODE 0
/**
Kernel that inverts the image using a texture.
\param image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
\param width Width of the image in pixels
\param height Height of the image in pixels
The method runs on the GPU and writes its output over the image parameter.
*/
__global__
void gpuTexInvert(
float* image,
int width,
int height
)
{
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int cIdx = ( row * width + col ) * 3; // multiplied by 3 for the RGB channels; linear index.
// Map the thread's coordinate into texture space.
float tu = (float)col / width;
float tv = (float)row / height;
// Read the image data through the texture.
float4 texVal = tex2D( texInvert1, tu, tv );
// Subtract the texture values from 1 and write them to global memory.
*( image + cIdx ) = 1 - texVal.x;
*( image + cIdx + 1 ) = 1 - texVal.y;
*( image + cIdx + 2 ) = 1 - texVal.z;
}
/**
\ref ptKernelLauncher type method.
\param d_Image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
\param width Width of the image in pixels
\param height Height of the image in pixels
Method that calls the \ref gpuTexInvert kernel after setting up the Grid and Block dimensions.
*/
void deviceTexInvertLaunch(
float *d_Image,
int width,
int height
)
{
// launch kernel
dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE );
dim3 dimGrid( width / dimBlock.x, height / dimBlock.y );
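// Integer division: width and height are assumed to be multiples of BLOCK_SIZE; any
// remainder rows/columns are not covered by the grid (the kernel has no bounds check).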
#if ENABLE_TIMING_CODE
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
#endif
hipLaunchKernelGGL(( gpuTexInvert), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Image, width, height);
#if ENABLE_TIMING_CODE
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
// block until the device has completed
hipDeviceSynchronize();
printf("gpuInvert kernel time: %.3f ms\n", elapsedTime);
#endif
hipDeviceSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
}
#endif | 6fad27dc26c9f3d6b6a2f449d66b30761d4faec9.cu | // Copyright (c) 2011 Kerem KAT
//
// http://dissipatedheat.com/
// Do not hesisate to contact me about usage of the code or to make comments
// about the code. Your feedback will be appreciated.
// keremkat<@>gmail<.>com
//
// Kodun kullanımı hakkında veya yorum yapmak için benimle iletişim kurmaktan
// çekinmeyiniz. Geri bildirimleriniz değerlendirilecektir.
// keremkat<@>gmail<.>com
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef TEX_INVERT_CU
#define TEX_INVERT_CU
/**
\file texInvert.cu
Defines the launcher method and the kernel for the CUDA texture invert operation.
*/
#include "texInvert.h"
texture<float4, 2, cudaReadModeElementType> texInvert1; /**< Texture symbol used by the kernel. */
#define BLOCK_SIZE (32) /**< Block size ( BLOCK_SIZE x BLOCK_SIZE square block ). */
/** Set to 1 to measure GPU time. */
#define ENABLE_TIMING_CODE 0
/**
Kernel that inverts the image using a texture.
\param image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
\param width Width of the image in pixels
\param height Height of the image in pixels
The method runs on the GPU and writes its output over the image parameter.
*/
__global__
void gpuTexInvert(
float* image,
int width,
int height
)
{
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int cIdx = ( row * width + col ) * 3; // 3 ile çarpım RGB için, linearIndex.
// threade ait kordinatın texture uzayındaki kordinatı bulunur.
float tu = (float)col / width;
float tv = (float)row / height;
// Texture üzerinden görüntü verisi okunur.
float4 texVal = tex2D( texInvert1, tu, tv );
// Texture değerleri 1'den çıkartılarak global belleğe yazılır.
*( image + cIdx ) = 1 - texVal.x;
*( image + cIdx + 1 ) = 1 - texVal.y;
*( image + cIdx + 2 ) = 1 - texVal.z;
}
/**
\ref ptKernelLauncher type method.
\param d_Image Address in GPU memory of the image, normalized to the [0, 1] range, with BGR channel order.
\param width Width of the image in pixels
\param height Height of the image in pixels
Method that calls the \ref gpuTexInvert kernel after setting up the Grid and Block dimensions.
*/
void deviceTexInvertLaunch(
float *d_Image,
int width,
int height
)
{
// launch kernel
dim3 dimBlock( BLOCK_SIZE, BLOCK_SIZE );
dim3 dimGrid( width / dimBlock.x, height / dimBlock.y );
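// Integer division: width and height are assumed to be multiples of BLOCK_SIZE; any
// remainder rows/columns are not covered by the grid (the kernel has no bounds check).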
#if ENABLE_TIMING_CODE
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
#endif
gpuTexInvert<<< dimGrid, dimBlock >>>( d_Image, width, height);
#if ENABLE_TIMING_CODE
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
// block until the device has completed
cudaThreadSynchronize();
printf("gpuInvert kernel time: %.3f ms\n", elapsedTime);
#endif
cudaThreadSynchronize();
// check if kernel execution generated an error
// Check for any CUDA errors
checkCUDAError("kernel invocation");
}
#endif |
f1225cb84bf85896cf535110e1c6127acf83f404.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<time.h>
#include<cuda.h>
#include<string.h>
__global__
void multiplicationKernell(float* m1, float* m2, float* m3, int rowsM1, int colsM1, int colsM2)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if((Row < rowsM1) && (Col < colsM2))
{
float resul = 0.0;
// Dot product of row Row of m1 (rowsM1 x colsM1) with column Col of m2 (colsM1 x colsM2).
for(int i = 0; i < colsM1; i++)
{
resul = resul + m1[Row*colsM1+i] * m2[i*colsM2+Col];
}
m3[Row*colsM2+Col] = resul;
}
}
__host__
void toMatrix(float *M, FILE *content, int rows, int cols)
{
for(int i=0; i<rows;i++)
{
for(int j=0; j<cols; j++)
{
fscanf(content,"%f",&M[i*cols+j]);
}
}
fclose(content);
}
__host__
void print(float *M, int rows, int cols)
{
printf("----------MATRIX----------\n");
for(int i=0; i<rows;i++)
{
for(int j=0; j<cols; j++)
{
printf("[%f]",M[i*cols+j]);
}
printf("\n");
}
}
int main(int argc, char** argv)
{
if(argc != 3)
{
printf("Error, no se encontraron todos los parametros necesarios.");
return 1;
}
FILE *inputMatrix1;
FILE *inputMatrix2;
inputMatrix1 = fopen(argv[1],"r");
inputMatrix2 = fopen(argv[2],"r");
float *m1, *m2, *m3;
int rowsM1, rowsM2, colsM1, colsM2;
fscanf(inputMatrix1,"%d",&rowsM1);
fscanf(inputMatrix1,"%d",&colsM1);
fscanf(inputMatrix2,"%d",&rowsM2);
fscanf(inputMatrix2,"%d",&colsM2);
m1 = (float*) malloc(rowsM1*colsM1*sizeof(float));
m2 = (float*) malloc(rowsM2*colsM2*sizeof(float));
m3 = (float*) malloc(rowsM1*colsM2*sizeof(float));
toMatrix(m1, inputMatrix1, rowsM1, colsM1);
toMatrix(m2, inputMatrix2, rowsM2, colsM2);
print(m1, rowsM1, colsM1);
print(m2, rowsM2, colsM2);
if(colsM1 != rowsM2)
{
printf("Error: the matrix dimensions are not compatible.");
return 1;
}
// Device-side setup
hipError_t error = hipSuccess;
float *d_m1, *d_m2, *d_m3;
int blockSize = 32;
dim3 dimBlockSize(blockSize,blockSize,1);
dim3 dimGridSize(ceil(colsM2 / float(blockSize)), ceil(rowsM1 / float(blockSize)), 1);
error = hipMalloc((void**)&d_m1, rowsM1 * colsM1 * sizeof(float));
if(error != hipSuccess)
{
printf("Imposible asignar memoria para d_m1");
return 1;
}
error = hipMalloc((void**)&d_m2, rowsM2 * colsM2 * sizeof(float));
if(error != hipSuccess)
{
printf("Imposible asignar memoria para d_m2");
return 1;
}
error = hipMalloc((void**)&d_m3, rowsM3 * colsM3 * sizeof(float));
if(error != hipSuccess)
{
printf("Imposible asignar memoria para d_m3");
return 1;
}
hipMemcpy(d_m1, m1, rowsM1 * colsM1 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_m2, m2, rowsM2 * colsM2 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( multiplicationKernell), dim3(dimGridSize), dim3(dimBlockSize), 0, 0, d_m1, d_m2, d_m3, rowsM1, colsM1, colsM2);
hipMemcpy(m3, d_m3, rowsM1 * colsM2 * sizeof(float), hipMemcpyDeviceToHost);
print(m3, rowsM1, colsM2);
free(m1);
free(m2);
free(m3);
hipFree(d_m1);
hipFree(d_m2);
hipFree(d_m3);
return 0;
} | f1225cb84bf85896cf535110e1c6127acf83f404.cu | #include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<time.h>
#include<cuda.h>
#include<string.h>
__global__
void multiplicationKernell(float* m1, float* m2, float* m3, int rowsM1, int colsM1, int colsM2)
{
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
if((Row < rowsM1) && (Col < colsM2))
{
float resul = 0.0;
// Dot product of row Row of m1 (rowsM1 x colsM1) with column Col of m2 (colsM1 x colsM2).
for(int i = 0; i < colsM1; i++)
{
resul = resul + m1[Row*colsM1+i] * m2[i*colsM2+Col];
}
m3[Row*colsM2+Col] = resul;
}
}
__host__
void toMatrix(float *M, FILE *content, int rows, int cols)
{
for(int i=0; i<rows;i++)
{
for(int j=0; j<cols; j++)
{
fscanf(content,"%f",&M[i*cols+j]);
}
}
fclose(content);
}
__host__
void print(float *M, int rows, int cols)
{
printf("----------MATRIX----------\n");
for(int i=0; i<rows;i++)
{
for(int j=0; j<cols; j++)
{
printf("[%f]",M[i*cols+j]);
}
printf("\n");
}
}
int main(int argc, char** argv)
{
if(argc != 3)
{
printf("Error, no se encontraron todos los parametros necesarios.");
return 1;
}
FILE *inputMatrix1;
FILE *inputMatrix2;
inputMatrix1 = fopen(argv[1],"r");
inputMatrix2 = fopen(argv[2],"r");
float *m1, *m2, *m3;
int rowsM1, rowsM2, colsM1, colsM2;
fscanf(inputMatrix1,"%d",&rowsM1);
fscanf(inputMatrix1,"%d",&colsM1);
fscanf(inputMatrix2,"%d",&rowsM2);
fscanf(inputMatrix2,"%d",&colsM2);
m1 = (float*) malloc(rowsM1*colsM1*sizeof(float));
m2 = (float*) malloc(rowsM2*colsM2*sizeof(float));
m3 = (float*) malloc(rowsM1*colsM2*sizeof(float));
toMatrix(m1, inputMatrix1, rowsM1, colsM1);
toMatrix(m2, inputMatrix2, rowsM2, colsM2);
print(m1, rowsM1, colsM1);
print(m2, rowsM2, colsM2);
if(colsM1 != rowsM2)
{
printf("Error: the matrix dimensions are not compatible.");
return 1;
}
// Device-side setup
cudaError_t error = cudaSuccess;
float *d_m1, *d_m2, *d_m3;
int blockSize = 32;
dim3 dimBlockSize(blockSize,blockSize,1);
dim3 dimGridSize(ceil(colsM2 / float(blockSize)), ceil(rowsM1 / float(blockSize)), 1);
error = cudaMalloc((void**)&d_m1, rowsM1 * colsM1 * sizeof(float));
if(error != cudaSuccess)
{
printf("Imposible asignar memoria para d_m1");
return 1;
}
error = cudaMalloc((void**)&d_m2, rowsM2 * colsM2 * sizeof(float));
if(error != cudaSuccess)
{
printf("Imposible asignar memoria para d_m2");
return 1;
}
error = cudaMalloc((void**)&d_m3, rowsM3 * colsM3 * sizeof(float));
if(error != cudaSuccess)
{
printf("Imposible asignar memoria para d_m3");
return 1;
}
cudaMemcpy(d_m1, m1, rowsM1 * colsM1 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_m2, m2, rowsM2 * colsM2 * sizeof(float), cudaMemcpyHostToDevice);
multiplicationKernell<<<dimGridSize, dimBlockSize>>>(d_m1, d_m2, d_m3, rowsM1, colsM1, colsM2);
cudaMemcpy(m3, d_m3, rowsM1 * colsM2 * sizeof(float), cudaMemcpyDeviceToHost);
print(m3, rowsM1, colsM2);
free(m1);
free(m2);
free(m3);
cudaFree(d_m1);
cudaFree(d_m2);
cudaFree(d_m3);
return 0;
} |
965250f1095635509372be80856e7cea8689c975.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <string>
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/platform/dynload/hiprand/hiprand.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
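// Grid-stride loop over the input: each element gets an independent Philox-generated
// uniform sample; dropped elements (and their mask) are set to 0, kept elements get
// mask 1 and, in "upscale_in_train" mode, are scaled by 1/(1 - dropout_prob).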
template <typename T, typename MaskType>
__global__ void RandomGenerator(const size_t n, const int seed,
const float dropout_prob, const T* src,
MaskType* mask_data, T* dst,
bool is_upscale_in_train) {
hiprandStatePhilox4_32_10_t state;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
MaskType mask;
T dest;
for (; idx < n; idx += blockDim.x * gridDim.x) {
T s = src[idx];
if (step_size == 0) {
hiprand_init(seed, idx, idx, &state);
step_size = blockDim.x * gridDim.x;
} else {
hiprand_init(seed, idx, step_size, &state);
}
if (hiprand_uniform(&state) < dropout_prob) {
mask = 0;
dest = 0;
} else {
mask = 1;
if (is_upscale_in_train) {
dest = s / static_cast<T>(1.0f - dropout_prob);
} else {
dest = s;
}
}
mask_data[idx] = mask;
dst[idx] = dest;
}
}
// It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT.
// Use std::random and thrust::random(thrust is a std library in CUDA) to
// implement uniform random.
template <typename Place, typename T>
class GPUDropoutKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* seed =
context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
auto* y = context.Output<Tensor>("Out");
y->mutable_data<T>(context.GetPlace());
float dropout_prob = context.Attr<float>("dropout_prob");
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
bool upscale_in_train = (dropout_implementation == "upscale_in_train");
auto& place = *context.template device_context<Place>().eigen_device();
if (!context.Attr<bool>("is_test")) {
int64_t x_numel = x->numel();
auto stream = context.cuda_device_context().stream();
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
size_t size = framework::product(mask->dims());
auto* x_data = x->data<T>();
int seed_data;
std::random_device rnd;
if (seed) {
if (platform::is_gpu_place(seed->place())) {
framework::Tensor temp;
TensorCopySync(*seed, platform::CPUPlace(), &temp);
seed_data = *(temp.data<int>());
} else {
seed_data = *(seed->data<int>());
}
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
}
auto* y_data = y->mutable_data<T>(context.GetPlace());
if (dropout_prob == 1.0f) {
PADDLE_ENFORCE_CUDA_SUCCESS(
hipMemsetAsync(y_data, 0, x_numel * sizeof(T), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(hipMemsetAsync(
mask_data, 0, x_numel * sizeof(*mask_data), stream));
return;
}
int threads = 512;
int grid = (x_numel + threads - 1) / threads;
hipLaunchKernelGGL(( RandomGenerator<T, uint8_t>), dim3(grid), dim3(threads), 0, stream,
size, seed_data, dropout_prob, x_data, mask_data, y_data,
upscale_in_train);
} else {
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
if (upscale_in_train) {
Y.device(place) = X;
} else {
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float>,
ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16>,
ops::GPUDropoutKernel<plat::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
dropout_grad, ops::DropoutGradKernel<plat::CUDADeviceContext, float>,
ops::DropoutGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::DropoutGradKernel<plat::CUDADeviceContext, double>);
| 965250f1095635509372be80856e7cea8689c975.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda.h>
#include <curand_kernel.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <string>
#include "paddle/fluid/operators/dropout_op.h"
#include "paddle/fluid/platform/dynload/curand.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
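// Grid-stride loop over the input: each element gets an independent Philox-generated
// uniform sample; dropped elements (and their mask) are set to 0, kept elements get
// mask 1 and, in "upscale_in_train" mode, are scaled by 1/(1 - dropout_prob).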
template <typename T, typename MaskType>
__global__ void RandomGenerator(const size_t n, const int seed,
const float dropout_prob, const T* src,
MaskType* mask_data, T* dst,
bool is_upscale_in_train) {
curandStatePhilox4_32_10_t state;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int step_size = 0;
MaskType mask;
T dest;
for (; idx < n; idx += blockDim.x * gridDim.x) {
T s = src[idx];
if (step_size == 0) {
curand_init(seed, idx, idx, &state);
step_size = blockDim.x * gridDim.x;
} else {
curand_init(seed, idx, step_size, &state);
}
if (curand_uniform(&state) < dropout_prob) {
mask = 0;
dest = 0;
} else {
mask = 1;
if (is_upscale_in_train) {
dest = s / static_cast<T>(1.0f - dropout_prob);
} else {
dest = s;
}
}
mask_data[idx] = mask;
dst[idx] = dest;
}
}
// It seems that Eigen::Tensor::setRandom in GPU will SEGFAULT.
// Use std::random and thrust::random(thrust is a std library in CUDA) to
// implement uniform random.
template <typename Place, typename T>
class GPUDropoutKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* x = context.Input<Tensor>("X");
auto* seed =
context.HasInput("Seed") ? context.Input<Tensor>("Seed") : nullptr;
auto* y = context.Output<Tensor>("Out");
y->mutable_data<T>(context.GetPlace());
float dropout_prob = context.Attr<float>("dropout_prob");
auto& dropout_implementation =
context.Attr<std::string>("dropout_implementation");
bool upscale_in_train = (dropout_implementation == "upscale_in_train");
auto& place = *context.template device_context<Place>().eigen_device();
if (!context.Attr<bool>("is_test")) {
int64_t x_numel = x->numel();
auto stream = context.cuda_device_context().stream();
auto* mask = context.Output<Tensor>("Mask");
auto* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
size_t size = framework::product(mask->dims());
auto* x_data = x->data<T>();
int seed_data;
std::random_device rnd;
if (seed) {
if (platform::is_gpu_place(seed->place())) {
framework::Tensor temp;
TensorCopySync(*seed, platform::CPUPlace(), &temp);
seed_data = *(temp.data<int>());
} else {
seed_data = *(seed->data<int>());
}
} else {
seed_data =
context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
}
auto* y_data = y->mutable_data<T>(context.GetPlace());
if (dropout_prob == 1.0f) {
PADDLE_ENFORCE_CUDA_SUCCESS(
cudaMemsetAsync(y_data, 0, x_numel * sizeof(T), stream));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaMemsetAsync(
mask_data, 0, x_numel * sizeof(*mask_data), stream));
return;
}
int threads = 512;
int grid = (x_numel + threads - 1) / threads;
RandomGenerator<T, uint8_t><<<grid, threads, 0, stream>>>(
size, seed_data, dropout_prob, x_data, mask_data, y_data,
upscale_in_train);
} else {
auto X = EigenMatrix<T>::Reshape(*x, 1);
auto Y = EigenMatrix<T>::Reshape(*y, 1);
if (upscale_in_train) {
Y.device(place) = X;
} else {
Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
dropout, ops::GPUDropoutKernel<plat::CUDADeviceContext, float>,
ops::GPUDropoutKernel<plat::CUDADeviceContext, plat::float16>,
ops::GPUDropoutKernel<plat::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
dropout_grad, ops::DropoutGradKernel<plat::CUDADeviceContext, float>,
ops::DropoutGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::DropoutGradKernel<plat::CUDADeviceContext, double>);
|
d1f84673e7413fc1486788098d57a07f2f83808a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "matvec.h"
#include "matrixlib.h"
#include <helper_cuda.h>
#define BLOCK_SIZE 16
#define NUM_BLOCKS 16
int main(int argc, char **argv)
{
int m, n;
double *h_A, *d_A, *h_v, *d_v, *h_w, *d_w;
// Fetch command line parameters, otherwise use default
if (argc == 3)
{
m = atoi(argv[1]);
n = atoi(argv[2]);
}
else
{
m = 5;
n = 5;
}
// Allocate memory on host and device
hipHostMalloc((void **)&h_A, m * n * sizeof(double)); // A is m x n
hipHostMalloc((void **)&h_v, n * sizeof(double)); // v is n x 1
hipHostMalloc((void **)&h_w, m * sizeof(double)); // w is m x 1
hipMalloc((void **)&d_A, m * n * sizeof(double));
hipMalloc((void **)&d_v, n * sizeof(double));
hipMalloc((void **)&d_w, m * sizeof(double));
if (h_A == NULL || h_v == NULL || h_w == NULL || d_A == NULL || d_v == NULL || d_w == NULL)
{
fprintf(stderr, "memory allocation failed!\n");
return (1);
}
// Init matrices on host
init_matrix(m, n, h_A, 1.0);
init_vector(n, h_v, 2.0);
// Print input
print_matrix(m,n,h_A);
print_vector(n,h_v);
// Copy data from host to device
hipMemcpy(d_A, h_A, m * n * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_v, h_v, n * sizeof(double), hipMemcpyHostToDevice);
  // Launch kernel (1D launch with NUM_BLOCKS blocks of BLOCK_SIZE threads)
//dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // For 2D problems, e.g. 16*16 = 256 threads per block
//dim3 numBlocks(m / threadsPerBlock.x, n / threadsPerBlock.y);
//mat_x_vec_kernel<<<numBlocks, threadsPerBlock>>>(d_A, d_v, d_w, m, n);
// The problem is 1D!!!
hipLaunchKernelGGL(( mat_x_vec_kernel), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_A, d_v, d_w, m, n); // For 1D problems, e.g. 16*16 = 256 threads in total
checkCudaErrors(hipDeviceSynchronize());
// Copy result back to host
hipMemcpy(h_w, d_w, m * sizeof(double), hipMemcpyDeviceToHost);
// Print result
print_vector(m,h_w);
// Cleanup
hipHostFree(h_A);
hipHostFree(h_v);
hipHostFree(h_w);
hipFree(d_A);
hipFree(d_v);
hipFree(d_w);
return 0;
} | d1f84673e7413fc1486788098d57a07f2f83808a.cu | #include <stdio.h>
#include <stdlib.h>
#include "matvec.h"
#include "matrixlib.h"
#include <helper_cuda.h>
#define BLOCK_SIZE 16
#define NUM_BLOCKS 16
int main(int argc, char **argv)
{
int m, n;
double *h_A, *d_A, *h_v, *d_v, *h_w, *d_w;
// Fetch command line parameters, otherwise use default
if (argc == 3)
{
m = atoi(argv[1]);
n = atoi(argv[2]);
}
else
{
m = 5;
n = 5;
}
// Allocate memory on host and device
cudaMallocHost((void **)&h_A, m * n * sizeof(double)); // A is m x n
cudaMallocHost((void **)&h_v, n * sizeof(double)); // v is n x 1
cudaMallocHost((void **)&h_w, m * sizeof(double)); // w is m x 1
cudaMalloc((void **)&d_A, m * n * sizeof(double));
cudaMalloc((void **)&d_v, n * sizeof(double));
cudaMalloc((void **)&d_w, m * sizeof(double));
if (h_A == NULL || h_v == NULL || h_w == NULL || d_A == NULL || d_v == NULL || d_w == NULL)
{
fprintf(stderr, "memory allocation failed!\n");
return (1);
}
// Init matrices on host
init_matrix(m, n, h_A, 1.0);
init_vector(n, h_v, 2.0);
// Print input
print_matrix(m,n,h_A);
print_vector(n,h_v);
// Copy data from host to device
cudaMemcpy(d_A, h_A, m * n * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, n * sizeof(double), cudaMemcpyHostToDevice);
  // Launch kernel (1D launch with NUM_BLOCKS blocks of BLOCK_SIZE threads)
//dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); // For 2D problems, e.g. 16*16 = 256 threads per block
//dim3 numBlocks(m / threadsPerBlock.x, n / threadsPerBlock.y);
//mat_x_vec_kernel<<<numBlocks, threadsPerBlock>>>(d_A, d_v, d_w, m, n);
// The problem is 1D!!!
mat_x_vec_kernel<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_A, d_v, d_w, m, n); // For 1D problems, e.g. 16*16 = 256 threads in total
checkCudaErrors(cudaDeviceSynchronize());
// Copy result back to host
cudaMemcpy(h_w, d_w, m * sizeof(double), cudaMemcpyDeviceToHost);
// Print result
print_vector(m,h_w);
// Cleanup
cudaFreeHost(h_A);
cudaFreeHost(h_v);
cudaFreeHost(h_w);
cudaFree(d_A);
cudaFree(d_v);
cudaFree(d_w);
return 0;
} |
d1682921b1413bc6e3648767ec1b5928cca387e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// modified for sm < sm5.3
#include "hip/hip_fp16.h"
#include "helper_cuda.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
__global__ void scalarProductKernel(
half2 const * const a,
half2 const * const b,
float * const results,
size_t const size
)
{
const int stride = gridDim.x*blockDim.x;
__shared__ half2 shArray[128];
shArray[threadIdx.x] = __float2half2_rn(0.f);
half2 value = __float2half2_rn(0.f);
float af, bf, cf;
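    // Pre-sm_53 parts lack native half arithmetic, so each product widens to float
    // with __half2float and narrows back with __float2half.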
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i+=stride)
{
half2 tmpa = a[i];
half2 tmpb = b[i];
af = __half2float(tmpa.x);
        bf = __half2float(tmpb.x);
cf = af*bf;
value.x = __float2half(cf);
af = __half2float(tmpa.y);
bf = __half2float(tmpb.y);
cf = af*bf;
value.y = __float2half(cf);
}
shArray[threadIdx.x] = value;
__syncthreads();
if (threadIdx.x == 0)
{
half2 result = shArray[0];
float f_result = __low2float(result) + __high2float(result);
results[blockIdx.x] = f_result;
}
}
void generateInput(half2 * a, size_t size)
{
for (size_t i = 0; i < size; ++i)
{
unsigned temp = rand();
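        // Keep the random sign and mantissa bits but force the exponent field to
        // 0x3C00, so every half has magnitude in [1, 2).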
temp &= 0x83FF83FF;
temp |= 0x3C003C00;
a[i] = *(half2*)&temp;
}
}
int main(int argc, char *argv[])
{
srand(time(NULL));
const int blocks = 128;
const int threads = 128;
size_t size = blocks*threads*16;
half2 * vec[2];
half2 * devVec[2];
float * results;
float * devResults;
int devID = findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t devProp;
checkCudaErrors(hipGetDeviceProperties(&devProp, devID));
for (int i = 0; i < 2; ++i)
{
checkCudaErrors(hipHostMalloc((void**)&vec[i], size*sizeof*vec[i]));
checkCudaErrors(hipMalloc((void**)&devVec[i], size*sizeof*devVec[i]));
}
checkCudaErrors(hipHostMalloc((void**)&results, blocks*sizeof*results));
checkCudaErrors(hipMalloc((void**)&devResults, blocks*sizeof*devResults));
for (int i = 0; i < 2; ++i)
{
generateInput(vec[i], size);
checkCudaErrors(hipMemcpy(devVec[i], vec[i], size*sizeof*vec[i], hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( scalarProductKernel), dim3(blocks), dim3(threads), 0, 0, devVec[0], devVec[1], devResults, size);
checkCudaErrors(hipMemcpy(results, devResults, blocks*sizeof*results, hipMemcpyDeviceToHost));
float result = 0;
for (int i = 0; i < blocks; ++i)
{
result += results[i];
}
printf("Result: %f \n", result);
for (int i = 0; i < 2; ++i)
{
checkCudaErrors(hipFree(devVec[i]));
checkCudaErrors(hipHostFree(vec[i]));
}
checkCudaErrors(hipFree(devResults));
checkCudaErrors(hipHostFree(results));
return EXIT_SUCCESS;
}
| d1682921b1413bc6e3648767ec1b5928cca387e3.cu | /**
* Copyright 2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// modified for sm < sm5.3
#include "cuda_fp16.h"
#include "helper_cuda.h"
#include <cstdio>
#include <cstdlib>
#include <ctime>
__global__ void scalarProductKernel(
half2 const * const a,
half2 const * const b,
float * const results,
size_t const size
)
{
const int stride = gridDim.x*blockDim.x;
__shared__ half2 shArray[128];
shArray[threadIdx.x] = __float2half2_rn(0.f);
half2 value = __float2half2_rn(0.f);
float af, bf, cf;
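    // Pre-sm_53 parts lack native half arithmetic, so each product widens to float
    // with __half2float and narrows back with __float2half.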
    for (int i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i+=stride)
{
half2 tmpa = a[i];
half2 tmpb = b[i];
af = __half2float(tmpa.x);
        bf = __half2float(tmpb.x);
cf = af*bf;
value.x = __float2half(cf);
af = __half2float(tmpa.y);
bf = __half2float(tmpb.y);
cf = af*bf;
value.y = __float2half(cf);
}
shArray[threadIdx.x] = value;
__syncthreads();
if (threadIdx.x == 0)
{
half2 result = shArray[0];
float f_result = __low2float(result) + __high2float(result);
results[blockIdx.x] = f_result;
}
}
void generateInput(half2 * a, size_t size)
{
for (size_t i = 0; i < size; ++i)
{
unsigned temp = rand();
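        // Keep the random sign and mantissa bits but force the exponent field to
        // 0x3C00, so every half has magnitude in [1, 2).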
temp &= 0x83FF83FF;
temp |= 0x3C003C00;
a[i] = *(half2*)&temp;
}
}
int main(int argc, char *argv[])
{
srand(time(NULL));
const int blocks = 128;
const int threads = 128;
size_t size = blocks*threads*16;
half2 * vec[2];
half2 * devVec[2];
float * results;
float * devResults;
int devID = findCudaDevice(argc, (const char **)argv);
cudaDeviceProp devProp;
checkCudaErrors(cudaGetDeviceProperties(&devProp, devID));
for (int i = 0; i < 2; ++i)
{
checkCudaErrors(cudaMallocHost((void**)&vec[i], size*sizeof*vec[i]));
checkCudaErrors(cudaMalloc((void**)&devVec[i], size*sizeof*devVec[i]));
}
checkCudaErrors(cudaMallocHost((void**)&results, blocks*sizeof*results));
checkCudaErrors(cudaMalloc((void**)&devResults, blocks*sizeof*devResults));
for (int i = 0; i < 2; ++i)
{
generateInput(vec[i], size);
checkCudaErrors(cudaMemcpy(devVec[i], vec[i], size*sizeof*vec[i], cudaMemcpyHostToDevice));
}
scalarProductKernel<<<blocks, threads>>>(devVec[0], devVec[1], devResults, size);
checkCudaErrors(cudaMemcpy(results, devResults, blocks*sizeof*results, cudaMemcpyDeviceToHost));
float result = 0;
for (int i = 0; i < blocks; ++i)
{
result += results[i];
}
printf("Result: %f \n", result);
for (int i = 0; i < 2; ++i)
{
checkCudaErrors(cudaFree(devVec[i]));
checkCudaErrors(cudaFreeHost(vec[i]));
}
checkCudaErrors(cudaFree(devResults));
checkCudaErrors(cudaFreeHost(results));
return EXIT_SUCCESS;
}
|
8ec35c618c240e34533c4d737e7e0285b4e2ddc7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n)
{
// compute the global element index this thread should process
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
int main(void)
{
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
  // pointers to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
// hipMalloc the device arrays
hipMalloc((void**)&device_array_a, num_bytes);
hipMalloc((void**)&device_array_b, num_bytes);
hipMalloc((void**)&device_array_c, num_bytes);
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_array_b == 0 || host_array_c == 0 ||
device_array_a == 0 || device_array_b == 0 || device_array_c == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
}
// copy arrays a & b to the device memory space
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(device_array_b, host_array_b, num_bytes, hipMemcpyHostToDevice);
// compute c = a + b on the device
const size_t block_size = 256;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// launch the kernel
hipLaunchKernelGGL(( vector_add), dim3(grid_size), dim3(block_size), 0, 0, device_array_a, device_array_b, device_array_c, num_elements);
// copy the result back to the host memory space
hipMemcpy(host_array_c, device_array_c, num_bytes, hipMemcpyDeviceToHost);
// print out the first 10 results
for(int i = 0; i < 10; ++i)
{
printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]);
}
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
hipFree(device_array_a);
hipFree(device_array_b);
hipFree(device_array_c);
}
| 8ec35c618c240e34533c4d737e7e0285b4e2ddc7.cu | // This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n)
{
// compute the global element index this thread should process
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
int main(void)
{
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
  // pointers to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
// cudaMalloc the device arrays
cudaMalloc((void**)&device_array_a, num_bytes);
cudaMalloc((void**)&device_array_b, num_bytes);
cudaMalloc((void**)&device_array_c, num_bytes);
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_array_b == 0 || host_array_c == 0 ||
device_array_a == 0 || device_array_b == 0 || device_array_c == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
}
// copy arrays a & b to the device memory space
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
// compute c = a + b on the device
const size_t block_size = 256;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// launch the kernel
vector_add<<<grid_size, block_size>>>(device_array_a, device_array_b, device_array_c, num_elements);
// copy the result back to the host memory space
cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost);
// print out the first 10 results
for(int i = 0; i < 10; ++i)
{
printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]);
}
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
}
|
c6d2c9dd5b7afb226db31fc6ac29b21ba7450daa.hip | // !!! This is a file automatically generated by hipify!!!
/*****************************************************************************************************
*
* main.cu
*
*****************************************************************************************************/
#include <stdio.h>
#include "support.h"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize variables -----------------------------------------------------
float *xarr_h, *yarr_h;
float *xarr_d, *yarr_d;
float *cx_h, *cy_h;
float *out_cx, *out_cy;
int numPoints, numClusters;
char *datafile;
hipError_t cuda_ret;
// Allocate values to variables through arguments
printf("\n Setting up the problem ........");
fflush(stdout);
startTime(&timer);
if(argc == 1)
{
datafile = "color_histogram68040.txt";
numClusters = 11;
}
else if(argc == 2)
{
/*if(sscanf(argv[1], "%s", &datafile) != 1)
{
printf("\nArgument is not an integer");
exit(0);
}*/
datafile = argv[1];
numClusters = 11;
}
else if(argc == 3)
{
/*if(sscanf(argv[1], "%s", &datafile) != 1)
{
printf("\n Argument for file of points is not a string");
exit(0);
}*/
datafile = argv[1];
if(sscanf(argv[2], "%d", &numClusters) != 1)
{
printf("\n Argument for number of clusters is not an integer");
exit(0);
}
}
else
{
printf("\n Invalid input parameters");
exit(0);
}
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Number of points from the input file ---------------------------------------
numPoints = countPoints(datafile);
printf("\nInput size for points = %d, Size for clusters = %d\n", numPoints, numClusters);
// Allocating and initializing host variables -------------------------------
printf("Allocating host variables ................");
fflush(stdout);
startTime(&timer);
xarr_h = (float*)malloc(numPoints * sizeof(float));
if(xarr_h == NULL)
{
printf("\n Unable to allcate host variable");
exit(0);
}
yarr_h = (float*)malloc(numPoints * sizeof(float));
if(yarr_h == NULL)
{
printf("\n Unable to allocate host variable");
exit(0);
}
cx_h = (float*)malloc(numClusters * sizeof(float));
if(cx_h == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
cy_h = (float*)malloc(numClusters * sizeof(float));
if(cy_h == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
out_cx = (float*)malloc(numClusters * sizeof(float));
if(out_cx == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
out_cy = (float*)malloc(numClusters * sizeof(float));
if(out_cy == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Allocating device variables ----------------------------------------------
printf("\n Allocating device variables........");
fflush(stdout);
startTime(&timer);
cuda_ret = hipMalloc((void**)&xarr_d, numPoints*sizeof(float));
if(cuda_ret != hipSuccess)
{
printf("\n Unable to allocate device memory");
exit(0);
}
cuda_ret = hipMalloc((void**)&yarr_d, numPoints*sizeof(float));
if(cuda_ret != hipSuccess)
{
printf("\n Unable to allocate device memory");
exit(0);
}
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Random generation of points in 2-D plane ---------------------------------------
//observationGen(numPoints);
// Store Points in host variables -----------------------------------------------
storePoints(xarr_h, yarr_h, datafile, numPoints);
    // Randomly select distinct numClusters points from available points -----------------
clusterPoints(cx_h, cy_h, xarr_h, yarr_h, numClusters, numPoints);
// Initial cluster centers values --------------------------------------
/* printf("Initial values for cluster centres\n");
for(int j = 0; j < numClusters; ++j)
{
printf("For cluster %d :-\n",j+1);
printf("x-> ini = %f\n", cx_h[j]);
printf("y-> ini = %f\n", cy_h[j]);
}
*/
// Copy host variables to device memory ---------------------------------------------------
printf("\nCopying data from host to device\n");
fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(xarr_d, xarr_h, numPoints*sizeof(float), hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess)
{
printf("Unable to copy memory to device\n");
exit(0);
}
cuda_ret = hipMemcpy(yarr_d, yarr_h, numPoints*sizeof(float), hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess)
{
printf("Unable to copy memory to device\n");
exit(0);
}
hipDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Kernel invocation
printf("Launching kernel ...........\n");
fflush(stdout);
startTime(&timer);
/* Kernel will get setup and invoked inside findclusterInvok function in kernelinvoc.cu file*/
findclusterInvoc(xarr_d, yarr_d, cx_h, cy_h, out_cx, out_cy, numPoints, numClusters);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess)
{
printf("Unable to launch/execute kernel\n");
exit(0);
}
// Checking cluster centers values --------------------------------------
/* printf("Initial and final values\n");
for(int j = 0; j < numClusters; ++j)
{
printf("For cluster %d :-\n",j+1);
printf("x-> ini = %f, fin = %f\n", cx_h[j], out_cx[j]);
printf("y-> ini = %f, fin = %f\n", cy_h[j], out_cy[j]);
}
*/
stopTime(&timer);
printf("Elapsed time for kernel execution = %f s\n", elapsedTime(timer));
//Getting cluster centers in file outCenter.txt
clusterCenters(out_cx, out_cy, numClusters);
// Free Memory ----------------------------------------------------
hipFree(xarr_d);
hipFree(yarr_d);
    free(out_cx);
    free(out_cy);
free(xarr_h);
free(yarr_h);
free(cx_h);
free(cy_h);
return 0;
}
| c6d2c9dd5b7afb226db31fc6ac29b21ba7450daa.cu | /*****************************************************************************************************
*
* main.cu
*
*****************************************************************************************************/
#include <stdio.h>
#include "support.h"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize variables -----------------------------------------------------
float *xarr_h, *yarr_h;
float *xarr_d, *yarr_d;
float *cx_h, *cy_h;
float *out_cx, *out_cy;
int numPoints, numClusters;
char *datafile;
cudaError_t cuda_ret;
// Allocate values to variables through arguments
printf("\n Setting up the problem ........");
fflush(stdout);
startTime(&timer);
if(argc == 1)
{
datafile = "color_histogram68040.txt";
numClusters = 11;
}
else if(argc == 2)
{
/*if(sscanf(argv[1], "%s", &datafile) != 1)
{
printf("\nArgument is not an integer");
exit(0);
}*/
datafile = argv[1];
numClusters = 11;
}
else if(argc == 3)
{
/*if(sscanf(argv[1], "%s", &datafile) != 1)
{
printf("\n Argument for file of points is not a string");
exit(0);
}*/
datafile = argv[1];
if(sscanf(argv[2], "%d", &numClusters) != 1)
{
printf("\n Argument for number of clusters is not an integer");
exit(0);
}
}
else
{
printf("\n Invalid input parameters");
exit(0);
}
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Number of points from the input file ---------------------------------------
numPoints = countPoints(datafile);
printf("\nInput size for points = %d, Size for clusters = %d\n", numPoints, numClusters);
// Allocating and initializing host variables -------------------------------
printf("Allocating host variables ................");
fflush(stdout);
startTime(&timer);
xarr_h = (float*)malloc(numPoints * sizeof(float));
if(xarr_h == NULL)
{
printf("\n Unable to allcate host variable");
exit(0);
}
yarr_h = (float*)malloc(numPoints * sizeof(float));
if(yarr_h == NULL)
{
printf("\n Unable to allocate host variable");
exit(0);
}
cx_h = (float*)malloc(numClusters * sizeof(float));
if(cx_h == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
cy_h = (float*)malloc(numClusters * sizeof(float));
if(cy_h == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
out_cx = (float*)malloc(numClusters * sizeof(float));
if(out_cx == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
out_cy = (float*)malloc(numClusters * sizeof(float));
if(out_cy == NULL)
{
printf("\n Unable to allocate host variables");
exit(0);
}
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Allocating device variables ----------------------------------------------
printf("\n Allocating device variables........");
fflush(stdout);
startTime(&timer);
cuda_ret = cudaMalloc((void**)&xarr_d, numPoints*sizeof(float));
if(cuda_ret != cudaSuccess)
{
printf("\n Unable to allocate device memory");
exit(0);
}
cuda_ret = cudaMalloc((void**)&yarr_d, numPoints*sizeof(float));
if(cuda_ret != cudaSuccess)
{
printf("\n Unable to allocate device memory");
exit(0);
}
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Random generation of points in 2-D plane ---------------------------------------
//observationGen(numPoints);
// Store Points in host variables -----------------------------------------------
storePoints(xarr_h, yarr_h, datafile, numPoints);
    // Randomly select distinct numClusters points from available points -----------------
clusterPoints(cx_h, cy_h, xarr_h, yarr_h, numClusters, numPoints);
// Initial cluster centers values --------------------------------------
/* printf("Initial values for cluster centres\n");
for(int j = 0; j < numClusters; ++j)
{
printf("For cluster %d :-\n",j+1);
printf("x-> ini = %f\n", cx_h[j]);
printf("y-> ini = %f\n", cy_h[j]);
}
*/
// Copy host variables to device memory ---------------------------------------------------
printf("\nCopying data from host to device\n");
fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(xarr_d, xarr_h, numPoints*sizeof(float), cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess)
{
printf("Unable to copy memory to device\n");
exit(0);
}
cuda_ret = cudaMemcpy(yarr_d, yarr_h, numPoints*sizeof(float), cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess)
{
printf("Unable to copy memory to device\n");
exit(0);
}
cudaDeviceSynchronize();
stopTime(&timer);
printf("%f s\n", elapsedTime(timer));
// Kernel invocation
printf("Launching kernel ...........\n");
fflush(stdout);
startTime(&timer);
/* Kernel will get setup and invoked inside findclusterInvok function in kernelinvoc.cu file*/
findclusterInvoc(xarr_d, yarr_d, cx_h, cy_h, out_cx, out_cy, numPoints, numClusters);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess)
{
printf("Unable to launch/execute kernel\n");
exit(0);
}
// Checking cluster centers values --------------------------------------
/* printf("Initial and final values\n");
for(int j = 0; j < numClusters; ++j)
{
printf("For cluster %d :-\n",j+1);
printf("x-> ini = %f, fin = %f\n", cx_h[j], out_cx[j]);
printf("y-> ini = %f, fin = %f\n", cy_h[j], out_cy[j]);
}
*/
stopTime(&timer);
printf("Elapsed time for kernel execution = %f s\n", elapsedTime(timer));
//Getting cluster centers in file outCenter.txt
clusterCenters(out_cx, out_cy, numClusters);
// Free Memory ----------------------------------------------------
cudaFree(xarr_d);
cudaFree(yarr_d);
    free(out_cx);
    free(out_cy);
free(xarr_h);
free(yarr_h);
free(cx_h);
free(cy_h);
return 0;
}
|
0f4474be893f3e1c5486d679b69c39d94b10cff4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/NumericLimits.cuh>
#include <type_traits>
#include <ATen/native/hip/PersistentSoftmax.cuh>
namespace at {
namespace native {
namespace {
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + ::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - ::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimensions:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize inner dimension over y axis and outer over x.
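// Example: for an input of shape [N, C, H, W] softmaxed over dim == 1,
// outer_size == N, dim_size == C and inner_size == H * W.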
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = ::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
// XXX HIP function signature is not compatible yet.
uint32_t max_blocks;
hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
k, block_threads, smem_size);
max_active_blocks = max_blocks;
#else
hipOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = ::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < max_block_size) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = ::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
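// Each threadIdx.y row owns its own blockDim.x-wide slice of the shared buffer,
// so the rows reduce independently along x.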
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
      // These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when it's unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += ::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + ::exp(v - max_k);
}
const AccumT max_k;
};
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
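  // e.g. with blockDim.x == 1024 and a 32-wide warp, lanes 0..31 of the first warp
  // each fold one 32-element slice of smem before thread 0 reduces those partials.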
uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1;
if (threadIdx.x < C10_WARP_SIZE) {
int lane = threadIdx.x % C10_WARP_SIZE;
if (lane < blockDim.x / C10_WARP_SIZE) {
#pragma unroll
for (int i = 0; i < C10_WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]);
}
#if TORCH_HIP_VERSION >= 9000
__syncwarp(mask);
#endif
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
T tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
for (; offset < classes; offset += blockDim.x)
output[offset] = epilogue(input[offset]);
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>(
gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpGradOutput[ILP];
outscalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only");
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns a block per each element in the batch.
// XXX: it assumes that inner_size == 1
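    // inner_size == 1 means dim is the innermost dimension, so each softmax row is
    // contiguous in memory and a single block can own a single row.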
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size
);
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size
);
}
}
});
// This kernel runs in a 2D grid, where each application along y dimension has a fixed
// outer_size, and runs in parallel over inner_size. Dimension x is parallel over outer_size.
// Reductions over dim are done in a single-threaded manner.
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(hipGetLastError());
}
return output;
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (grad_.numel() == 0) {
return gI;
}
auto grad = grad_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size
);
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size);
} else {
hipLaunchKernelGGL(( cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), block.x * sizeof(accscalar_t), stream,
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size
);
}
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(),
outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
hipLaunchKernelGGL(( cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>)
, dim3(grid), dim3(block), smem_size, stream,
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(),
outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(hipGetLastError());
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float);
}
}
}
| 0f4474be893f3e1c5486d679b69c39d94b10cff4.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/TensorUtils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <c10/macros/Macros.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <type_traits>
#include <ATen/native/cuda/PersistentSoftmax.cuh>
namespace at {
namespace native {
namespace {
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxForwardEpilogue {
__device__ __forceinline__ LogSoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: logsum(max_input + std::log(sum)) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(input - logsum);
}
const AccumT logsum;
};
template<typename T, typename AccumT, typename OutT>
struct LogSoftMaxBackwardEpilogue {
__device__ __forceinline__ LogSoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - std::exp(static_cast<AccumT>(output)) * sum);
}
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxForwardEpilogue {
__device__ __forceinline__ SoftMaxForwardEpilogue(AccumT max_input, AccumT sum)
: max_input(max_input)
, sum(sum) {}
__device__ __forceinline__ OutT operator()(T input) const {
return static_cast<OutT>(std::exp(input - max_input) / sum);
}
const AccumT max_input;
const AccumT sum;
};
template<typename T, typename AccumT, typename OutT>
struct SoftMaxBackwardEpilogue {
__device__ __forceinline__ SoftMaxBackwardEpilogue(AccumT sum)
: sum(sum) {}
// XXX: gradOutput that we get here is really gradOutput * output
// Look for cmul in SoftMax_updateGradInput
__device__ __forceinline__ T operator()(OutT gradOutput, OutT output) const {
return static_cast<T>(gradOutput - output * sum);
}
const AccumT sum;
};
////////////////////////////////////////////////////////////////////////////////
// Spatial kernel (fast with large inner_size and small dim_size)
////////////////////////////////////////////////////////////////////////////////
// Let's assume that our input has been flattened to have only three dimensions:
// outer x dim x inner
// The spatial algorithm tries to parallelize along all of them.
// Within a 2d block threadIdx.y parallelizes over dim slices, and threads that
// share it will speed up reductions over dim (along axis x).
// The 2d grid is used to parallelize inner dimension over y axis and outer over x.
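// Example: for an input of shape [N, C, H, W] softmaxed over dim == 1,
// outer_size == N, dim_size == C and inner_size == H * W.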
inline dim3 SpatialSoftMax_getGridSize(
dim3 block, uint32_t max_active_blocks,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
// First, tile as many blocks as we can over the y axis
uint32_t inner_blocks = (inner_size + block.y - 1) / block.y;
if (inner_blocks > max_active_blocks)
inner_blocks = max_active_blocks;
// Fill the x axis with as many blocks as we can fit (a little more is ok too)
uint32_t outer_blocks = (max_active_blocks + inner_blocks - 1) / inner_blocks;
if (outer_blocks > outer_size)
outer_blocks = outer_size;
return dim3(outer_blocks, inner_blocks);
}
const int max_threads = 1024;
inline dim3 SpatialSoftMax_getBlockSize(
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size) {
uint32_t inner_threads = inner_size;
inner_threads = std::min(inner_threads, static_cast<uint32_t>(max_threads));
uint32_t dim_threads = 1;
if (inner_threads <= 64 && dim_size >= 64) {
while (inner_threads * dim_threads <= max_threads && dim_threads <= dim_size)
dim_threads *= 2;
dim_threads /= 2;
}
return dim3(dim_threads, inner_threads);
}
template<typename accscalar_t, typename Kernel>
void SpatialSoftMax_getLaunchSizes(
Kernel k,
uint64_t outer_size, uint64_t dim_size, uint64_t inner_size,
dim3& grid, dim3& block, uint32_t& smem_size) {
block = SpatialSoftMax_getBlockSize(outer_size, dim_size, inner_size);
uint32_t block_threads = block.x * block.y;
smem_size = block.x == 1 ? 0 : block_threads * sizeof(accscalar_t);
int max_active_blocks;
#ifdef __HIP_PLATFORM_HCC__
// XXX HIP function signature is not compatible yet.
uint32_t max_blocks;
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_blocks,
k, block_threads, smem_size);
max_active_blocks = max_blocks;
#else
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&max_active_blocks,
k, block_threads, smem_size);
#endif
max_active_blocks *= at::cuda::getCurrentDeviceProperties()->multiProcessorCount;
grid = SpatialSoftMax_getGridSize(block, max_active_blocks, outer_size, dim_size, inner_size);
}
inline dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) {
uint64_t block_size = 1;
uint64_t max_block_size = std::min(dim_size / ILP, static_cast<uint64_t>(max_threads));
while (block_size < max_block_size) block_size *= 2;
// Launch at least a single warp - the kernel assumes that.
block_size = std::max(block_size, static_cast<uint64_t>(C10_WARP_SIZE));
return dim3(block_size);
}
template<typename T>
struct Add {
__device__ __forceinline__ T operator()(T a, T b) const {
return a + b;
}
};
template<typename T>
struct Max {
__device__ __forceinline__ T operator()(T a, T b) const {
return a < b ? b : a;
}
};
// Note that it's not a complete block-wide reduction.
// Only threads that share threadIdx.y reduce values.
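// Each threadIdx.y row owns its own blockDim.x-wide slice of the shared buffer,
// so the rows reduce independently along x.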
template<typename T, template<typename> class ReduceOp>
__forceinline__ __device__
T spatialBlockReduceX(T *shared, T val) {
ReduceOp<T> r;
shared += threadIdx.y * blockDim.x;
__syncthreads();
shared[threadIdx.x] = val;
// NOTE: loop starts with __syncthreads()
int offset = blockDim.x / 2;
while (offset > 0) {
__syncthreads();
if (threadIdx.x < offset)
shared[threadIdx.x] = r(shared[threadIdx.x], shared[threadIdx.x + offset]);
offset /= 2;
}
__syncthreads();
return shared[0];
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxForward(
outscalar_t *output, scalar_t *input,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
////////////////////////////////////////////////////////////
// These two blocks are really equivalent, but specializing on
// blockDim.x == 1 makes the kernel faster when it's unused.
// I didn't want to thread an extra template parameter, and nvcc
// seems to be smart enough to hoist the if outside of the loops.
////////////////////////////////////////////////////////////
if (blockDim.x > 1) {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
max_input = spatialBlockReduceX<accscalar_t, Max>(sdata,max_input);
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
} else {
accscalar_t max_input = at::numeric_limits<accscalar_t>::lowest();
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
const accscalar_t value = static_cast<accscalar_t>(input[data_offset + d * dim_stride]);
max_input = Max<accscalar_t>()(max_input, value);
}
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += std::exp(static_cast<accscalar_t>(input[data_offset + d * dim_stride])
- max_input);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_input, sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
output[data_offset + d * dim_stride] = epilogue(input[data_offset + d * dim_stride]);
}
}
}
}
template <typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void cunn_SpatialSoftMaxBackward(
scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput,
uint32_t outer_size, uint32_t dim_size, uint32_t inner_size)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
const uint32_t outer_stride = inner_size * dim_size;
const uint32_t dim_stride = inner_size;
for (uint32_t outer_index = blockIdx.x; outer_index < outer_size; outer_index += gridDim.x) {
const uint32_t outer_offset = outer_index * outer_stride;
for (uint32_t inner_index = blockIdx.y * blockDim.y + threadIdx.y; inner_index < inner_size; inner_index += blockDim.y * gridDim.y) {
const uint32_t data_offset = outer_offset + inner_index;
// See the comment in forward kernel
if (blockDim.x > 1) {
accscalar_t sum = 0;
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x)
sum += gradOutput[data_offset + d * dim_stride];
sum = spatialBlockReduceX<accscalar_t, Add>(sdata, sum);
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = threadIdx.x; d < dim_size; d += blockDim.x) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
} else {
accscalar_t sum = 0;
for (uint32_t d = 0; d < dim_size; d++)
sum += gradOutput[data_offset + d * dim_stride];
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum);
for (uint32_t d = 0; d < dim_size; d++) {
gradInput[data_offset + d * dim_stride] =
epilogue(gradOutput[data_offset + d * dim_stride],
output[data_offset + d * dim_stride]);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Regular kernel (fast when dim_size is large; requires inner_size == 1)
////////////////////////////////////////////////////////////////////////////////
template <typename T, typename AccumT>
struct MaxFloat
{
__device__ __forceinline__ AccumT operator()(AccumT max, T v) const {
return ::max(max, (AccumT)v);
}
};
template<typename T, typename AccumT>
struct AddFloat
{
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + v;
}
};
template<typename T, typename AccumT>
struct SumExpFloat
{
__device__ __forceinline__ SumExpFloat(AccumT v)
: max_k(v) {}
__device__ __forceinline__ AccumT operator()(AccumT sum, T v) const {
return sum + std::exp(v - max_k);
}
const AccumT max_k;
};
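// Block-wide reduction: every thread writes its value to shared memory (smem must hold
// blockDim.x elements), lanes of the first warp each combine one warp's chunk, and thread 0
// combines the per-warp results before broadcasting smem[0] to the whole block.
// Assumes blockDim.x is a multiple of the warp size (guaranteed by SoftMax_getBlockSize).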
template <template<typename> class Reduction, typename AccumT>
__device__ __forceinline__ AccumT
blockReduce(AccumT* smem, AccumT val,
const Reduction<AccumT>& r,
AccumT defaultVal)
{
// To avoid RaW races from chaining blockReduce calls together, we need a sync here
__syncthreads();
smem[threadIdx.x] = val;
__syncthreads();
AccumT warpVal = defaultVal;
// First warp will perform per-warp reductions for the remaining warps
uint32_t mask = (((uint64_t)1) << (blockDim.x / C10_WARP_SIZE)) - 1;
if (threadIdx.x < C10_WARP_SIZE) {
int lane = threadIdx.x % C10_WARP_SIZE;
if (lane < blockDim.x / C10_WARP_SIZE) {
#pragma unroll
for (int i = 0; i < C10_WARP_SIZE; ++i) {
warpVal = r(warpVal, smem[lane * C10_WARP_SIZE + i]);
}
#if CUDA_VERSION >= 9000
__syncwarp(mask);
#endif
smem[lane] = warpVal;
}
}
__syncthreads();
// First thread will perform a reduction of the above per-warp reductions
AccumT blockVal = defaultVal;
if (threadIdx.x == 0) {
for (int i = 0; i < blockDim.x / C10_WARP_SIZE; ++i) {
blockVal = r(blockVal, smem[i]);
}
smem[0] = blockVal;
}
// Sync and broadcast
__syncthreads();
return smem[0];
}
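// Strided per-thread reduction with instruction-level parallelism: each iteration of the main
// loop issues ILP independent loads before combining them, and a plain strided loop handles the tail.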
template <template<typename, typename> class Reduction, int ILP, typename T, typename AccumT>
__device__ __forceinline__ AccumT
ilpReduce(T* data,
int size,
const Reduction<T, AccumT>& r,
AccumT defaultVal)
{
AccumT threadVal = defaultVal;
int offset = threadIdx.x;
int last = size % (ILP * blockDim.x);
// Body (unroll by ILP times)
for (; offset < size - last; offset += blockDim.x * ILP) {
T tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = data[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
threadVal = r(threadVal, tmp[j]);
}
// Epilogue
for (; offset < size; offset += blockDim.x)
threadVal = r(threadVal, data[offset]);
return threadVal;
}
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template <typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxForward(outscalar_t *output, scalar_t *input, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
// forward pointers to batch[blockIdx.x]
// each block handles a sample in the mini-batch
input += blockIdx.x * classes;
output += blockIdx.x * classes;
// find the max
accscalar_t threadMax = ilpReduce<MaxFloat, ILP, scalar_t, accscalar_t>(
input, classes, MaxFloat<scalar_t, accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
accscalar_t max_k = blockReduce<Max, accscalar_t>(
sdata, threadMax, Max<accscalar_t>(), -at::numeric_limits<accscalar_t>::max());
// reduce all values
accscalar_t threadExp = ilpReduce<SumExpFloat, ILP, scalar_t, accscalar_t>(
input, classes, SumExpFloat<scalar_t, accscalar_t>(max_k), static_cast<accscalar_t>(0));
accscalar_t sumAll = blockReduce<Add, accscalar_t>(
sdata, threadExp, Add<accscalar_t>(), static_cast<accscalar_t>(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(max_k, sumAll);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
scalar_t tmp[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j)
tmp[j] = input[offset + j * blockDim.x];
#pragma unroll
for (int j = 0; j < ILP; ++j)
output[offset + j * blockDim.x] = epilogue(tmp[j]);
}
for (; offset < classes; offset += blockDim.x)
output[offset] = epilogue(input[offset]);
}
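// Backward pass for the non-spatial case: each block owns one row of `classes` elements,
// first reduces gradOutput across the row, then applies the epilogue to every element.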
template <int ILP, typename scalar_t, typename accscalar_t, typename outscalar_t, template<typename, typename, typename> class Epilogue>
__global__ void
cunn_SoftMaxBackward(scalar_t *gradInput, outscalar_t *output, outscalar_t *gradOutput, int classes)
{
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
gradInput += blockIdx.x * classes;
output += blockIdx.x * classes;
gradOutput += blockIdx.x * classes;
accscalar_t threadSum = ilpReduce<AddFloat, 4, outscalar_t, accscalar_t>(
gradOutput, classes, AddFloat<outscalar_t, accscalar_t>(), accscalar_t(0));
accscalar_t sum_k = blockReduce<Add, accscalar_t>(
sdata, threadSum, Add<accscalar_t>(), accscalar_t(0));
Epilogue<scalar_t, accscalar_t, outscalar_t> epilogue(sum_k);
int offset = threadIdx.x;
int last = classes % (ILP * blockDim.x);
for (; offset < classes - last; offset += blockDim.x * ILP) {
outscalar_t tmpGradOutput[ILP];
outscalar_t tmpOutput[ILP];
#pragma unroll
for (int j = 0; j < ILP; ++j) {
tmpGradOutput[j] = gradOutput[offset + j * blockDim.x];
tmpOutput[j] = output[offset + j * blockDim.x];
}
#pragma unroll
for (int j = 0; j < ILP; ++j)
gradInput[offset + j * blockDim.x] = epilogue(tmpGradOutput[j], tmpOutput[j]);
}
for (; offset < classes; offset += blockDim.x)
gradInput[offset] = epilogue(gradOutput[offset], output[offset]);
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax(const Tensor & input_, const int64_t dim_, const bool half_to_float){
if (half_to_float) AT_ASSERTM(input_.scalar_type() == ScalarType::Half,"conversion is supported for Half type only");
auto input = input_.contiguous();
Tensor output = half_to_float ? at::empty_like(input, input.options().dtype(ScalarType::Float), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (input.dim() == 0) input = input.view(1);
int64_t dim = maybe_wrap_dim(dim_, input.dim());
TORCH_CHECK(dim >=0 && dim < input.dim(), "dim must be non-negative and less than input dimensions");
int64_t outer_size = 1;
int64_t dim_size = input.size(dim);
if (input.numel() > 0) {
int64_t inner_size = 1;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
for (int64_t i = 0; i < dim; ++i)
outer_size *= input.size(i);
for (int64_t i = dim + 1; i < input.dim(); ++i)
inner_size *= input.size(i);
// This kernel spawns one block per element in the batch.
// XXX: it assumes that inner_size == 1
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
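// Rows that fit the warp-persistent kernel (dim_size <= 1024 and <= 4096 bytes of input per row)
// go through dispatch_softmax_forward; larger rows use the block-wide cunn_SoftMaxForward kernel.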
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_forward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), dim_size
);
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_forward<scalar_t, accscalar_t, accscalar_t, is_log_softmax>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
cunn_SoftMaxForward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), dim_size
);
}
}
});
// The spatial kernel below runs in a 2D grid: dimension x is parallel over outer_size and
// dimension y over inner_size. Reductions over dim are performed by the threads that share
// a threadIdx.y (or by a single thread when blockDim.x == 1).
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "host_softmax", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxForward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
output.data_ptr<accscalar_t>(), input.data_ptr<scalar_t>(), outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(cudaGetLastError());
}
return output;
}
template<template<typename, typename, typename> class Epilogue, bool is_log_softmax>
Tensor host_softmax_backward(const Tensor &grad_, const Tensor &output_, int64_t dim_, bool half_to_float){
int64_t dim = maybe_wrap_dim(dim_, grad_.dim());
Tensor gI = half_to_float ? at::empty_like(grad_, grad_.options().dtype(ScalarType::Half), LEGACY_CONTIGUOUS_MEMORY_FORMAT) : at::empty_like(grad_, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (grad_.numel() == 0) {
return gI;
}
auto grad = grad_.contiguous();
static_assert(std::is_same<acc_type<at::Half, true>, float>::value, "accscalar_t for half should be float");
if (grad.dim() == 0) grad = grad.view(1);
TORCH_CHECK(dim >=0 && dim < grad.dim(), "dim must be non-negative and less than input dimensions");
auto output = output_.contiguous();
if (output.dim() == 0) output = output.view(1);
int64_t outer_size = 1;
int64_t dim_size = output.size(dim);
int64_t inner_size = 1;
for (int64_t i = 0; i < dim; ++i)
outer_size *= output.size(i);
for (int64_t i = dim + 1; i < output.dim(); ++i)
inner_size *= output.size(i);
// See descriptions of kernels above.
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (inner_size == 1) {
const int ILP = 2;
dim3 grid(outer_size);
dim3 block = SoftMax_getBlockSize(ILP, dim_size);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
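// Same dispatch as the forward pass: small rows use the warp-persistent backward kernel,
// larger rows fall back to the block-wide cunn_SoftMaxBackward kernel.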
if (!half_to_float) {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_backward<scalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), dim_size, dim_size, outer_size);
} else {
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(), dim_size
);
}
} else {
if (dim_size <= 1024 && dim_size*sizeof(scalar_t) <= 4096) {
dispatch_softmax_backward<accscalar_t, scalar_t, accscalar_t, is_log_softmax>(
gI.data_ptr<scalar_t>(), grad.data_ptr<accscalar_t>(), output.data_ptr<accscalar_t>(), dim_size, dim_size, outer_size);
} else {
cunn_SoftMaxBackward<ILP, scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, block.x * sizeof(accscalar_t), stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(), dim_size
);
}
}
});
} else {
uint32_t smem_size;
dim3 grid, block;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(gI.scalar_type(), "host_softmax_backward", [&] {
using accscalar_t = acc_type<scalar_t, true>;
if (!half_to_float) {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, scalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), grad.data_ptr<scalar_t>(),
outer_size, dim_size, inner_size
);
} else {
SpatialSoftMax_getLaunchSizes<accscalar_t>(
&cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>,
outer_size, dim_size, inner_size,
grid, block, smem_size);
cunn_SpatialSoftMaxBackward<scalar_t, accscalar_t, accscalar_t, Epilogue>
<<<grid, block, smem_size, stream>>>(
gI.data_ptr<scalar_t>(), output.data_ptr<accscalar_t>(), grad.data_ptr<accscalar_t>(),
outer_size, dim_size, inner_size
);
}
});
}
THCudaCheck(cudaGetLastError());
return gI;
}
}
Tensor log_softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<LogSoftMaxForwardEpilogue,true>(input, dim, half_to_float);
}
Tensor log_softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
return host_softmax_backward<LogSoftMaxBackwardEpilogue,true>(grad, output, dim, half_to_float);
}
Tensor softmax_cuda(const Tensor &input, const int64_t dim, const bool half_to_float){
return host_softmax<SoftMaxForwardEpilogue,false>(input, dim, half_to_float);
}
Tensor softmax_backward_cuda(const Tensor &grad, const Tensor &output, int64_t dim, const Tensor &input){
bool half_to_float = grad.scalar_type() != input.scalar_type();
if (half_to_float) {
AT_ASSERTM((grad.scalar_type() == ScalarType::Float && input.scalar_type() == ScalarType::Half), "expected input and grad types to match, or input to be at::Half and grad to be at::Float");
}
Tensor tmp = grad * output;
return host_softmax_backward<SoftMaxBackwardEpilogue,false>(tmp, output, dim, half_to_float);
}
}
}
|
898f84786df4c6cb590b332f83374b15d690cdfd.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.hip"
const int HIGHEST = 3;
const int ITER = 100;
const int WORKLOAD = 1;
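// Number of candidate parent sets evaluated per node: 1 + sum_{k=1..4} C(NODE_N-1, k), set in initial().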
int sizepernode;
// global var
float preScore = -99999999999.f;
float score = 0.f;
float maxScore[HIGHEST] = {-999999999.f};
bool orders[NODE_N][NODE_N];
bool preOrders[NODE_N][NODE_N];
bool preGraph[NODE_N][NODE_N];
bool bestGraph[HIGHEST][NODE_N][NODE_N];
bool graph[NODE_N][NODE_N];
float *localscore, *scores;
float *LG;
int *parents;
void initial(); // initialize orders and data
int genOrders(); // randomly swap two nodes in the current order
int ConCore(); // decide whether to keep or discard the new order
// get every possible set of parents for a node
void incr(int *bit, int n); // increment a binary counter by one
void incrS(int *bit, int n); // increment a base-STATE_N counter by one
// get every possible combination of state for a parent set
bool getState( int parN, int *state, int time);
float logGamma(int N); // log and gamma
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent);
void genScore();
void sortGraph();
void swap(int a, int b);
void Pre_logGamma();
int findindex(int *arr, int size);
int C(int n, int a);
int main(int argc, char** argv) {
if (argc != 3) {
printf("Usage: ./%s <path to output file> <repeat>\n", argv[0]);
return 1;
}
// save output in a file
FILE *fpout = fopen(argv[1], "w");
if (fpout == NULL) {
printf("Error: failed to open %s. Exit..\n", argv[1]);
return -1;
}
const int repeat = atoi(argv[2]);
int i, j, c = 0, tmp, a, b;
float tmpd;
printf("NODE_N=%d\nInitialization...\n", NODE_N);
srand(2);
initial(); // update sizepernode
scores = (float*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
parents = (int*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
Pre_logGamma();
int *D_data;
float *D_LG;
float *D_localscore;
float *D_Score;
bool *D_parent;
int *D_resP;
hipMalloc((void **)&D_data, NODE_N * DATA_N * sizeof(int));
hipMalloc((void **)&D_localscore, NODE_N * sizepernode * sizeof(float));
hipMalloc((void **)&D_LG, (DATA_N + 2) * sizeof(float));
hipMalloc((void **)&D_Score, (sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
hipMalloc((void **)&D_parent, NODE_N * sizeof(bool));
hipMalloc((void **)&D_resP, (sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
dim3 grid(sizepernode / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
hipMemset(D_localscore, 0.f, NODE_N * sizepernode * sizeof(float));
hipMemcpy(D_data, data, NODE_N * DATA_N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(D_LG, LG, (DATA_N + 2) * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (i = 0; i < repeat; i++)
hipLaunchKernelGGL(( genScoreKernel), dim3(grid), dim3(threads), 0, 0, sizepernode, D_localscore, D_data, D_LG);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of genScoreKernel: %f (s)\n", time * 1e-9f / repeat);
hipMemcpy(localscore, D_localscore, NODE_N * sizepernode * sizeof(float), hipMemcpyDeviceToHost);
long findBestGraph_time = 0;
i = 0;
while (i != ITER) {
i++;
score = 0;
for (a = 0; a < NODE_N; a++) {
for (j = 0; j < NODE_N; j++) {
orders[a][j] = preOrders[a][j];
}
}
tmp = rand() % 6;
for (j = 0; j < tmp; j++)
genOrders();
start = std::chrono::steady_clock::now();
score = findBestGraph(D_localscore, D_resP, D_Score, D_parent);
end = std::chrono::steady_clock::now();
findBestGraph_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
ConCore();
// store the top HIGHEST highest orders
if (c < HIGHEST) {
tmp = 1;
for (j = 0; j < c; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
}
}
if (tmp != 0) {
maxScore[c] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[c][a][b] = preGraph[a][b];
}
}
c++;
}
} else if (c == HIGHEST) {
sortGraph();
c++;
} else {
tmp = 1;
for (j = 0; j < HIGHEST; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
break;
}
}
if (tmp != 0 && preScore > maxScore[HIGHEST - 1]) {
maxScore[HIGHEST - 1] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[HIGHEST - 1][a][b] = preGraph[a][b];
}
}
b = HIGHEST - 1;
for (a = HIGHEST - 2; a >= 0; a--) {
if (maxScore[b] > maxScore[a]) {
swap(a, b);
tmpd = maxScore[a];
maxScore[a] = maxScore[b];
maxScore[b] = tmpd;
b = a;
}
}
}
}
} // endwhile
printf("Find best graph time %lf (s)\n", findBestGraph_time * 1e-9);
free(localscore);
free(scores);
free(parents);
free(LG);
hipFree(D_LG);
hipFree(D_data);
hipFree(D_localscore);
hipFree(D_parent);
hipFree(D_Score);
hipFree(D_resP);
for(j=0;j<HIGHEST;j++){
fprintf(fpout,"score:%f\n",maxScore[j]);
fprintf(fpout,"Best Graph:\n");
for(int a=0;a<NODE_N;a++){
for(int b=0;b<NODE_N;b++)
fprintf(fpout,"%d ",bestGraph[j][a][b]);
fprintf(fpout,"\n");
}
fprintf(fpout,"--------------------------------------------------------------------\n");
}
return 0;
}
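// For the current topological order, choose the best set of up to four parents for every node:
// computeKernel scores all candidate parent sets of a node on the GPU and returns per-block
// maxima, which the host scans to fill `graph` and accumulate the total network score.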
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent) {
float bestls = -99999999.f;
int bestparent[5];
int bestpN, total;
int node, index;
int pre[NODE_N] = {0};
int parent[NODE_N] = {0};
int posN = 0, i, j, parN, tmp, k, l;
float ls = -99999999999.f, score = 0;
int blocknum;
for (i = 0; i < NODE_N; i++)
for (j = 0; j < NODE_N; j++)
graph[i][j] = 0;
for (node = 0; node < NODE_N; node++) {
bestls = -99999999.f;
posN = 0;
for (i = 0; i < NODE_N; i++) {
if (orders[node][i] == 1) {
pre[posN++] = i;
}
}
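// NOTE: posN >= 0 always holds, so the GPU path below is always taken and the host-side
// enumeration in the else branch is effectively dead code.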
if (posN >= 0) {
total = C(posN, 4) + C(posN, 3) + C(posN, 2) + posN + 1;
blocknum = total / (256 * WORKLOAD) + 1;
hipMemset(D_resP, 0, blocknum * 4 * sizeof(int));
hipMemset(D_Score, -999999.f, blocknum * sizeof(float));
hipMemcpy(D_parent, orders[node], NODE_N * sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( computeKernel), dim3(blocknum), dim3(256), 256 * sizeof(float), 0,
WORKLOAD, sizepernode, D_localscore, D_parent, node, total, D_Score,
D_resP);
hipMemcpy(parents, D_resP, blocknum * 4 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(scores, D_Score, blocknum * sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < blocknum; i++) {
if (scores[i] > bestls) {
bestls = scores[i];
parN = 0;
for (tmp = 0; tmp < 4; tmp++) {
if (parents[i * 4 + tmp] < 0)
break;
bestparent[tmp] = parents[i * 4 + tmp];
parN++;
}
bestpN = parN;
}
}
} else {
if (posN >= 4) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
for (l = k + 1; l < posN; l++) {
parN = 4;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
if (pre[l] > node)
parent[4] = pre[l];
else
parent[4] = pre[l] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
}
if (posN >= 3) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
parN = 3;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
if (posN >= 2) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
parN = 2;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
if (posN >= 1) {
for (i = 0; i < posN; i++) {
parN = 1;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
parN = 0;
index = sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = 0;
}
}
if (bestls > -99999999.f) {
for (i = 0; i < bestpN; i++) {
if (bestparent[i] < node)
graph[node][bestparent[i] - 1] = 1;
else
graph[node][bestparent[i]] = 1;
}
score += bestls;
}
}
return score;
}
void sortGraph() {
float max = -99999999999999.f;
int maxi, i, j;
float tmp;
for (j = 0; j < HIGHEST - 1; j++) {
max = maxScore[j];
maxi = j;
for (i = j + 1; i < HIGHEST; i++) {
if (maxScore[i] > max) {
max = maxScore[i];
maxi = i;
}
}
swap(j, maxi);
tmp = maxScore[j];
maxScore[j] = max;
maxScore[maxi] = tmp;
}
}
void swap(int a, int b) {
int i, j;
bool tmp;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
tmp = bestGraph[a][i][j];
bestGraph[a][i][j] = bestGraph[b][i][j];
bestGraph[b][i][j] = tmp;
}
}
}
void initial() {
int i, j, tmp, a, b, r;
bool tmpd;
tmp = 1;
for (i = 1; i <= 4; i++) {
tmp += C(NODE_N - 1, i);
}
sizepernode = tmp;
tmp *= NODE_N;
localscore = (float*) malloc(tmp * sizeof(float));
for (i = 0; i < tmp; i++)
localscore[i] = 0;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++)
orders[i][j] = 0;
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < i; j++)
orders[i][j] = 1;
}
r = rand() % 10000;
for (i = 0; i < r; i++) {
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmpd = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmpd;
}
for (j = 0; j < NODE_N; j++) {
tmpd = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmpd;
}
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
}
}
}
// generate a random order (swap two random nodes)
int genOrders() {
int a, b, j;
bool tmp;
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmp = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmp;
}
for (j = 0; j < NODE_N; j++) {
tmp = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmp;
}
return 1;
}
// decide whether to keep or discard the new order (accept with probability min(1, exp(score - preScore)))
int ConCore() {
int i, j;
float tmp;
tmp = log((rand() % 100000) / 100000.0);
if (tmp < (score - preScore)) {
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
preGraph[i][j] = graph[i][j];
}
}
preScore = score;
return 1;
}
return 0;
}
void genScore() {
}
void Pre_logGamma() {
LG = (float*) malloc ((DATA_N + 2) * sizeof(float));
LG[1] = log(1.0);
float i;
for (i = 2; i <= DATA_N + 1; i++) {
LG[(int)i] = LG[(int)i - 1] + log((float)i);
}
}
void incr(int *bit, int n) {
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
void incrS(int *bit, int n) {
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
bool getState(int parN, int *state, int time) {
int j = 1;
j = pow(STATE_N, (float)parN) - 1;
if (time > j)
return false;
if (time >= 1)
incrS(state, 0);
return true;
}
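// Map a sorted parent set (arr[1..size], with arr[0] == 0) to its offset inside a node's block
// of the localscore table: skip all parent sets with fewer parents, then rank this combination
// among sets of the same size.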
int findindex(int *arr, int size) { // reminder: arr[0] has to be 0, size == array size - 1,
// and indexing starts from 0
int i, j, index = 0;
for (i = 1; i < size; i++) {
index += C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
int C(int n, int a) {
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
| 898f84786df4c6cb590b332f83374b15d690cdfd.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "kernels.cu"
const int HIGHEST = 3;
const int ITER = 100;
const int WORKLOAD = 1;
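// Number of candidate parent sets evaluated per node: 1 + sum_{k=1..4} C(NODE_N-1, k), set in initial().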
int sizepernode;
// global var
float preScore = -99999999999.f;
float score = 0.f;
float maxScore[HIGHEST] = {-999999999.f};
bool orders[NODE_N][NODE_N];
bool preOrders[NODE_N][NODE_N];
bool preGraph[NODE_N][NODE_N];
bool bestGraph[HIGHEST][NODE_N][NODE_N];
bool graph[NODE_N][NODE_N];
float *localscore, *scores;
float *LG;
int *parents;
void initial(); // initialize orders and data
int genOrders(); // randomly swap two nodes in the current order
int ConCore(); // decide whether to keep or discard the new order
// get every possible set of parents for a node
void incr(int *bit, int n); // increment a binary counter by one
void incrS(int *bit, int n); // increment a base-STATE_N counter by one
// get every possible combination of state for a parent set
bool getState( int parN, int *state, int time);
float logGamma(int N); // log and gamma
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent);
void genScore();
void sortGraph();
void swap(int a, int b);
void Pre_logGamma();
int findindex(int *arr, int size);
int C(int n, int a);
int main(int argc, char** argv) {
if (argc != 3) {
printf("Usage: ./%s <path to output file> <repeat>\n", argv[0]);
return 1;
}
// save output in a file
FILE *fpout = fopen(argv[1], "w");
if (fpout == NULL) {
printf("Error: failed to open %s. Exit..\n", argv[1]);
return -1;
}
const int repeat = atoi(argv[2]);
int i, j, c = 0, tmp, a, b;
float tmpd;
printf("NODE_N=%d\nInitialization...\n", NODE_N);
srand(2);
initial(); // update sizepernode
scores = (float*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
parents = (int*) malloc ((sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
Pre_logGamma();
int *D_data;
float *D_LG;
float *D_localscore;
float *D_Score;
bool *D_parent;
int *D_resP;
hipMalloc((void **)&D_data, NODE_N * DATA_N * sizeof(int));
hipMalloc((void **)&D_localscore, NODE_N * sizepernode * sizeof(float));
hipMalloc((void **)&D_LG, (DATA_N + 2) * sizeof(float));
hipMalloc((void **)&D_Score, (sizepernode / (256 * WORKLOAD) + 1) * sizeof(float));
hipMalloc((void **)&D_parent, NODE_N * sizeof(bool));
hipMalloc((void **)&D_resP, (sizepernode / (256 * WORKLOAD) + 1) * 4 * sizeof(int));
dim3 grid(sizepernode / 256 + 1, 1, 1);
dim3 threads(256, 1, 1);
hipMemset(D_localscore, 0.f, NODE_N * sizepernode * sizeof(float));
hipMemcpy(D_data, data, NODE_N * DATA_N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(D_LG, LG, (DATA_N + 2) * sizeof(float), hipMemcpyHostToDevice);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (i = 0; i < repeat; i++)
genScoreKernel<<<grid, threads>>>(sizepernode, D_localscore, D_data, D_LG);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of genScoreKernel: %f (s)\n", time * 1e-9f / repeat);
hipMemcpy(localscore, D_localscore, NODE_N * sizepernode * sizeof(float), hipMemcpyDeviceToHost);
long findBestGraph_time = 0;
i = 0;
while (i != ITER) {
i++;
score = 0;
for (a = 0; a < NODE_N; a++) {
for (j = 0; j < NODE_N; j++) {
orders[a][j] = preOrders[a][j];
}
}
tmp = rand() % 6;
for (j = 0; j < tmp; j++)
genOrders();
start = std::chrono::steady_clock::now();
score = findBestGraph(D_localscore, D_resP, D_Score, D_parent);
end = std::chrono::steady_clock::now();
findBestGraph_time += std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
ConCore();
// store the top HIGHEST highest orders
if (c < HIGHEST) {
tmp = 1;
for (j = 0; j < c; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
}
}
if (tmp != 0) {
maxScore[c] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[c][a][b] = preGraph[a][b];
}
}
c++;
}
} else if (c == HIGHEST) {
sortGraph();
c++;
} else {
tmp = 1;
for (j = 0; j < HIGHEST; j++) {
if (maxScore[j] == preScore) {
tmp = 0;
break;
}
}
if (tmp != 0 && preScore > maxScore[HIGHEST - 1]) {
maxScore[HIGHEST - 1] = preScore;
for (a = 0; a < NODE_N; a++) {
for (b = 0; b < NODE_N; b++) {
bestGraph[HIGHEST - 1][a][b] = preGraph[a][b];
}
}
b = HIGHEST - 1;
for (a = HIGHEST - 2; a >= 0; a--) {
if (maxScore[b] > maxScore[a]) {
swap(a, b);
tmpd = maxScore[a];
maxScore[a] = maxScore[b];
maxScore[b] = tmpd;
b = a;
}
}
}
}
} // endwhile
printf("Find best graph time %lf (s)\n", findBestGraph_time * 1e-9);
free(localscore);
free(scores);
free(parents);
free(LG);
hipFree(D_LG);
hipFree(D_data);
hipFree(D_localscore);
hipFree(D_parent);
hipFree(D_Score);
hipFree(D_resP);
for(j=0;j<HIGHEST;j++){
fprintf(fpout,"score:%f\n",maxScore[j]);
fprintf(fpout,"Best Graph:\n");
for(int a=0;a<NODE_N;a++){
for(int b=0;b<NODE_N;b++)
fprintf(fpout,"%d ",bestGraph[j][a][b]);
fprintf(fpout,"\n");
}
fprintf(fpout,"--------------------------------------------------------------------\n");
}
return 0;
}
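// For the current topological order, choose the best set of up to four parents for every node:
// computeKernel scores all candidate parent sets of a node on the GPU and returns per-block
// maxima, which the host scans to fill `graph` and accumulate the total network score.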
float findBestGraph(float* D_localscore, int* D_resP, float* D_Score, bool *D_parent) {
float bestls = -99999999.f;
int bestparent[5];
int bestpN, total;
int node, index;
int pre[NODE_N] = {0};
int parent[NODE_N] = {0};
int posN = 0, i, j, parN, tmp, k, l;
float ls = -99999999999.f, score = 0;
int blocknum;
for (i = 0; i < NODE_N; i++)
for (j = 0; j < NODE_N; j++)
graph[i][j] = 0;
for (node = 0; node < NODE_N; node++) {
bestls = -99999999.f;
posN = 0;
for (i = 0; i < NODE_N; i++) {
if (orders[node][i] == 1) {
pre[posN++] = i;
}
}
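// NOTE: posN >= 0 always holds, so the GPU path below is always taken and the host-side
// enumeration in the else branch is effectively dead code.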
if (posN >= 0) {
total = C(posN, 4) + C(posN, 3) + C(posN, 2) + posN + 1;
blocknum = total / (256 * WORKLOAD) + 1;
hipMemset(D_resP, 0, blocknum * 4 * sizeof(int));
hipMemset(D_Score, -999999.f, blocknum * sizeof(float));
hipMemcpy(D_parent, orders[node], NODE_N * sizeof(bool), hipMemcpyHostToDevice);
computeKernel<<<blocknum, 256, 256 * sizeof(float)>>>(
WORKLOAD, sizepernode, D_localscore, D_parent, node, total, D_Score,
D_resP);
hipMemcpy(parents, D_resP, blocknum * 4 * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(scores, D_Score, blocknum * sizeof(float), hipMemcpyDeviceToHost);
for (i = 0; i < blocknum; i++) {
if (scores[i] > bestls) {
bestls = scores[i];
parN = 0;
for (tmp = 0; tmp < 4; tmp++) {
if (parents[i * 4 + tmp] < 0)
break;
bestparent[tmp] = parents[i * 4 + tmp];
parN++;
}
bestpN = parN;
}
}
} else {
if (posN >= 4) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
for (l = k + 1; l < posN; l++) {
parN = 4;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
if (pre[l] > node)
parent[4] = pre[l];
else
parent[4] = pre[l] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
}
if (posN >= 3) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
for (k = j + 1; k < posN; k++) {
parN = 3;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
if (pre[k] > node)
parent[3] = pre[k];
else
parent[3] = pre[k] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
}
if (posN >= 2) {
for (i = 0; i < posN; i++) {
for (j = i + 1; j < posN; j++) {
parN = 2;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
if (pre[j] > node)
parent[2] = pre[j];
else
parent[2] = pre[j] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
}
if (posN >= 1) {
for (i = 0; i < posN; i++) {
parN = 1;
if (pre[i] > node)
parent[1] = pre[i];
else
parent[1] = pre[i] + 1;
index = findindex(parent, parN);
index += sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = parN;
for (tmp = 0; tmp < parN; tmp++)
bestparent[tmp] = parent[tmp + 1];
}
}
}
parN = 0;
index = sizepernode * node;
ls = localscore[index];
if (ls > bestls) {
bestls = ls;
bestpN = 0;
}
}
if (bestls > -99999999.f) {
for (i = 0; i < bestpN; i++) {
if (bestparent[i] < node)
graph[node][bestparent[i] - 1] = 1;
else
graph[node][bestparent[i]] = 1;
}
score += bestls;
}
}
return score;
}
void sortGraph() {
float max = -99999999999999.f;
int maxi, i, j;
float tmp;
for (j = 0; j < HIGHEST - 1; j++) {
max = maxScore[j];
maxi = j;
for (i = j + 1; i < HIGHEST; i++) {
if (maxScore[i] > max) {
max = maxScore[i];
maxi = i;
}
}
swap(j, maxi);
tmp = maxScore[j];
maxScore[j] = max;
maxScore[maxi] = tmp;
}
}
void swap(int a, int b) {
int i, j;
bool tmp;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
tmp = bestGraph[a][i][j];
bestGraph[a][i][j] = bestGraph[b][i][j];
bestGraph[b][i][j] = tmp;
}
}
}
void initial() {
int i, j, tmp, a, b, r;
bool tmpd;
tmp = 1;
for (i = 1; i <= 4; i++) {
tmp += C(NODE_N - 1, i);
}
sizepernode = tmp;
tmp *= NODE_N;
localscore = (float*) malloc(tmp * sizeof(float));
for (i = 0; i < tmp; i++)
localscore[i] = 0;
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++)
orders[i][j] = 0;
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < i; j++)
orders[i][j] = 1;
}
r = rand() % 10000;
for (i = 0; i < r; i++) {
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmpd = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmpd;
}
for (j = 0; j < NODE_N; j++) {
tmpd = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmpd;
}
}
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
}
}
}
// generate a random order (swap two random nodes)
int genOrders() {
int a, b, j;
bool tmp;
a = rand() % NODE_N;
b = rand() % NODE_N;
for (j = 0; j < NODE_N; j++) {
tmp = orders[a][j];
orders[a][j] = orders[b][j];
orders[b][j] = tmp;
}
for (j = 0; j < NODE_N; j++) {
tmp = orders[j][a];
orders[j][a] = orders[j][b];
orders[j][b] = tmp;
}
return 1;
}
// decide whether to keep or discard the new order (accept with probability min(1, exp(score - preScore)))
int ConCore() {
int i, j;
float tmp;
tmp = log((rand() % 100000) / 100000.0);
if (tmp < (score - preScore)) {
for (i = 0; i < NODE_N; i++) {
for (j = 0; j < NODE_N; j++) {
preOrders[i][j] = orders[i][j];
preGraph[i][j] = graph[i][j];
}
}
preScore = score;
return 1;
}
return 0;
}
void genScore() {
}
void Pre_logGamma() {
LG = (float*) malloc ((DATA_N + 2) * sizeof(float));
LG[1] = log(1.0);
float i;
for (i = 2; i <= DATA_N + 1; i++) {
LG[(int)i] = LG[(int)i - 1] + log((float)i);
}
}
void incr(int *bit, int n) {
bit[n]++;
if (bit[n] >= 2) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
void incrS(int *bit, int n) {
bit[n]++;
if (bit[n] >= STATE_N) {
bit[n] = 0;
incr(bit, n + 1);
}
return;
}
bool getState(int parN, int *state, int time) {
int j = 1;
j = pow(STATE_N, (float)parN) - 1;
if (time > j)
return false;
if (time >= 1)
incrS(state, 0);
return true;
}
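// Map a sorted parent set (arr[1..size], with arr[0] == 0) to its offset inside a node's block
// of the localscore table: skip all parent sets with fewer parents, then rank this combination
// among sets of the same size.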
int findindex(int *arr, int size) { // reminder: arr[0] has to be 0, size == array size - 1,
// and indexing starts from 0
int i, j, index = 0;
for (i = 1; i < size; i++) {
index += C(NODE_N - 1, i);
}
for (i = 1; i <= size - 1; i++) {
for (j = arr[i - 1] + 1; j <= arr[i] - 1; j++) {
index += C(NODE_N - 1 - j, size - i);
}
}
index += arr[size] - arr[size - 1];
return index;
}
int C(int n, int a) {
int i, res = 1, atmp = a;
for (i = 0; i < atmp; i++) {
res *= n;
n--;
}
for (i = 0; i < atmp; i++) {
res /= a;
a--;
}
return res;
}
|
3da5dacc0df61659d0b07d776379c176abd8f8ef.hip | // !!! This is a file automatically generated by hipify!!!
//This program is written by Abubakr Shafique ([email protected])
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "CUDA_Histogram.h"
__global__ void Histogram_CUDA(unsigned char* Image, int* Histogram);
void Histogram_Calculation_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram){
unsigned char* Dev_Image = NULL;
int* Dev_Histogram = NULL;
//allocate cuda variable memory
hipMalloc((void**)&Dev_Image, Height * Width * Channels);
hipMalloc((void**)&Dev_Histogram, 256 * sizeof(int));
//copy CPU data to GPU
hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice);
hipMemcpy(Dev_Histogram, Histogram, 256 * sizeof(int), hipMemcpyHostToDevice);
dim3 Grid_Image(Width, Height);
Histogram_CUDA << <Grid_Image, 1 >> >(Dev_Image, Dev_Histogram);
//copy memory back to CPU from GPU
hipMemcpy(Histogram, Dev_Histogram, 256 * sizeof(int), hipMemcpyDeviceToHost);
//free up the memory of GPU
hipFree(Dev_Histogram);
hipFree(Dev_Image);
}
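// One block per pixel (gridDim = Width x Height, a single thread per block); each block
// atomically increments the 256-bin histogram entry for its pixel value. The host-side
// Histogram array is copied in as the starting counts, so the caller should zero it first.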
__global__ void Histogram_CUDA(unsigned char* Image, int* Histogram){
int x = blockIdx.x;
int y = blockIdx.y;
int Image_Idx = x + y * gridDim.x;
atomicAdd(&Histogram[Image[Image_Idx]], 1);
}
| 3da5dacc0df61659d0b07d776379c176abd8f8ef.cu | //This program is written by Abubakr Shafique ([email protected])
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "CUDA_Histogram.h"
__global__ void Histogram_CUDA(unsigned char* Image, int* Histogram);
void Histogram_Calculation_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram){
unsigned char* Dev_Image = NULL;
int* Dev_Histogram = NULL;
//allocate cuda variable memory
cudaMalloc((void**)&Dev_Image, Height * Width * Channels);
cudaMalloc((void**)&Dev_Histogram, 256 * sizeof(int));
//copy CPU data to GPU
cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice);
cudaMemcpy(Dev_Histogram, Histogram, 256 * sizeof(int), cudaMemcpyHostToDevice);
dim3 Grid_Image(Width, Height);
Histogram_CUDA << <Grid_Image, 1 >> >(Dev_Image, Dev_Histogram);
//copy memory back to CPU from GPU
cudaMemcpy(Histogram, Dev_Histogram, 256 * sizeof(int), cudaMemcpyDeviceToHost);
//free up the memory of GPU
cudaFree(Dev_Histogram);
cudaFree(Dev_Image);
}
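// One block per pixel (gridDim = Width x Height, a single thread per block); each block
// atomically increments the 256-bin histogram entry for its pixel value. The host-side
// Histogram array is copied in as the starting counts, so the caller should zero it first.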
__global__ void Histogram_CUDA(unsigned char* Image, int* Histogram){
int x = blockIdx.x;
int y = blockIdx.y;
int Image_Idx = x + y * gridDim.x;
atomicAdd(&Histogram[Image[Image_Idx]], 1);
}
|
61b2dbee7e3ab948d7f2987512ceffb7b53672c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/opencv.hpp>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include <helper_cuda.h>
#include <cuda/Fast.hpp>
#include <iostream>
using namespace cv;
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace ORB_SLAM2 { namespace cuda {
///////////////////////////////////////////////////////////////////////////
// calcKeypoints
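// Precomputed lookup table used by the FAST segment test: indexed by the pattern of circle
// pixels that are brighter/darker than the candidate centre, it records whether the pattern
// contains a long enough contiguous arc to mark the candidate as a corner.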
__constant__ uchar c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
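// Example (illustration only), with x = 100 and th = 10:
//   diffType(115, 100, 10): diff = -15 < -th -> returns 1  (v > x + th)
//   diffType( 85, 100, 10): diff =  15 >  th -> returns 2  (v < x - th)
//   diffType(105, 100, 10): |diff| <= th     -> returns 0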
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
__device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
// mask1: one bit per circle pixel x with v > x + th (pixel darker than the centre);
// mask2: one bit per circle pixel x with v < x - th (pixel brighter than the centre).
// A keypoint needs more than 8 such bits in either mask, with the precomputed
// c_table lookup confirming that they form a valid FAST arc.
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
__device__ int cornerScore(const uint C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
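// The two branchless updates above are equivalent to
//   if (isKp) min = mid + 1; else max = mid - 1;
// so the loop converges on (and returns) the largest threshold at which the
// pixel still qualifies as a keypoint.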
}
return min - 1;
}
__device__ bool isKeyPoint2(const PtrStepSzb img, const int i, const int j, const int threshold, PtrStepi scoreMat) {
int v;
uint C[4] = {0,0,0,0};
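// C[0..3] pack the 16 pixels of the radius-3 Bresenham circle around (i, j),
// one byte per pixel and four pixels per word (see the shifts below);
// v is the centre pixel value.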
C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint>(img(i - 3, j));
C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0) {
return false;
}
C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint>(img(i + 3, j));
C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2)) {
scoreMat(i, j) = cornerScore(C, v, threshold);
return true;
} else {
scoreMat(i, j) = 0;
return false;
}
}
__device__ bool isMax(short2 loc, PtrStepi scoreMat) {
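// 3x3 non-maximum suppression on the score matrix; the strict '>' also suppresses ties.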
int score = scoreMat(loc.y, loc.x);
bool ismax =
score > scoreMat(loc.y - 1, loc.x - 1) &&
score > scoreMat(loc.y - 1, loc.x ) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y , loc.x - 1) &&
score > scoreMat(loc.y , loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) &&
score > scoreMat(loc.y + 1, loc.x ) &&
score > scoreMat(loc.y + 1, loc.x + 1);
return ismax;
}
__global__ void tileCalcKeypoints_kernel(const PtrStepSzb img, short2 * kpLoc, float * kpScore, const unsigned int maxKeypoints, const int highThreshold, const int lowThreshold, PtrStepi scoreMat, unsigned int * counter_ptr)
{
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = (threadIdx.y + blockIdx.y * blockDim.y) * 4 + 3;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ bool hasKp;
if (tid == 0) {
hasKp = false;
}
bool isKp[4] = {0};
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, highThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
isKp[t] = false;
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
hasKp = true;
const unsigned int ind = atomicInc(counter_ptr, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[ind] = loc;
kpScore[ind] = static_cast<float>(score);
}
}
}
}
// barrier
__syncthreads();
if (hasKp) return ;
// lower the threshold and try again
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, lowThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
const unsigned int ind = atomicInc(counter_ptr, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[ind] = loc;
kpScore[ind] = static_cast<float>(score);
}
}
}
}
}
__global__ void tileCalcKeypoints_mul_kernel(const PtrStepSzb img1,const PtrStepSzb img2,const PtrStepSzb img3,
short2 *kpLoc, float *kpScore, const unsigned int maxKeypoints, const int highThreshold, const int lowThreshold,
PtrStepi scoreMat1,PtrStepi scoreMat2,PtrStepi scoreMat3,
unsigned int * counter_ptr) {
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = (threadIdx.y + blockIdx.y * blockDim.y) * 4 + 3;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int c=blockIdx.z;
PtrStepSzb img;
PtrStepi scoreMat;
if(c==0)
{
img=img1;
scoreMat=scoreMat1;
}
if(c==1)
{
img=img2;
scoreMat=scoreMat2;
}
if(c==2)
{
img=img3;
scoreMat=scoreMat3;
}
__shared__ bool hasKp;
if (tid == 0) {
hasKp = false;
}
bool isKp[4] = {0};
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, highThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
isKp[t] = false;
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
hasKp = true;
const unsigned int ind = atomicInc(counter_ptr+c, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[maxKeypoints*c+ind] = loc;
kpScore[maxKeypoints*c+ind] = static_cast<float>(score);
}
}
}
}
// barrier
__syncthreads();
if (hasKp) return ;
// lower the threshold and try again
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, lowThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
const unsigned int ind = atomicInc(counter_ptr+c, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[maxKeypoints*c+ind] = loc;
kpScore[maxKeypoints*c+ind] = static_cast<float>(score);
}
}
}
}
}
GpuFast::GpuFast(int highThreshold, int lowThreshold, int maxKeypoints)
: highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints)
{
checkCudaErrors( hipStreamCreate(&stream[0]) );
checkCudaErrors( hipStreamCreate(&stream[1]) );
checkCudaErrors( hipStreamCreate(&stream[2]) );
cvStream[0] = StreamAccessor::wrapStream(stream[0]);
cvStream[1] = StreamAccessor::wrapStream(stream[1]);
cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( hipMallocManaged(&kpLoc, sizeof(short2) * maxKeypoints*CAMS) );
checkCudaErrors( hipMallocManaged(&kpScore, sizeof(float) * maxKeypoints*CAMS) );
checkCudaErrors( hipStreamAttachMemAsync(stream[0], kpLoc) );
checkCudaErrors( hipStreamAttachMemAsync(stream[0], kpScore) );
checkCudaErrors( hipMalloc(&counter_ptr, sizeof(unsigned int)*CAMS) );
}
GpuFast::~GpuFast() {
cvStream[0].~Stream();
cvStream[1].~Stream();
cvStream[2].~Stream();
checkCudaErrors( hipFree(counter_ptr) );
checkCudaErrors( hipFree(kpScore) );
checkCudaErrors( hipFree(kpLoc) );
checkCudaErrors( hipStreamDestroy(stream[0]) );
checkCudaErrors( hipStreamDestroy(stream[1]) );
checkCudaErrors( hipStreamDestroy(stream[2]) );
}
void GpuFast::detectAsync(InputArray _image,int c)
{
const cv::cuda::GpuMat image = _image.getGpuMat();
if (scoreMat[c].empty()) {
// If it is not empty, it was already allocated by a previous iteration.
// This assumes the image dimensions stay identical across iterations;
// otherwise the cached allocation would have the wrong size.
scoreMat[c] = GpuMat(image.size(), CV_32SC1);
}
scoreMat[c].setTo(Scalar::all(0), cvStream[c]);
checkCudaErrors( hipMemsetAsync(counter_ptr+c, 0, sizeof(unsigned int), stream[c]) );
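// Launch geometry (note): each thread of tileCalcKeypoints_kernel evaluates a
// 1x4 vertical strip of pixels, so the grid divides the row count by
// blockDim.y * 4; the 3-pixel border required by the radius-3 circle is
// skipped inside the kernel.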
dim3 dimBlock(32, 8);
dim3 dimGrid(divUp(image.cols, dimBlock.x), divUp(image.rows, dimBlock.y * 4));
hipLaunchKernelGGL(( tileCalcKeypoints_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[c], image, kpLoc+maxKeypoints*c, kpScore+maxKeypoints*c, maxKeypoints, highThreshold, lowThreshold, scoreMat[c], counter_ptr+c);
checkCudaErrors( hipGetLastError() );
}
void GpuFast::detectAsync_mul(InputArray _image0,InputArray _image1,InputArray _image2,int level)
{
cv::cuda::GpuMat image[3];
image[0]= _image0.getGpuMat();
image[1]= _image1.getGpuMat();
image[2]= _image2.getGpuMat();
for(int c=0;c<3;c++)
{
if (scoreMat_mul[c][level].empty())
{
// If it is not empty, it was already allocated by a previous iteration.
// This assumes the image dimensions stay identical across iterations;
// otherwise the cached allocation would have the wrong size.
scoreMat_mul[c][level] = GpuMat(image[c].size(), CV_32SC1);
}
scoreMat_mul[c][level].setTo(Scalar::all(0), cvStream[c]);
checkCudaErrors( hipMemsetAsync(counter_ptr+c, 0, sizeof(unsigned int), stream[c]) );
}
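// Grid z-dimension = 3: blockIdx.z selects the camera (img1/img2/img3 and the
// matching score matrix) inside tileCalcKeypoints_mul_kernel.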
dim3 dimBlock(32, 8);
dim3 dimGrid(divUp(image[0].cols, dimBlock.x), divUp(image[0].rows, dimBlock.y * 4),3);
hipLaunchKernelGGL(( tileCalcKeypoints_mul_kernel), dim3(dimGrid), dim3(dimBlock), 0, stream[0], image[0],image[1],image[2], kpLoc, kpScore, maxKeypoints, highThreshold, lowThreshold, scoreMat_mul[0][level], scoreMat_mul[1][level], scoreMat_mul[2][level], counter_ptr);
checkCudaErrors( hipGetLastError() );
}
void GpuFast::joinDetectAsync(std::vector<KeyPoint>& keypoints)
{
checkCudaErrors( hipStreamSynchronize(stream[0]) );
checkCudaErrors( hipMemcpyAsync(&count[0], counter_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost, stream[0]) );
checkCudaErrors( hipStreamSynchronize(stream[0]) );
count[0] = ::min(count[0], maxKeypoints);
keypoints.resize(count[0]);
for (int i = 0; i < count[0]; ++i) {
KeyPoint kp(kpLoc[i].x, kpLoc[i].y, FEATURE_SIZE, -1, kpScore[i]);
keypoints[i] = kp;
}
}
void GpuFast::joinDetectAsync_mul(std::vector<KeyPoint> *keypoints,InputArray _mask0,InputArray _mask1,InputArray _mask2)
{
cv::Mat mask[3];
mask[0]= _mask0.getMat();
mask[1]= _mask1.getMat();
mask[2]= _mask2.getMat();
checkCudaErrors( hipStreamSynchronize(stream[0]) );
for(int c=0;c<3;c++)
{
checkCudaErrors( hipMemcpyAsync(&count[c], counter_ptr+c, sizeof(unsigned int), hipMemcpyDeviceToHost, stream[c]) );
checkCudaErrors( hipStreamSynchronize(stream[c]) );
}
for(int c=0;c<3;c++)
{
count[c] = ::min(count[c], maxKeypoints);
keypoints[c].resize(count[c]);
int num=0;
for (int i = 0; i < count[c]; ++i)
{
KeyPoint kp(kpLoc[maxKeypoints*c+i].x, kpLoc[maxKeypoints*c+i].y, FEATURE_SIZE, -1, kpScore[maxKeypoints*c+i]);
if(mask[c].at<uchar>(kp.pt.y,kp.pt.x)==255)
{
keypoints[c][num++] = kp;
}
}
keypoints[c].resize(num);
}
}
void GpuFast::detect(InputArray _image, std::vector<KeyPoint>& keypoints) {
detectAsync(_image);
joinDetectAsync(keypoints);
}
__constant__ int c_u_max[32];
void IC_Angle::loadUMax(const int* u_max, int count)
{
checkCudaErrors( hipMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) );
}
__global__ void IC_Angle_kernel_mul(const PtrStepb image1,const PtrStepb image2,const PtrStepb image3,
KeyPoint * keypoints1,KeyPoint * keypoints2,KeyPoint * keypoints3,
const int npoints1,const int npoints2,const int npoints3,
const int minBorderX,const int minBorderY,const int octave,const int size,const int half_k)
{
__shared__ int smem0[8 * 32];
__shared__ int smem1[8 * 32];
int* srow0 = smem0 + threadIdx.y * blockDim.x;
int* srow1 = smem1 + threadIdx.y * blockDim.x;
int c = blockIdx.y;
PtrStepb image;
KeyPoint * keypoints;
int npoints;
if(c==0)
{
image=image1;
keypoints=keypoints1;
npoints=npoints1;
}
if(c==1)
{
image=image2;
keypoints=keypoints2;
npoints=npoints2;
}
if(c==2)
{
image=image3;
keypoints=keypoints3;
npoints=npoints3;
}
cv::cuda::device::plus<int> op;
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
if (threadIdx.x == 0)
{
keypoints[ptidx].pt.x += minBorderX;
keypoints[ptidx].pt.y += minBorderY;
keypoints[ptidx].octave = octave;
keypoints[ptidx].size = size;
}
__syncthreads();
int m_01 = 0, m_10 = 0;
const short2 loc = make_short2(keypoints[ptidx].pt.x, keypoints[ptidx].pt.y);
// Treat the center line differently, v=0
for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x)
m_10 += u * image(loc.y, loc.x + u);
reduce<32>(srow0, m_10, threadIdx.x, op);
for (int v = 1; v <= half_k; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
int val_plus = image(loc.y + v, loc.x + u);
int val_minus = image(loc.y - v, loc.x + u);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
reduce<32>(smem_tuple(srow0, srow1), thrust::tie(v_sum, m_sum), threadIdx.x, thrust::make_tuple(op, op));
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
// Orientation from the intensity centroid: atan2(m_01, m_10), mapped to degrees in [0, 360) below.
float kp_dir = atan2f((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * CV_PI_F);
kp_dir *= 180.0f / CV_PI_F;
keypoints[ptidx].angle = kp_dir;
}
}
}
IC_Angle::IC_Angle(unsigned int maxKeypoints) : maxKeypoints(maxKeypoints)
{
checkCudaErrors( hipStreamCreate(&stream[0]) );
checkCudaErrors( hipStreamCreate(&stream[1]) );
checkCudaErrors( hipStreamCreate(&stream[2]) );
_cvStream[0] = StreamAccessor::wrapStream(stream[0]);
_cvStream[1] = StreamAccessor::wrapStream(stream[1]);
_cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( hipMalloc(&keypoints[0], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints[1], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints[2], sizeof(KeyPoint) * maxKeypoints) );
for(int level=0;level<8;level++)
{
checkCudaErrors( hipMalloc(&keypoints_mul[0][level], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints_mul[1][level], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( hipMalloc(&keypoints_mul[2][level], sizeof(KeyPoint) * maxKeypoints) );
}
}
IC_Angle::~IC_Angle()
{
_cvStream[0].~Stream();
_cvStream[1].~Stream();
_cvStream[2].~Stream();
checkCudaErrors( hipFree(keypoints[0]) );
checkCudaErrors( hipFree(keypoints[1]) );
checkCudaErrors( hipFree(keypoints[2]) );
for(int level=0;level<8;level++)
{
checkCudaErrors( hipFree(keypoints_mul[0][level]) );
checkCudaErrors( hipFree(keypoints_mul[1][level]) );
checkCudaErrors( hipFree(keypoints_mul[2][level]) );
}
checkCudaErrors( hipStreamDestroy(stream[0]) );
checkCudaErrors( hipStreamDestroy(stream[1]) );
checkCudaErrors( hipStreamDestroy(stream[2]) );
}
void IC_Angle::launch_async_mul(std::vector<cv::cuda::GpuMat> _images,vector<vector<KeyPoint> > *_keypoints,vector<vector<KeyPoint*> > &keypoints_mul_GPU, int half_k, int minBorderX, int minBorderY, int octave, int size)
{
int npoints[3];
int npoint=0;
for(int c=0;c<3;c++){
if ((npoints[c]=_keypoints[c][octave].size()) == 0) {
continue;
}
checkCudaErrors( hipMemcpyAsync(keypoints_mul[c][octave], _keypoints[c][octave].data(), sizeof(KeyPoint) * npoints[c], hipMemcpyHostToDevice, stream[0]) );
if(npoints[c]>npoint)npoint=npoints[c];
}
if (npoint == 0) {
return ;
}
{
dim3 block(32, 8);
dim3 grid(divUp(npoint, block.y),3);
hipLaunchKernelGGL(( IC_Angle_kernel_mul), dim3(grid), dim3(block), 0, stream[0], _images[octave].rowRange(0, _images[octave].rows/3),_images[octave].rowRange(_images[octave].rows/3, _images[octave].rows/3*2),_images[octave].rowRange(_images[octave].rows/3*2, _images[octave].rows),
keypoints_mul[0][octave],keypoints_mul[1][octave],keypoints_mul[2][octave],
npoints[0],npoints[1],npoints[2],
minBorderX, minBorderY, octave, size,half_k);
keypoints_mul_GPU[octave][0]=keypoints_mul[0][octave];
keypoints_mul_GPU[octave][1]=keypoints_mul[1][octave];
keypoints_mul_GPU[octave][2]=keypoints_mul[2][octave];
checkCudaErrors( hipGetLastError() );
}
}
} } // namespace ORB_SLAM2::cuda
| 61b2dbee7e3ab948d7f2987512ceffb7b53672c3.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/opencv.hpp>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/utility.hpp"
#include "opencv2/core/cuda/reduce.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include <helper_cuda.h>
#include <cuda/Fast.hpp>
#include <iostream>
using namespace cv;
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace ORB_SLAM2 { namespace cuda {
///////////////////////////////////////////////////////////////////////////
// calcKeypoints
__constant__ uchar c_table[] = {
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0,
0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0,
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0,
0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
// 1 -> v > x + th
// 2 -> v < x - th
// 0 -> x - th <= v <= x + th
__device__ __forceinline__ int diffType(const int v, const int x, const int th)
{
const int diff = x - v;
return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1);
}
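// calcMask compares the center value v against the 16 Bresenham-circle pixels
// packed four-per-uint in C[0..3]. Bit k of mask1 is set when v > pixel_k + th
// (circle pixel darker than the center), bit k of mask2 when v < pixel_k - th
// (brighter). If a diametrically opposite pair both fall inside the threshold
// band, no 9-pixel contiguous arc is possible and the function returns early.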
__device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2)
{
mask1 = 0;
mask2 = 0;
int d1, d2;
d1 = diffType(v, C[0] & 0xff, th);
d2 = diffType(v, C[2] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 0;
mask2 |= ((d1 & 2) >> 1) << 0;
mask1 |= (d2 & 1) << 8;
mask2 |= ((d2 & 2) >> 1) << 8;
d1 = diffType(v, C[1] & 0xff, th);
d2 = diffType(v, C[3] & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 4;
mask2 |= ((d1 & 2) >> 1) << 4;
mask1 |= (d2 & 1) << 12;
mask2 |= ((d2 & 2) >> 1) << 12;
d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 2;
mask2 |= ((d1 & 2) >> 1) << 2;
mask1 |= (d2 & 1) << 10;
mask2 |= ((d2 & 2) >> 1) << 10;
d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (2 * 8)) & 0xff, th);
if ((d1 | d2) == 0)
return;
mask1 |= (d1 & 1) << 6;
mask2 |= ((d1 & 2) >> 1) << 6;
mask1 |= (d2 & 1) << 14;
mask2 |= ((d2 & 2) >> 1) << 14;
d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 1;
mask2 |= ((d1 & 2) >> 1) << 1;
mask1 |= (d2 & 1) << 9;
mask2 |= ((d2 & 2) >> 1) << 9;
d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 3;
mask2 |= ((d1 & 2) >> 1) << 3;
mask1 |= (d2 & 1) << 11;
mask2 |= ((d2 & 2) >> 1) << 11;
d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th);
/*if ((d1 | d2) == 0)
return;*/
mask1 |= (d1 & 1) << 5;
mask2 |= ((d1 & 2) >> 1) << 5;
mask1 |= (d2 & 1) << 13;
mask2 |= ((d2 & 2) >> 1) << 13;
d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th);
d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th);
mask1 |= (d1 & 1) << 7;
mask2 |= ((d1 & 2) >> 1) << 7;
mask1 |= (d2 & 1) << 15;
mask2 |= ((d2 & 2) >> 1) << 15;
}
// A point is a keypoint when more than 8 of the 16 circle pixels are darker
// (mask1) or brighter (mask2) than the center, with the precomputed c_table
// lookup deciding, per 16-bit mask, whether the pattern qualifies.
__device__ __forceinline__ bool isKeyPoint(int mask1, int mask2)
{
return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) ||
(__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7))));
}
__device__ int cornerScore(const uint C[4], const int v, const int threshold)
{
// binary search in [threshold + 1, 255]
int min = threshold + 1;
int max = 255;
while (min <= max)
{
const int mid = (min + max) >> 1;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, mid, mask1, mask2);
int isKp = static_cast<int>(isKeyPoint(mask1, mask2));
min = isKp * (mid + 1) + (isKp ^ 1) * min;
max = (isKp ^ 1) * (mid - 1) + isKp * max;
}
return min - 1;
}
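// isKeyPoint2 gathers the 16 circle pixels around (i, j) into C[0..3], runs the
// FAST test at the given threshold, and on success stores the corner score
// (the largest threshold that still passes, found by cornerScore) in scoreMat;
// otherwise it writes 0.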
__device__ bool isKeyPoint2(const PtrStepSzb img, const int i, const int j, const int threshold, PtrStepi scoreMat) {
int v;
uint C[4] = {0,0,0,0};
C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8;
C[2] |= static_cast<uint>(img(i - 3, j));
C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8);
C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8);
C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8);
C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8);
C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8;
C[3] |= static_cast<uint>(img(i, j - 3));
v = static_cast<int>(img(i, j));
C[1] |= static_cast<uint>(img(i, j + 3));
int d1 = diffType(v, C[1] & 0xff, threshold);
int d2 = diffType(v, C[3] & 0xff, threshold);
if ((d1 | d2) == 0) {
return false;
}
C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8;
C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8);
C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8);
C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8);
C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8);
C[0] |= static_cast<uint>(img(i + 3, j));
C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8;
int mask1 = 0;
int mask2 = 0;
calcMask(C, v, threshold, mask1, mask2);
if (isKeyPoint(mask1, mask2)) {
scoreMat(i, j) = cornerScore(C, v, threshold);
return true;
} else {
scoreMat(i, j) = 0;
return false;
}
}
__device__ bool isMax(short2 loc, PtrStepi scoreMat) {
int score = scoreMat(loc.y, loc.x);
bool ismax =
score > scoreMat(loc.y - 1, loc.x - 1) &&
score > scoreMat(loc.y - 1, loc.x ) &&
score > scoreMat(loc.y - 1, loc.x + 1) &&
score > scoreMat(loc.y , loc.x - 1) &&
score > scoreMat(loc.y , loc.x + 1) &&
score > scoreMat(loc.y + 1, loc.x - 1) &&
score > scoreMat(loc.y + 1, loc.x ) &&
score > scoreMat(loc.y + 1, loc.x + 1);
return ismax;
}
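// tileCalcKeypoints_kernel: each thread tests one column position across 4
// consecutive rows. A first pass uses highThreshold; candidates that survive
// 3x3 non-maximum suppression (isMax) are appended to kpLoc/kpScore through an
// atomic counter. If no thread in the block found a keypoint, a second pass
// repeats the test with lowThreshold.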
__global__ void tileCalcKeypoints_kernel(const PtrStepSzb img, short2 * kpLoc, float * kpScore, const unsigned int maxKeypoints, const int highThreshold, const int lowThreshold, PtrStepi scoreMat, unsigned int * counter_ptr)
{
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = (threadIdx.y + blockIdx.y * blockDim.y) * 4 + 3;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
__shared__ bool hasKp;
if (tid == 0) {
hasKp = false;
}
bool isKp[4] = {0};
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, highThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
isKp[t] = false;
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
hasKp = true;
const unsigned int ind = atomicInc(counter_ptr, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[ind] = loc;
kpScore[ind] = static_cast<float>(score);
}
}
}
}
// barrier
__syncthreads();
if (hasKp) return ;
// lower the threshold and try again
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, lowThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
const unsigned int ind = atomicInc(counter_ptr, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[ind] = loc;
kpScore[ind] = static_cast<float>(score);
}
}
}
}
}
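// tileCalcKeypoints_mul_kernel: three-camera variant of the kernel above.
// blockIdx.z selects which image/score matrix to process, and each camera c
// uses its own counter (counter_ptr + c) and its own maxKeypoints-sized slice
// of the kpLoc/kpScore output arrays.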
__global__ void tileCalcKeypoints_mul_kernel(const PtrStepSzb img1,const PtrStepSzb img2,const PtrStepSzb img3,
short2 *kpLoc, float *kpScore, const unsigned int maxKeypoints, const int highThreshold, const int lowThreshold,
PtrStepi scoreMat1,PtrStepi scoreMat2,PtrStepi scoreMat3,
unsigned int * counter_ptr) {
const int j = threadIdx.x + blockIdx.x * blockDim.x + 3;
const int i = (threadIdx.y + blockIdx.y * blockDim.y) * 4 + 3;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int c=blockIdx.z;
PtrStepSzb img;
PtrStepi scoreMat;
if(c==0)
{
img=img1;
scoreMat=scoreMat1;
}
if(c==1)
{
img=img2;
scoreMat=scoreMat2;
}
if(c==2)
{
img=img3;
scoreMat=scoreMat3;
}
__shared__ bool hasKp;
if (tid == 0) {
hasKp = false;
}
bool isKp[4] = {0};
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, highThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
isKp[t] = false;
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
hasKp = true;
const unsigned int ind = atomicInc(counter_ptr+c, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[maxKeypoints*c+ind] = loc;
kpScore[maxKeypoints*c+ind] = static_cast<float>(score);
}
}
}
}
// barrier
__syncthreads();
if (hasKp) return ;
// lower the threshold and try again
for (int t = 0; t < 4; ++t) {
if (i+t < img.rows - 3 && j < img.cols - 3) {
isKp[t] = isKeyPoint2(img, i+t, j, lowThreshold, scoreMat);
}
}
// barrier
__syncthreads();
for (int t = 0; t < 4; ++t) {
if (isKp[t]) {
short2 loc = make_short2(j, i+t);
if (isMax(loc, scoreMat)) {
int score = scoreMat(loc.y, loc.x);
const unsigned int ind = atomicInc(counter_ptr+c, (unsigned int)(-1));
if (ind < maxKeypoints) {
kpLoc[maxKeypoints*c+ind] = loc;
kpScore[maxKeypoints*c+ind] = static_cast<float>(score);
}
}
}
}
}
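// GpuFast owns one CUDA stream per camera, managed-memory buffers sized for
// CAMS * maxKeypoints keypoint locations and scores, and one device-side
// counter per camera.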
GpuFast::GpuFast(int highThreshold, int lowThreshold, int maxKeypoints)
: highThreshold(highThreshold), lowThreshold(lowThreshold), maxKeypoints(maxKeypoints)
{
checkCudaErrors( cudaStreamCreate(&stream[0]) );
checkCudaErrors( cudaStreamCreate(&stream[1]) );
checkCudaErrors( cudaStreamCreate(&stream[2]) );
cvStream[0] = StreamAccessor::wrapStream(stream[0]);
cvStream[1] = StreamAccessor::wrapStream(stream[1]);
cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( cudaMallocManaged(&kpLoc, sizeof(short2) * maxKeypoints*CAMS) );
checkCudaErrors( cudaMallocManaged(&kpScore, sizeof(float) * maxKeypoints*CAMS) );
checkCudaErrors( cudaStreamAttachMemAsync(stream[0], kpLoc) );
checkCudaErrors( cudaStreamAttachMemAsync(stream[0], kpScore) );
checkCudaErrors( cudaMalloc(&counter_ptr, sizeof(unsigned int)*CAMS) );
}
GpuFast::~GpuFast() {
cvStream[0].~Stream();
cvStream[1].~Stream();
cvStream[2].~Stream();
checkCudaErrors( cudaFree(counter_ptr) );
checkCudaErrors( cudaFree(kpScore) );
checkCudaErrors( cudaFree(kpLoc) );
checkCudaErrors( cudaStreamDestroy(stream[0]) );
checkCudaErrors( cudaStreamDestroy(stream[1]) );
checkCudaErrors( cudaStreamDestroy(stream[2]) );
}
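// detectAsync launches tileCalcKeypoints_kernel for camera c on its own
// stream; the grid divides the row count by blockDim.y * 4 because each
// thread covers four rows.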
void GpuFast::detectAsync(InputArray _image,int c)
{
const cv::cuda::GpuMat image = _image.getGpuMat();
if (scoreMat[c].empty()) {
// If it is not empty, it was already allocated by a previous iteration;
// the image dimensions are assumed to stay consistent across iterations,
// otherwise this will break.
scoreMat[c] = GpuMat(image.size(), CV_32SC1);
}
scoreMat[c].setTo(Scalar::all(0), cvStream[c]);
checkCudaErrors( cudaMemsetAsync(counter_ptr+c, 0, sizeof(unsigned int), stream[c]) );
dim3 dimBlock(32, 8);
dim3 dimGrid(divUp(image.cols, dimBlock.x), divUp(image.rows, dimBlock.y * 4));
tileCalcKeypoints_kernel<<<dimGrid, dimBlock, 0, stream[c]>>>(image, kpLoc+maxKeypoints*c, kpScore+maxKeypoints*c, maxKeypoints, highThreshold, lowThreshold, scoreMat[c], counter_ptr+c);
checkCudaErrors( cudaGetLastError() );
}
void GpuFast::detectAsync_mul(InputArray _image0,InputArray _image1,InputArray _image2,int level)
{
cv::cuda::GpuMat image[3];
image[0]= _image0.getGpuMat();
image[1]= _image1.getGpuMat();
image[2]= _image2.getGpuMat();
for(int c=0;c<3;c++)
{
if (scoreMat_mul[c][level].empty())
{
// If it is not empty, it was already allocated by a previous iteration;
// the image dimensions are assumed to stay consistent across iterations,
// otherwise this will break.
scoreMat_mul[c][level] = GpuMat(image[c].size(), CV_32SC1);
}
scoreMat_mul[c][level].setTo(Scalar::all(0), cvStream[c]);
checkCudaErrors( cudaMemsetAsync(counter_ptr+c, 0, sizeof(unsigned int), stream[c]) );
}
dim3 dimBlock(32, 8);
dim3 dimGrid(divUp(image[0].cols, dimBlock.x), divUp(image[0].rows, dimBlock.y * 4),3);
tileCalcKeypoints_mul_kernel<<<dimGrid, dimBlock, 0, stream[0]>>>(image[0],image[1],image[2], kpLoc, kpScore, maxKeypoints, highThreshold, lowThreshold, scoreMat_mul[0][level], scoreMat_mul[1][level], scoreMat_mul[2][level], counter_ptr);
checkCudaErrors( cudaGetLastError() );
}
void GpuFast::joinDetectAsync(std::vector<KeyPoint>& keypoints)
{
checkCudaErrors( cudaStreamSynchronize(stream[0]) );
checkCudaErrors( cudaMemcpyAsync(&count[0], counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[0]) );
checkCudaErrors( cudaStreamSynchronize(stream[0]) );
count[0] = std::min(count[0], maxKeypoints);
keypoints.resize(count[0]);
for (int i = 0; i < count[0]; ++i) {
KeyPoint kp(kpLoc[i].x, kpLoc[i].y, FEATURE_SIZE, -1, kpScore[i]);
keypoints[i] = kp;
}
}
void GpuFast::joinDetectAsync_mul(std::vector<KeyPoint> *keypoints,InputArray _mask0,InputArray _mask1,InputArray _mask2)
{
cv::Mat mask[3];
mask[0]= _mask0.getMat();
mask[1]= _mask1.getMat();
mask[2]= _mask2.getMat();
checkCudaErrors( cudaStreamSynchronize(stream[0]) );
for(int c=0;c<3;c++)
{
checkCudaErrors( cudaMemcpyAsync(&count[c], counter_ptr+c, sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[c]) );
checkCudaErrors( cudaStreamSynchronize(stream[c]) );
}
for(int c=0;c<3;c++)
{
count[c] = std::min(count[c], maxKeypoints);
keypoints[c].resize(count[c]);
int num=0;
for (int i = 0; i < count[c]; ++i)
{
KeyPoint kp(kpLoc[maxKeypoints*c+i].x, kpLoc[maxKeypoints*c+i].y, FEATURE_SIZE, -1, kpScore[maxKeypoints*c+i]);
if(mask[c].at<uchar>(kp.pt.y,kp.pt.x)==255)
{
keypoints[c][num++] = kp;
}
}
keypoints[c].resize(num);
}
}
void GpuFast::detect(InputArray _image, std::vector<KeyPoint>& keypoints) {
detectAsync(_image);
joinDetectAsync(keypoints);
}
__constant__ int c_u_max[32];
void IC_Angle::loadUMax(const int* u_max, int count)
{
checkCudaErrors( cudaMemcpyToSymbol(c_u_max, u_max, count * sizeof(int)) );
}
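// IC_Angle_kernel_mul computes the intensity-centroid orientation of each
// keypoint: one warp accumulates the patch moments m_10 and m_01 over a
// circular patch of radius half_k (row extents come from c_u_max), and the
// angle is atan2(m_01, m_10) converted to degrees. blockIdx.y selects one of
// the three stacked camera images, and the keypoint coordinates are offset by
// (minBorderX, minBorderY) while octave and size are stamped in.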
__global__ void IC_Angle_kernel_mul(const PtrStepb image1,const PtrStepb image2,const PtrStepb image3,
KeyPoint * keypoints1,KeyPoint * keypoints2,KeyPoint * keypoints3,
const int npoints1,const int npoints2,const int npoints3,
const int minBorderX,const int minBorderY,const int octave,const int size,const int half_k)
{
__shared__ int smem0[8 * 32];
__shared__ int smem1[8 * 32];
int* srow0 = smem0 + threadIdx.y * blockDim.x;
int* srow1 = smem1 + threadIdx.y * blockDim.x;
int c = blockIdx.y;
PtrStepb image;
KeyPoint * keypoints;
int npoints;
if(c==0)
{
image=image1;
keypoints=keypoints1;
npoints=npoints1;
}
if(c==1)
{
image=image2;
keypoints=keypoints2;
npoints=npoints2;
}
if(c==2)
{
image=image3;
keypoints=keypoints3;
npoints=npoints3;
}
cv::cuda::device::plus<int> op;
const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
if (ptidx < npoints)
{
if (threadIdx.x == 0)
{
keypoints[ptidx].pt.x += minBorderX;
keypoints[ptidx].pt.y += minBorderY;
keypoints[ptidx].octave = octave;
keypoints[ptidx].size = size;
}
__syncthreads();
int m_01 = 0, m_10 = 0;
const short2 loc = make_short2(keypoints[ptidx].pt.x, keypoints[ptidx].pt.y);
// Treat the center line differently, v=0
for (int u = threadIdx.x - half_k; u <= half_k; u += blockDim.x)
m_10 += u * image(loc.y, loc.x + u);
reduce<32>(srow0, m_10, threadIdx.x, op);
for (int v = 1; v <= half_k; ++v)
{
// Proceed over the two lines
int v_sum = 0;
int m_sum = 0;
const int d = c_u_max[v];
for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
{
int val_plus = image(loc.y + v, loc.x + u);
int val_minus = image(loc.y - v, loc.x + u);
v_sum += (val_plus - val_minus);
m_sum += u * (val_plus + val_minus);
}
reduce<32>(smem_tuple(srow0, srow1), thrust::tie(v_sum, m_sum), threadIdx.x, thrust::make_tuple(op, op));
m_10 += m_sum;
m_01 += v * v_sum;
}
if (threadIdx.x == 0)
{
// keypoint orientation: angle of the intensity centroid, atan2(m_01, m_10)
float kp_dir = atan2f((float)m_01, (float)m_10);
kp_dir += (kp_dir < 0) * (2.0f * CV_PI_F);
kp_dir *= 180.0f / CV_PI_F;
keypoints[ptidx].angle = kp_dir;
}
}
}
IC_Angle::IC_Angle(unsigned int maxKeypoints) : maxKeypoints(maxKeypoints)
{
checkCudaErrors( cudaStreamCreate(&stream[0]) );
checkCudaErrors( cudaStreamCreate(&stream[1]) );
checkCudaErrors( cudaStreamCreate(&stream[2]) );
_cvStream[0] = StreamAccessor::wrapStream(stream[0]);
_cvStream[1] = StreamAccessor::wrapStream(stream[1]);
_cvStream[2] = StreamAccessor::wrapStream(stream[2]);
checkCudaErrors( cudaMalloc(&keypoints[0], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints[1], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints[2], sizeof(KeyPoint) * maxKeypoints) );
for(int level=0;level<8;level++)
{
checkCudaErrors( cudaMalloc(&keypoints_mul[0][level], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints_mul[1][level], sizeof(KeyPoint) * maxKeypoints) );
checkCudaErrors( cudaMalloc(&keypoints_mul[2][level], sizeof(KeyPoint) * maxKeypoints) );
}
}
IC_Angle::~IC_Angle()
{
_cvStream[0].~Stream();
_cvStream[1].~Stream();
_cvStream[2].~Stream();
checkCudaErrors( cudaFree(keypoints[0]) );
checkCudaErrors( cudaFree(keypoints[1]) );
checkCudaErrors( cudaFree(keypoints[2]) );
for(int level=0;level<8;level++)
{
checkCudaErrors( cudaFree(keypoints_mul[0][level]) );
checkCudaErrors( cudaFree(keypoints_mul[1][level]) );
checkCudaErrors( cudaFree(keypoints_mul[2][level]) );
}
checkCudaErrors( cudaStreamDestroy(stream[0]) );
checkCudaErrors( cudaStreamDestroy(stream[1]) );
checkCudaErrors( cudaStreamDestroy(stream[2]) );
}
void IC_Angle::launch_async_mul(std::vector<cv::cuda::GpuMat> _images,vector<vector<KeyPoint> > *_keypoints,vector<vector<KeyPoint*> > &keypoints_mul_GPU, int half_k, int minBorderX, int minBorderY, int octave, int size)
{
int npoints[3];
int npoint=0;
for(int c=0;c<3;c++){
if ((npoints[c]=_keypoints[c][octave].size()) == 0) {
continue;
}
checkCudaErrors( cudaMemcpyAsync(keypoints_mul[c][octave], _keypoints[c][octave].data(), sizeof(KeyPoint) * npoints[c], cudaMemcpyHostToDevice, stream[0]) );
if(npoints[c]>npoint)npoint=npoints[c];
}
if (npoint == 0) {
return ;
}
{
dim3 block(32, 8);
dim3 grid(divUp(npoint, block.y),3);
IC_Angle_kernel_mul<<<grid, block, 0, stream[0]>>>(_images[octave].rowRange(0, _images[octave].rows/3),_images[octave].rowRange(_images[octave].rows/3, _images[octave].rows/3*2),_images[octave].rowRange(_images[octave].rows/3*2, _images[octave].rows),
keypoints_mul[0][octave],keypoints_mul[1][octave],keypoints_mul[2][octave],
npoints[0],npoints[1],npoints[2],
minBorderX, minBorderY, octave, size,half_k);
keypoints_mul_GPU[octave][0]=keypoints_mul[0][octave];
keypoints_mul_GPU[octave][1]=keypoints_mul[1][octave];
keypoints_mul_GPU[octave][2]=keypoints_mul[2][octave];
checkCudaErrors( cudaGetLastError() );
}
}
} } // namespace fast
|
78a835d364fff31f981104f0906e71d2f08b8e22.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
using namespace std;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
#define CSC(call) do { \
hipError_t res = call; \
if (res != hipSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, hipGetErrorString(res)); \
exit(0); \
} \
} while (0)
texture<uchar4, 2, hipReadModeElementType> tex;
class BinaryImage {
public:
uint w;
uint h;
uchar4 *data;
BinaryImage() : w(0), h(0), data(NULL) {}
BinaryImage(string path) {
FILE *fin = fopen(path.c_str(), "rb");
if (!fin) {
printf("File %s not found\n", path.c_str());
return;
}
fread(&w, sizeof(uint), 1, fin);
fread(&h, sizeof(uint), 1, fin);
data = new uchar4[w * h];
fread(data, sizeof(uchar4), w * h, fin);
fclose(fin);
}
~BinaryImage() {
if (data != NULL) {
delete[] data;
}
}
void toFile(string path) {
FILE *fout = fopen(path.c_str(), "wb");
if (!fout) {
printf("File %s not found\n", path.c_str());
return;
}
cout << w << ' ' << h << endl;
fwrite(&w, sizeof(uint), 1, fout);
fwrite(&h, sizeof(uint), 1, fout);
fwrite(data, sizeof(uchar4), w * h, fout);
fclose(fout);
}
size_t size() {
return w * h;
}
};
__device__ uchar getMedian(ushort *cnt, int mid) {
int curNum = 0;
for (int i = 0; i < 256; i++) {
curNum += cnt[i];
if (curNum > mid) {
return i;
}
}
return 255;
}
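// getPixelColor applies a median filter at (x, y): it builds per-channel
// histograms over the (2*radius+1)^2 window read through the 2D texture,
// skipping samples outside the image, and takes the median of each channel;
// the alpha channel is copied from the center pixel.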
__device__ uchar4 getPixelColor(int x, int y, int radius, uint w, uint h) {
uchar4 p;
ushort cntR[256];
ushort cntG[256];
ushort cntB[256];
uchar r, g, b;
for (int i = 0; i < 256; i++) {
cntR[i] = cntG[i] = cntB[i] = 0;
}
int mid = 0;
for (int i = x - radius; i <= x + radius; i++) {
for (int j = y - radius; j <= y + radius; j++) {
if (i < 0 || j < 0 || i >= w || j >= h) {
continue;
}
p = tex2D(tex, i, j);
cntR[p.x]++;
cntG[p.y]++;
cntB[p.z]++;
mid++;
}
}
mid /= 2;
r = getMedian(cntR, mid);
g = getMedian(cntG, mid);
b = getMedian(cntB, mid);
return make_uchar4(r, g, b, tex2D(tex, x, y).w);
}
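// Grid-stride kernel: each thread walks the image with strides of
// gridDim * blockDim in x and y, writing the median-filtered color of every
// pixel it owns into dst.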
__global__ void kernel(uchar4 *dst, uint w, uint h, int radius) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
for (int x = idx; x < w; x += offsetx) {
for (int y = idy; y < h; y += offsety) {
dst[y * w + x] = getPixelColor(x, y, radius, w, h);
}
}
}
int main() {
ios_base::sync_with_stdio(false);
string in;
string out;
int radius;
cin >> in >> out >> radius;
BinaryImage img(in);
hipArray *arr;
hipChannelFormatDesc ch = hipCreateChannelDesc<uchar4>();
CSC(hipMallocArray(&arr, &ch, img.w, img.h));
CSC(hipMemcpyToArray(arr, 0, 0, img.data, sizeof(uchar4) * img.size(), hipMemcpyHostToDevice));
tex.addressMode[0] = hipAddressModeClamp;
tex.addressMode[1] = hipAddressModeClamp;
tex.channelDesc = ch;
tex.filterMode = hipFilterModePoint;
tex.normalized = false;
CSC(hipBindTextureToArray(tex, arr, ch));
uchar4 *devData;
CSC(hipMalloc(&devData, sizeof(uchar4) * img.size()));
hipLaunchKernelGGL(( kernel), dim3(dim3(16, 16)), dim3(dim3(16, 16)), 0, 0, devData, img.w, img.h, radius);
CSC(hipGetLastError());
CSC(hipMemcpy(img.data, devData, sizeof(uchar4) * img.size(), hipMemcpyDeviceToHost));
CSC(hipUnbindTexture(tex));
CSC(hipFreeArray(arr));
CSC(hipFree(devData));
img.toFile(out);
return 0;
}
| 78a835d364fff31f981104f0906e71d2f08b8e22.cu | #include <cstdio>
#include <string>
#include <cassert>
#include <iostream>
#include <cstddef>
using namespace std;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
#define CSC(call) do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
texture<uchar4, 2, cudaReadModeElementType> tex;
class BinaryImage {
public:
uint w;
uint h;
uchar4 *data;
BinaryImage() : w(0), h(0), data(NULL) {}
BinaryImage(string path) {
FILE *fin = fopen(path.c_str(), "rb");
if (!fin) {
printf("File %s not found\n", path.c_str());
return;
}
fread(&w, sizeof(uint), 1, fin);
fread(&h, sizeof(uint), 1, fin);
data = new uchar4[w * h];
fread(data, sizeof(uchar4), w * h, fin);
fclose(fin);
}
~BinaryImage() {
if (data != NULL) {
delete[] data;
}
}
void toFile(string path) {
FILE *fout = fopen(path.c_str(), "wb");
if (!fout) {
printf("File %s not found\n", path.c_str());
return;
}
cout << w << ' ' << h << endl;
fwrite(&w, sizeof(uint), 1, fout);
fwrite(&h, sizeof(uint), 1, fout);
fwrite(data, sizeof(uchar4), w * h, fout);
fclose(fout);
}
size_t size() {
return w * h;
}
};
__device__ uchar getMedian(ushort *cnt, int mid) {
int curNum = 0;
for (int i = 0; i < 256; i++) {
curNum += cnt[i];
if (curNum > mid) {
return i;
}
}
return 255;
}
__device__ uchar4 getPixelColor(int x, int y, int radius, uint w, uint h) {
uchar4 p;
ushort cntR[256];
ushort cntG[256];
ushort cntB[256];
uchar r, g, b;
for (int i = 0; i < 256; i++) {
cntR[i] = cntG[i] = cntB[i] = 0;
}
int mid = 0;
for (int i = x - radius; i <= x + radius; i++) {
for (int j = y - radius; j <= y + radius; j++) {
if (i < 0 || j < 0 || i >= w || j >= h) {
continue;
}
p = tex2D(tex, i, j);
cntR[p.x]++;
cntG[p.y]++;
cntB[p.z]++;
mid++;
}
}
mid /= 2;
r = getMedian(cntR, mid);
g = getMedian(cntG, mid);
b = getMedian(cntB, mid);
return make_uchar4(r, g, b, tex2D(tex, x, y).w);
}
__global__ void kernel(uchar4 *dst, uint w, uint h, int radius) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
int idy = blockDim.y * blockIdx.y + threadIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
for (int x = idx; x < w; x += offsetx) {
for (int y = idy; y < h; y += offsety) {
dst[y * w + x] = getPixelColor(x, y, radius, w, h);
}
}
}
int main() {
ios_base::sync_with_stdio(false);
string in;
string out;
int radius;
cin >> in >> out >> radius;
BinaryImage img(in);
cudaArray *arr;
cudaChannelFormatDesc ch = cudaCreateChannelDesc<uchar4>();
CSC(cudaMallocArray(&arr, &ch, img.w, img.h));
CSC(cudaMemcpyToArray(arr, 0, 0, img.data, sizeof(uchar4) * img.size(), cudaMemcpyHostToDevice));
tex.addressMode[0] = cudaAddressModeClamp;
tex.addressMode[1] = cudaAddressModeClamp;
tex.channelDesc = ch;
tex.filterMode = cudaFilterModePoint;
tex.normalized = false;
CSC(cudaBindTextureToArray(tex, arr, ch));
uchar4 *devData;
CSC(cudaMalloc(&devData, sizeof(uchar4) * img.size()));
kernel<<<dim3(16, 16), dim3(16, 16)>>>(devData, img.w, img.h, radius);
CSC(cudaGetLastError());
CSC(cudaMemcpy(img.data, devData, sizeof(uchar4) * img.size(), cudaMemcpyDeviceToHost));
CSC(cudaUnbindTexture(tex));
CSC(cudaFreeArray(arr));
CSC(cudaFree(devData));
img.toFile(out);
return 0;
}
|
8204ef542333b4b61099bf0adae021e3adda96ba.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Fabio D'Isidoro, ETH Zurich, 08.08.2017
*
* Implementation of a CUDA-based Cpp library for fast DRR generation with GPU acceleration
*
 * Based both on the description found in the "Improved Algorithm" section in Jacob's paper (1998)
* https://www.researchgate.net/publication/2344985_A_Fast_Algorithm_to_Calculate_the_Exact_Radiological_Path_Through_a_Pixel_Or_Voxel_Space
* and on the implementation suggested in Greef et al 2009
* https://www.ncbi.nlm.nih.gov/pubmed/19810482
*
*
*
* Source file for the Class Siddon (see header for more information)
*
*
* -----------------------------------------------------------------
* Modified by Pengyi Zhang for infection-aware DRR generator @2020-08
* -----------------------------------------------------------------
*/
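/*
 * Quick recap of the ray parametrization implemented below (for reference only,
 * restating the compute_alpha_* / compute_phi_* helpers defined in this file):
 *
 *   ray:              p(alpha) = source + alpha * (dest - source),  alpha in [0, 1]
 *   plane crossings:  alpha_x(i) = ((X0 + i * spacing_x) - source_x) / (dest_x - source_x)
 *                     (and analogously alpha_y(j), alpha_z(k))
 *   voxel coordinate: phi_x(alpha) = (source_x + alpha * (dest_x - source_x) - X0) / spacing_x
 *
 * The kernel walks each ray from alpha_min to alpha_max, accumulating
 * (voxel density) * (intersection length) and finally scaling by the
 * source-to-destination distance.
 */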
#include "siddon_class.cuh"
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
__device__ const float epsilon = 2.22045e-016; // to compare double float values
// auxiliary functions
__device__ void get_dest(int idx, float *dest_array, float *dest) {
dest[0] = dest_array[0 + 3 * idx];
dest[1] = dest_array[1 + 3 * idx];
dest[2] = dest_array[2 + 3 * idx];
}
__device__ void compute_alpha_x(const float &X0,
const float &spacing_x,
const int &i,
const float &source_x,
const float &dest_x,
float &alpha_x) {
alpha_x = ((X0 + static_cast<float>(i)*spacing_x) - source_x) / (dest_x - source_x);
}
__device__ void compute_alpha_y(const float &Y0,
const float &spacing_y,
const int &j,
const float &source_y,
const float &dest_y,
float &alpha_y) {
alpha_y = ((Y0 + static_cast<float>(j)*spacing_y) - source_y) / (dest_y - source_y);
}
__device__ void compute_alpha_z(const float &Z0,
const float &spacing_z,
const int &k,
const float &source_z,
const float &dest_z,
float &alpha_z) {
alpha_z = ((Z0 + static_cast<float>(k)*spacing_z) - source_z) / (dest_z - source_z);
}
__device__ void compute_phi_x(const float &X0,
const float &spacing_x,
float &alpha,
const float &source_x,
const float &dest_x,
float &phi_x) {
phi_x = (source_x + alpha*(dest_x - source_x) - X0) / spacing_x;
}
__device__ void compute_phi_y(const float &Y0,
const float &spacing_y,
float &alpha,
const float &source_y,
const float &dest_y,
float &phi_y) {
phi_y = (source_y + alpha*(dest_y - source_y) - Y0) / spacing_y;
}
__device__ void compute_phi_z(const float &Z0,
const float &spacing_z,
float &alpha,
const float &source_z,
const float &dest_z,
float &phi_z) {
phi_z = (source_z + alpha*(dest_z - source_z) - Z0) / spacing_z;
}
__device__ void update_idx(unsigned int &i_v, unsigned int &j_v, unsigned int &k_v, const int &size_x, const int &size_y, int &arrayIdx) {
arrayIdx = i_v + size_x * (j_v + size_y * k_v);
}
__global__ void cuda_kernel(float *DRRarray, float *Maskarray, float *Lungarray, float *Valuearray,
float *source,
float *DestArray,
int DRRsize0,
float *movImgArray,
float *movMaskArray,
float *Weights,
int *MovSize,
float *MovSpacing,
float X0, float Y0, float Z0) {
	// DRR image indices
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
// DRR array index
int DRRidx = row + DRRsize0 * col;
//printf("Thread index %i\n", DRRidx);
if (DRRidx < DRRsize0 * DRRsize0) { // checks if thread index is within the length of the DRR array
// --- declaration of variables for Siddon ---
float alpha_min, alpha_max;
float alpha_x_min, alpha_x_max, alpha_y_min, alpha_y_max, alpha_z_min, alpha_z_max;
		int i_min, i_max, j_min, j_max, k_min, k_max; // indices corresponding to first and last intersected voxels
float alpha_current;
float alpha_x_next;
float alpha_y_next;
float alpha_z_next;
float distance;
int arrayIdx;
int arrayIdx_old;
unsigned int i_v, j_v, k_v;
float alpha_first_pixel;
float density_value = 0.;
float weight_value = 0.;
float num_value = 0.0;
float num_inf = 0.0;
float num_lung = 0.0;
float len_value = 0.0;
float len_inf = 0.0;
float len_lung = 0.0;
// --- define destination point based on DRR array index ---
float dest[3];
get_dest(DRRidx, DestArray, dest);
// --- source-to-destination distance ---
distance = sqrtf((dest[0] - source[0])*(dest[0] - source[0]) +
(dest[1] - source[1])*(dest[1] - source[1]) +
(dest[2] - source[2])*(dest[2] - source[2]));
//
float dx = MovSpacing[0] / fabsf(dest[0] - source[0]);
float dy = MovSpacing[1] / fabsf(dest[1] - source[1]);
float dz = MovSpacing[2] / fabsf(dest[2] - source[2]);
// --- find alpha_min and alpha_max
// initialize alpha_min and alpha_max
alpha_min = 0.;
alpha_max = 1.;
// X
if (fabsf(dest[0] - source[0]) > epsilon) {
float alpha_x0 = (X0 - source[0]) / (dest[0] - source[0]);
float alpha_xN;
compute_alpha_x(X0, MovSpacing[0], MovSize[0], source[0], dest[0], alpha_xN);
alpha_x_min = fminf(alpha_x0, alpha_xN);
alpha_x_max = fmaxf(alpha_x0, alpha_xN);
if (alpha_x_min > alpha_min) { alpha_min = alpha_x_min; };
if (alpha_x_max < alpha_max) { alpha_max = alpha_x_max; };
}
// Y
if (fabsf(dest[1] - source[1]) > epsilon) {
float alpha_y0 = (Y0 - source[1]) / (dest[1] - source[1]);
float alpha_yN;
compute_alpha_y(Y0, MovSpacing[1], MovSize[1], source[1], dest[1], alpha_yN);
alpha_y_min = fminf(alpha_y0, alpha_yN);
alpha_y_max = fmaxf(alpha_y0, alpha_yN);
if (alpha_y_min > alpha_min) { alpha_min = alpha_y_min; };
if (alpha_y_max < alpha_max) { alpha_max = alpha_y_max; };
}
// Z
if (fabsf(dest[2] - source[2]) > epsilon) {
float alpha_z0 = (Z0 - source[2]) / (dest[2] - source[2]);
float alpha_zN;
compute_alpha_z(Z0, MovSpacing[2], MovSize[2], source[2], dest[2], alpha_zN);
alpha_z_min = fminf(alpha_z0, alpha_zN);
alpha_z_max = fmaxf(alpha_z0, alpha_zN);
if (alpha_z_min > alpha_min) { alpha_min = alpha_z_min; };
if (alpha_z_max < alpha_max) { alpha_max = alpha_z_max; };
}
//if (DRRidx == 0){
//printf("Alpha min = %f\n", alpha_min);
//printf("Alpha max = %f\n", alpha_max);
//}
// --- initialize alpha ---
alpha_current = alpha_min;
if (alpha_min < alpha_max) {
// compute i_min, i_max and initialize alpha_x_next
if (dest[0] - source[0] > 0.) {
// i_min
if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_min = 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x);
i_min = ceil(phi_x);
}
// i_max
if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_max = MovSize[0] - 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x);
i_max = floor(phi_x);
}
// initialize alpha_x_next
compute_alpha_x(X0, MovSpacing[0], i_min, source[0], dest[0], alpha_x_next);
}
else {
// i_max
if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_max = MovSize[0] - 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x);
i_max = floor(phi_x);
}
// i_min
if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_min = 0; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x);
i_min = ceil(phi_x);
}
// initialize alpha_x_next
compute_alpha_x(X0, MovSpacing[0], i_max, source[0], dest[0], alpha_x_next);
}
// compute j_min, j_max and initialize alpha_y_next
if (dest[1] - source[1] > 0.) {
// j_min
if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_min = 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y);
j_min = ceil(phi_y);
}
// j_max
if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_max = MovSize[1] - 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y);
j_max = floor(phi_y);
}
// initialize alpha_y_next
compute_alpha_y(Y0, MovSpacing[1], j_min, source[1], dest[1], alpha_y_next);
}
else {
// j_max
if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_max = MovSize[1] - 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y);
j_max = floor(phi_y);
}
// j_min
if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_min = 0; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y);
j_min = ceil(phi_y);
}
// initialize alpha_y_next
compute_alpha_y(Y0, MovSpacing[1], j_max, source[1], dest[1], alpha_y_next);
}
// compute k_min, k_max and initialize alpha_z_next
if (dest[2] - source[2] > 0.) {
// k_min
if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_min = 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z);
k_min = ceil(phi_z);
}
// k_max
if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_max = MovSize[2] - 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z);
k_max = floor(phi_z);
}
// initialize alpha_z_next
compute_alpha_z(Z0, MovSpacing[2], k_min, source[2], dest[2], alpha_z_next);
}
else {
// k_max
if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_max = MovSize[2] - 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z);
k_max = floor(phi_z);
}
// k_min
if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_min = 0; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z);
k_min = ceil(phi_z);
}
// initialize alpha_z_next
compute_alpha_z(Z0, MovSpacing[2], k_max, source[2], dest[2], alpha_z_next);
}
//if (DRRidx == 0) {
// printf("i_min, i_max, Alpha_x_next = %d %d %f\n", i_min, i_max, alpha_x_next);
// printf("j_min, j_max, Alpha_y_next = %d %d %f\n", j_min, j_max, alpha_y_next);
// printf("k_min, k_max, Alpha_z_next = %d %d %f\n", k_min, k_max, alpha_z_next);
//}
// --- initialize first intersected pixel i_v, j_v, k_v ---
if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) {
alpha_first_pixel = (alpha_y_next + alpha_min) / 2.;
}
else if (alpha_x_next < alpha_z_next) {
alpha_first_pixel = (alpha_x_next + alpha_min) / 2.;
}
else {
alpha_first_pixel = (alpha_z_next + alpha_min) / 2.;
}
float phi_x = 0.;
float phi_y = 0.;
float phi_z = 0.;
compute_phi_x(X0, MovSpacing[0], alpha_first_pixel, source[0], dest[0], phi_x);
i_v = floor(phi_x);
compute_phi_y(Y0, MovSpacing[1], alpha_first_pixel, source[1], dest[1], phi_y);
j_v = floor(phi_y);
compute_phi_z(Z0, MovSpacing[2], alpha_first_pixel, source[2], dest[2], phi_z);
k_v = floor(phi_z);
// initialize array index of first intersected pixel
arrayIdx = i_v + MovSize[0] * (j_v + MovSize[1] * k_v);
arrayIdx_old = i_v + MovSize[0] * (j_v + MovSize[1] * k_v);
//if (DRRidx == 0) {
// printf("i_v, j_v, k_v = %d %d %d\n", i_v, j_v, k_v);
// printf("arrayIdx, arrayIdx_old = %d %d\n", arrayIdx, arrayIdx_old);
//}
			// iterator indices
int stop = (i_max - i_min + 1) + (j_max - j_min + 1) + (k_max - k_min + 1);
int iter = 0;
//while (alpha_current < 1. && alpha_current < alpha_max) {
while (iter < stop) {
float l;
// next intersection plane is y
if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) {
//T alpha_mid = (alpha_current + alpha_y_next) / 2.;
l = (alpha_y_next - alpha_current);
alpha_current = alpha_y_next;
// update
alpha_y_next += dy;
j_v += (dest[1] - source[1] > 0.) ? 1 : -1;
}
else if (alpha_x_next < alpha_z_next) {
// next intersection plane is x
//T alpha_mid = (alpha_current + alpha_x_next) / 2.;
l = (alpha_x_next - alpha_current);
alpha_current = alpha_x_next;
// update
alpha_x_next += dx;
i_v += (dest[0] - source[0] > 0.) ? 1 : -1;
}
else {
// next intersection plane is z
//T alpha_mid = (alpha_current + alpha_z_next) / 2.;
l = (alpha_z_next - alpha_current);
alpha_current = alpha_z_next;
// update
alpha_z_next += dz;
k_v += (dest[2] - source[2] > 0.) ? 1 : -1;
}
// update array index
update_idx(i_v, j_v, k_v, MovSize[0], MovSize[1], arrayIdx);
//if (arrayIdx < 0.) {
// printf("arrayIdx negative! %i", arrayIdx);
//}
if (arrayIdx_old > 0.){
// update density value
if (movImgArray[arrayIdx_old] != 0.) {
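					// weight each traversed segment by the tissue label stored in the mask:
					// label > 2 -> infection (Weights[2]); else 0 < label < 3 -> lung (Weights[1]); otherwise background (Weights[0])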
if (movMaskArray[arrayIdx_old] > 2.0) // infection
{
//printf("%f", movMaskArray[arrayIdx_old]);
density_value += movImgArray[arrayIdx_old] * l * Weights[2];
weight_value += Weights[2];
num_value += 1.0;
num_inf += 1.0;
len_value += l * Weights[2];
len_inf += l * Weights[2];
}
else if (movMaskArray[arrayIdx_old] > 0.0 && movMaskArray[arrayIdx_old] < 3.0) // lung
{
density_value += movImgArray[arrayIdx_old] * l * Weights[1];
weight_value += Weights[1];
num_value += 1.0;
num_lung += 1.0;
len_value += l * Weights[1];
len_lung += l * Weights[1];
}
						else // background
{
density_value += movImgArray[arrayIdx_old] * l * Weights[0];
weight_value += Weights[0];
num_value += 1.0;
len_value += l * Weights[0];
}
//std::cout << density_value << std::endl;
}
}
// update arrayIdx
arrayIdx_old = arrayIdx;
// update iter
iter += 1;
}
// multiply by the distance
density_value *= distance;
//std::cout << density_value << std::endl;
}
// update density value array
		// guard against division by zero when a ray never intersects the volume (weight_value stays 0)
		DRRarray[DRRidx] = (weight_value > 0.f) ? density_value * num_value / weight_value : 0.f;
Maskarray[DRRidx] = len_inf;
Lungarray[DRRidx] = len_lung;
Valuearray[DRRidx] = len_value;
}
}
/**
*
 * Default constructor
*
**/
SiddonGpu::SiddonGpu() { }
/**
*
* Overloaded constructor loads the CT scan (together with size and spacing) onto GPU memory
*
**/
SiddonGpu::SiddonGpu(int *NumThreadsPerBlock,
float *movImgArray,
float *movMaskArray,
float *Weights,
int *MovSize,
float *MovSpacing,
float X0, float Y0, float Z0,
int *DRRSize){
// ---- Allocate variable members ----
m_NumThreadsPerBlock[0] = NumThreadsPerBlock[0];
m_NumThreadsPerBlock[1] = NumThreadsPerBlock[1];
m_NumThreadsPerBlock[2] = NumThreadsPerBlock[2];
//m_MovSize[0] = MovSize[0];
//m_MovSize[1] = MovSize[1];
//m_MovSize[2] = MovSize[2];
m_X0 = X0;
m_Y0 = Y0;
m_Z0 = Z0;
m_DRRsize[0] = DRRSize[0];
m_DRRsize[1] = DRRSize[1];
m_DRRsize[2] = DRRSize[2];
m_DRRsize0 = DRRSize[0];
m_movImgMemSize = MovSize[0] * MovSize[1] * MovSize[2] * sizeof(float);
m_DestMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2] * 3) * sizeof(float);
m_DrrMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2]) * sizeof(float); // memory for each output drr
// allocate space for device copies
hipMalloc((void**)&m_d_movImgArray, m_movImgMemSize);
hipMalloc((void**)&m_d_MovSize, 3 * sizeof(int));
hipMalloc((void**)&m_d_MovSpacing, 3 * sizeof(float));
hipMalloc((void**)&m_d_movMaskArray, m_movImgMemSize);
hipMalloc((void**)&m_d_Weights, 3 * sizeof(float)); // bk,lungs,infection
// Copy arrays related to the moving image onto device array
hipMemcpy(m_d_movImgArray, movImgArray, m_movImgMemSize, hipMemcpyHostToDevice);
hipMemcpy(m_d_MovSize, MovSize, 3 * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(m_d_MovSpacing, MovSpacing, 3 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(m_d_movMaskArray, movMaskArray, m_movImgMemSize, hipMemcpyHostToDevice);
hipMemcpy(m_d_Weights, Weights, 3 * sizeof(float), hipMemcpyHostToDevice);
//std::cout << "Siddon object Initialization: GPU memory prepared \n" << std::endl;
//printf("ctor %p\n", this); // in constructors
}
/**
*
* Destructor clears everything left from the GPU memory
*
**/
SiddonGpu::~SiddonGpu() {
	hipFree(m_d_movImgArray);
	hipFree(m_d_MovSize);
	hipFree(m_d_MovSpacing);
	hipFree(m_d_movMaskArray); // also release the mask copy allocated in the constructor
	hipFree(m_d_Weights); // and the per-label weights
std::cout << "Siddon object destruction: GPU memory cleared \n" << std::endl;
//printf("dtor %p\n", this); // in destructor
}
/**
 * The function generateDRR must be called with the following variables:
*
* @param source : array of(transformed) source physical coordinates
* @param DestArray : C - ordered 1D array of physical coordinates relative to the(transformed) output DRR image.
* @param drrArray : output, 1D array for output values of projected CT densities
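 * @param maskArray : output, 1D array of accumulated infection-weighted intersection lengths
 * @param lungArray : output, 1D array of accumulated lung-weighted intersection lengths
 * @param valueArray : output, 1D array of total weighted intersection lengths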
*
**/
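/*
 * Illustrative host-side call sequence (a hypothetical sketch, not part of this
 * file; buffer names and block sizes below are placeholders):
 *
 *   int threads[3] = {16, 16, 1};
 *   SiddonGpu siddon(threads, movImg, movMask, weights, movSize, movSpacing,
 *                    X0, Y0, Z0, drrSize);
 *   siddon.generateDRR(source, destArray, drr, mask, lung, value);
 *
 * where drr, mask, lung and value are host buffers holding
 * drrSize[0] * drrSize[1] * drrSize[2] floats each.
 */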
void SiddonGpu::generateDRR(float *source,
float *DestArray,
float *drrArray,
float *maskArray,
float *lungArray,
float *valueArray
) {
hipError_t ierrAsync;
hipError_t ierrSync;
// declare pointer to device memory for output DRR array
float *d_DestArray;
float *d_source;
float *d_drr_array; // drr image
float *d_mask_array; // infection mask length
float *d_lung_array; // lung length
float *d_value_array; // total length
// allocate space on device
hipMalloc((void**)&d_drr_array, m_DrrMemSize);
hipMalloc((void**)&d_mask_array, m_DrrMemSize); // 0: infection-shot mask, 1: lung-shot mask, 2: infection total mask
hipMalloc((void**)&d_lung_array, m_DrrMemSize);
hipMalloc((void**)&d_value_array, m_DrrMemSize);
if (d_mask_array==NULL || d_drr_array==NULL || d_lung_array==NULL|| d_value_array==NULL)
//printf("Memory allocation error!\n");
std::cout << "Memory allocation error!\n" << std::endl;
hipMalloc((void**)&d_source, 3 * sizeof(float));
hipMalloc((void**)&d_DestArray, m_DestMemSize);
// Copy source and destination to device
hipMemcpy(d_DestArray, DestArray, m_DestMemSize, hipMemcpyHostToDevice);
hipMemcpy(d_source, source, 3 * sizeof(float), hipMemcpyHostToDevice);
//std::cout << "DRR generation: GPU memory prepared \n" << std::endl;
// determine number of required blocks
dim3 threads_per_block(m_NumThreadsPerBlock[0], m_NumThreadsPerBlock[1], 1);
dim3 number_of_blocks((m_DRRsize[0] / threads_per_block.x) + 1, (m_DRRsize[1] / threads_per_block.y) + 1, 1);
//// Query GPU device
//hipDeviceProp_t prop;
//hipGetDeviceProperties(&prop, 0);
//std::cout << "Max threads per block " << prop.maxThreadsPerBlock << std::endl;
//hipGetDeviceProperties(&prop, 0);
//if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) {
// printf("Too many threads per block ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.x > prop.maxThreadsDim[0]) {
// printf("Too many threads in x-direction ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.y > prop.maxThreadsDim[1]) {
// printf("Too many threads in y-direction ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.z > prop.maxThreadsDim[2]) {
// printf("Too many threads in z-direction ... exiting\n");
// goto cleanup;
//}
// launch kernel
cuda_kernel << <number_of_blocks, threads_per_block >> >(d_drr_array,
d_mask_array,
d_lung_array,
d_value_array,
d_source,
d_DestArray,
m_DRRsize0,
m_d_movImgArray,
m_d_movMaskArray,
m_d_Weights,
m_d_MovSize,
m_d_MovSpacing,
m_X0, m_Y0, m_Z0);
// Check for errors in Kernel launch
ierrSync = hipGetLastError();
ierrAsync = hipDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != hipSuccess) {
//printf("Cuda Sync error: %s\n", hipGetErrorString(ierrSync));
std::cout << "Cuda Sync error: "<< hipGetErrorString(ierrSync) << std::endl;
//goto cleanup;
}
if (ierrAsync != hipSuccess) {
//printf("Cuda Async error: %s\n", hipGetErrorString(ierrAsync));
std::cout << "Cuda Sync error: "<< hipGetErrorString(ierrSync) << std::endl;
//goto cleanup;
}
// Copy result to host array
hipMemcpy(drrArray, d_drr_array, m_DrrMemSize, hipMemcpyDeviceToHost);
hipMemcpy(maskArray, d_mask_array, m_DrrMemSize, hipMemcpyDeviceToHost);
hipMemcpy(lungArray, d_lung_array, m_DrrMemSize, hipMemcpyDeviceToHost);
hipMemcpy(valueArray, d_value_array, m_DrrMemSize, hipMemcpyDeviceToHost);
// Clean up device DRR array
cleanup:
hipFree(d_drr_array);
hipFree(d_mask_array);
hipFree(d_lung_array);
hipFree(d_value_array);
hipFree(d_source);
hipFree(d_DestArray);
//std::cout << "DRR generation: GPU memory cleared \n" << std::endl;
return;
} | 8204ef542333b4b61099bf0adae021e3adda96ba.cu | /**
* Fabio D'Isidoro, ETH Zurich, 08.08.2017
*
* Implementation of a CUDA-based Cpp library for fast DRR generation with GPU acceleration
*
 * Based both on the description found in the "Improved Algorithm" section in Jacob's paper (1998)
* https://www.researchgate.net/publication/2344985_A_Fast_Algorithm_to_Calculate_the_Exact_Radiological_Path_Through_a_Pixel_Or_Voxel_Space
* and on the implementation suggested in Greef et al 2009
* https://www.ncbi.nlm.nih.gov/pubmed/19810482
*
*
*
* Source file for the Class Siddon (see header for more information)
*
*
* -----------------------------------------------------------------
* Modified by Pengyi Zhang for infection-aware DRR generator @2020-08
* -----------------------------------------------------------------
*/
#include "siddon_class.cuh"
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__device__ const float epsilon = 2.22045e-016; // to compare double float values
// auxiliary functions
__device__ void get_dest(int idx, float *dest_array, float *dest) {
dest[0] = dest_array[0 + 3 * idx];
dest[1] = dest_array[1 + 3 * idx];
dest[2] = dest_array[2 + 3 * idx];
}
__device__ void compute_alpha_x(const float &X0,
const float &spacing_x,
const int &i,
const float &source_x,
const float &dest_x,
float &alpha_x) {
alpha_x = ((X0 + static_cast<float>(i)*spacing_x) - source_x) / (dest_x - source_x);
}
__device__ void compute_alpha_y(const float &Y0,
const float &spacing_y,
const int &j,
const float &source_y,
const float &dest_y,
float &alpha_y) {
alpha_y = ((Y0 + static_cast<float>(j)*spacing_y) - source_y) / (dest_y - source_y);
}
__device__ void compute_alpha_z(const float &Z0,
const float &spacing_z,
const int &k,
const float &source_z,
const float &dest_z,
float &alpha_z) {
alpha_z = ((Z0 + static_cast<float>(k)*spacing_z) - source_z) / (dest_z - source_z);
}
__device__ void compute_phi_x(const float &X0,
const float &spacing_x,
float &alpha,
const float &source_x,
const float &dest_x,
float &phi_x) {
phi_x = (source_x + alpha*(dest_x - source_x) - X0) / spacing_x;
}
__device__ void compute_phi_y(const float &Y0,
const float &spacing_y,
float &alpha,
const float &source_y,
const float &dest_y,
float &phi_y) {
phi_y = (source_y + alpha*(dest_y - source_y) - Y0) / spacing_y;
}
__device__ void compute_phi_z(const float &Z0,
const float &spacing_z,
float &alpha,
const float &source_z,
const float &dest_z,
float &phi_z) {
phi_z = (source_z + alpha*(dest_z - source_z) - Z0) / spacing_z;
}
__device__ void update_idx(unsigned int &i_v, unsigned int &j_v, unsigned int &k_v, const int &size_x, const int &size_y, int &arrayIdx) {
arrayIdx = i_v + size_x * (j_v + size_y * k_v);
}
__global__ void cuda_kernel(float *DRRarray, float *Maskarray, float *Lungarray, float *Valuearray,
float *source,
float *DestArray,
int DRRsize0,
float *movImgArray,
float *movMaskArray,
float *Weights,
int *MovSize,
float *MovSpacing,
float X0, float Y0, float Z0) {
	// DRR image indices
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
// DRR array index
int DRRidx = row + DRRsize0 * col;
//printf("Thread index %i\n", DRRidx);
if (DRRidx < DRRsize0 * DRRsize0) { // checks if thread index is within the length of the DRR array
// --- declaration of variables for Siddon ---
float alpha_min, alpha_max;
float alpha_x_min, alpha_x_max, alpha_y_min, alpha_y_max, alpha_z_min, alpha_z_max;
		int i_min, i_max, j_min, j_max, k_min, k_max; // indices corresponding to first and last intersected voxels
float alpha_current;
float alpha_x_next;
float alpha_y_next;
float alpha_z_next;
float distance;
int arrayIdx;
int arrayIdx_old;
unsigned int i_v, j_v, k_v;
float alpha_first_pixel;
float density_value = 0.;
float weight_value = 0.;
float num_value = 0.0;
float num_inf = 0.0;
float num_lung = 0.0;
float len_value = 0.0;
float len_inf = 0.0;
float len_lung = 0.0;
// --- define destination point based on DRR array index ---
float dest[3];
get_dest(DRRidx, DestArray, dest);
// --- source-to-destination distance ---
distance = sqrtf((dest[0] - source[0])*(dest[0] - source[0]) +
(dest[1] - source[1])*(dest[1] - source[1]) +
(dest[2] - source[2])*(dest[2] - source[2]));
//
float dx = MovSpacing[0] / fabsf(dest[0] - source[0]);
float dy = MovSpacing[1] / fabsf(dest[1] - source[1]);
float dz = MovSpacing[2] / fabsf(dest[2] - source[2]);
// --- find alpha_min and alpha_max
// initialize alpha_min and alpha_max
alpha_min = 0.;
alpha_max = 1.;
// X
if (fabsf(dest[0] - source[0]) > epsilon) {
float alpha_x0 = (X0 - source[0]) / (dest[0] - source[0]);
float alpha_xN;
compute_alpha_x(X0, MovSpacing[0], MovSize[0], source[0], dest[0], alpha_xN);
alpha_x_min = fminf(alpha_x0, alpha_xN);
alpha_x_max = fmaxf(alpha_x0, alpha_xN);
if (alpha_x_min > alpha_min) { alpha_min = alpha_x_min; };
if (alpha_x_max < alpha_max) { alpha_max = alpha_x_max; };
}
// Y
if (fabsf(dest[1] - source[1]) > epsilon) {
float alpha_y0 = (Y0 - source[1]) / (dest[1] - source[1]);
float alpha_yN;
compute_alpha_y(Y0, MovSpacing[1], MovSize[1], source[1], dest[1], alpha_yN);
alpha_y_min = fminf(alpha_y0, alpha_yN);
alpha_y_max = fmaxf(alpha_y0, alpha_yN);
if (alpha_y_min > alpha_min) { alpha_min = alpha_y_min; };
if (alpha_y_max < alpha_max) { alpha_max = alpha_y_max; };
}
// Z
if (fabsf(dest[2] - source[2]) > epsilon) {
float alpha_z0 = (Z0 - source[2]) / (dest[2] - source[2]);
float alpha_zN;
compute_alpha_z(Z0, MovSpacing[2], MovSize[2], source[2], dest[2], alpha_zN);
alpha_z_min = fminf(alpha_z0, alpha_zN);
alpha_z_max = fmaxf(alpha_z0, alpha_zN);
if (alpha_z_min > alpha_min) { alpha_min = alpha_z_min; };
if (alpha_z_max < alpha_max) { alpha_max = alpha_z_max; };
}
//if (DRRidx == 0){
//printf("Alpha min = %f\n", alpha_min);
//printf("Alpha max = %f\n", alpha_max);
//}
// --- initialize alpha ---
alpha_current = alpha_min;
if (alpha_min < alpha_max) {
// compute i_min, i_max and initialize alpha_x_next
if (dest[0] - source[0] > 0.) {
// i_min
if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_min = 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x);
i_min = ceil(phi_x);
}
// i_max
if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_max = MovSize[0] - 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x);
i_max = floor(phi_x);
}
// initialize alpha_x_next
compute_alpha_x(X0, MovSpacing[0], i_min, source[0], dest[0], alpha_x_next);
}
else {
// i_max
if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_max = MovSize[0] - 1; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x);
i_max = floor(phi_x);
}
// i_min
if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_min = 0; }
else {
float phi_x;
compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x);
i_min = ceil(phi_x);
}
// initialize alpha_x_next
compute_alpha_x(X0, MovSpacing[0], i_max, source[0], dest[0], alpha_x_next);
}
// compute j_min, j_max and initialize alpha_y_next
if (dest[1] - source[1] > 0.) {
// j_min
if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_min = 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y);
j_min = ceil(phi_y);
}
// j_max
if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_max = MovSize[1] - 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y);
j_max = floor(phi_y);
}
// initialize alpha_y_next
compute_alpha_y(Y0, MovSpacing[1], j_min, source[1], dest[1], alpha_y_next);
}
else {
// j_max
if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_max = MovSize[1] - 1; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y);
j_max = floor(phi_y);
}
// j_min
if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_min = 0; }
else {
float phi_y;
compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y);
j_min = ceil(phi_y);
}
// initialize alpha_y_next
compute_alpha_y(Y0, MovSpacing[1], j_max, source[1], dest[1], alpha_y_next);
}
// compute k_min, k_max and initialize alpha_z_next
if (dest[2] - source[2] > 0.) {
// k_min
if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_min = 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z);
k_min = ceil(phi_z);
}
// k_max
if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_max = MovSize[2] - 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z);
k_max = floor(phi_z);
}
// initialize alpha_z_next
compute_alpha_z(Z0, MovSpacing[2], k_min, source[2], dest[2], alpha_z_next);
}
else {
// k_max
if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_max = MovSize[2] - 1; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z);
k_max = floor(phi_z);
}
// k_min
if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_min = 0; }
else {
float phi_z;
compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z);
k_min = ceil(phi_z);
}
// initialize alpha_z_next
compute_alpha_z(Z0, MovSpacing[2], k_max, source[2], dest[2], alpha_z_next);
}
//if (DRRidx == 0) {
// printf("i_min, i_max, Alpha_x_next = %d %d %f\n", i_min, i_max, alpha_x_next);
// printf("j_min, j_max, Alpha_y_next = %d %d %f\n", j_min, j_max, alpha_y_next);
// printf("k_min, k_max, Alpha_z_next = %d %d %f\n", k_min, k_max, alpha_z_next);
//}
// --- initialize first intersected pixel i_v, j_v, k_v ---
if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) {
alpha_first_pixel = (alpha_y_next + alpha_min) / 2.;
}
else if (alpha_x_next < alpha_z_next) {
alpha_first_pixel = (alpha_x_next + alpha_min) / 2.;
}
else {
alpha_first_pixel = (alpha_z_next + alpha_min) / 2.;
}
float phi_x = 0.;
float phi_y = 0.;
float phi_z = 0.;
compute_phi_x(X0, MovSpacing[0], alpha_first_pixel, source[0], dest[0], phi_x);
i_v = floor(phi_x);
compute_phi_y(Y0, MovSpacing[1], alpha_first_pixel, source[1], dest[1], phi_y);
j_v = floor(phi_y);
compute_phi_z(Z0, MovSpacing[2], alpha_first_pixel, source[2], dest[2], phi_z);
k_v = floor(phi_z);
// initialize array index of first intersected pixel
arrayIdx = i_v + MovSize[0] * (j_v + MovSize[1] * k_v);
arrayIdx_old = i_v + MovSize[0] * (j_v + MovSize[1] * k_v);
//if (DRRidx == 0) {
// printf("i_v, j_v, k_v = %d %d %d\n", i_v, j_v, k_v);
// printf("arrayIdx, arrayIdx_old = %d %d\n", arrayIdx, arrayIdx_old);
//}
			// iterator indices
int stop = (i_max - i_min + 1) + (j_max - j_min + 1) + (k_max - k_min + 1);
int iter = 0;
//while (alpha_current < 1. && alpha_current < alpha_max) {
while (iter < stop) {
float l;
// next intersection plane is y
if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) {
//T alpha_mid = (alpha_current + alpha_y_next) / 2.;
l = (alpha_y_next - alpha_current);
alpha_current = alpha_y_next;
// update
alpha_y_next += dy;
j_v += (dest[1] - source[1] > 0.) ? 1 : -1;
}
else if (alpha_x_next < alpha_z_next) {
// next intersection plane is x
//T alpha_mid = (alpha_current + alpha_x_next) / 2.;
l = (alpha_x_next - alpha_current);
alpha_current = alpha_x_next;
// update
alpha_x_next += dx;
i_v += (dest[0] - source[0] > 0.) ? 1 : -1;
}
else {
// next intersection plane is z
//T alpha_mid = (alpha_current + alpha_z_next) / 2.;
l = (alpha_z_next - alpha_current);
alpha_current = alpha_z_next;
// update
alpha_z_next += dz;
k_v += (dest[2] - source[2] > 0.) ? 1 : -1;
}
// update array index
update_idx(i_v, j_v, k_v, MovSize[0], MovSize[1], arrayIdx);
//if (arrayIdx < 0.) {
// printf("arrayIdx negative! %i", arrayIdx);
//}
if (arrayIdx_old > 0.){
// update density value
if (movImgArray[arrayIdx_old] != 0.) {
if (movMaskArray[arrayIdx_old] > 2.0) // infection
{
//printf("%f", movMaskArray[arrayIdx_old]);
density_value += movImgArray[arrayIdx_old] * l * Weights[2];
weight_value += Weights[2];
num_value += 1.0;
num_inf += 1.0;
len_value += l * Weights[2];
len_inf += l * Weights[2];
}
else if (movMaskArray[arrayIdx_old] > 0.0 && movMaskArray[arrayIdx_old] < 3.0) // lung
{
density_value += movImgArray[arrayIdx_old] * l * Weights[1];
weight_value += Weights[1];
num_value += 1.0;
num_lung += 1.0;
len_value += l * Weights[1];
len_lung += l * Weights[1];
}
						else // background
{
density_value += movImgArray[arrayIdx_old] * l * Weights[0];
weight_value += Weights[0];
num_value += 1.0;
len_value += l * Weights[0];
}
//std::cout << density_value << std::endl;
}
}
// update arrayIdx
arrayIdx_old = arrayIdx;
// update iter
iter += 1;
}
// multiply by the distance
density_value *= distance;
//std::cout << density_value << std::endl;
}
// update density value array
		// guard against division by zero when a ray never intersects the volume (weight_value stays 0)
		DRRarray[DRRidx] = (weight_value > 0.f) ? density_value * num_value / weight_value : 0.f;
Maskarray[DRRidx] = len_inf;
Lungarray[DRRidx] = len_lung;
Valuearray[DRRidx] = len_value;
}
}
/**
*
 * Default constructor
*
**/
SiddonGpu::SiddonGpu() { }
/**
*
* Overloaded constructor loads the CT scan (together with size and spacing) onto GPU memory
*
**/
SiddonGpu::SiddonGpu(int *NumThreadsPerBlock,
float *movImgArray,
float *movMaskArray,
float *Weights,
int *MovSize,
float *MovSpacing,
float X0, float Y0, float Z0,
int *DRRSize){
// ---- Allocate variable members ----
m_NumThreadsPerBlock[0] = NumThreadsPerBlock[0];
m_NumThreadsPerBlock[1] = NumThreadsPerBlock[1];
m_NumThreadsPerBlock[2] = NumThreadsPerBlock[2];
//m_MovSize[0] = MovSize[0];
//m_MovSize[1] = MovSize[1];
//m_MovSize[2] = MovSize[2];
m_X0 = X0;
m_Y0 = Y0;
m_Z0 = Z0;
m_DRRsize[0] = DRRSize[0];
m_DRRsize[1] = DRRSize[1];
m_DRRsize[2] = DRRSize[2];
m_DRRsize0 = DRRSize[0];
m_movImgMemSize = MovSize[0] * MovSize[1] * MovSize[2] * sizeof(float);
m_DestMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2] * 3) * sizeof(float);
m_DrrMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2]) * sizeof(float); // memory for each output drr
// allocate space for device copies
cudaMalloc((void**)&m_d_movImgArray, m_movImgMemSize);
cudaMalloc((void**)&m_d_MovSize, 3 * sizeof(int));
cudaMalloc((void**)&m_d_MovSpacing, 3 * sizeof(float));
cudaMalloc((void**)&m_d_movMaskArray, m_movImgMemSize);
cudaMalloc((void**)&m_d_Weights, 3 * sizeof(float)); // bk,lungs,infection
// Copy arrays related to the moving image onto device array
cudaMemcpy(m_d_movImgArray, movImgArray, m_movImgMemSize, cudaMemcpyHostToDevice);
cudaMemcpy(m_d_MovSize, MovSize, 3 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(m_d_MovSpacing, MovSpacing, 3 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(m_d_movMaskArray, movMaskArray, m_movImgMemSize, cudaMemcpyHostToDevice);
cudaMemcpy(m_d_Weights, Weights, 3 * sizeof(float), cudaMemcpyHostToDevice);
//std::cout << "Siddon object Initialization: GPU memory prepared \n" << std::endl;
//printf("ctor %p\n", this); // in constructors
}
/**
*
* Destructor clears everything left from the GPU memory
*
**/
SiddonGpu::~SiddonGpu() {
	cudaFree(m_d_movImgArray);
	cudaFree(m_d_MovSize);
	cudaFree(m_d_MovSpacing);
	cudaFree(m_d_movMaskArray); // also release the mask copy allocated in the constructor
	cudaFree(m_d_Weights); // and the per-label weights
std::cout << "Siddon object destruction: GPU memory cleared \n" << std::endl;
//printf("dtor %p\n", this); // in destructor
}
/**
 * The function generateDRR must be called with the following variables:
*
* @param source : array of(transformed) source physical coordinates
* @param DestArray : C - ordered 1D array of physical coordinates relative to the(transformed) output DRR image.
* @param drrArray : output, 1D array for output values of projected CT densities
*
**/
void SiddonGpu::generateDRR(float *source,
float *DestArray,
float *drrArray,
float *maskArray,
float *lungArray,
float *valueArray
) {
cudaError_t ierrAsync;
cudaError_t ierrSync;
// declare pointer to device memory for output DRR array
float *d_DestArray;
float *d_source;
float *d_drr_array; // drr image
float *d_mask_array; // infection mask length
float *d_lung_array; // lung length
float *d_value_array; // total length
// allocate space on device
cudaMalloc((void**)&d_drr_array, m_DrrMemSize);
cudaMalloc((void**)&d_mask_array, m_DrrMemSize); // 0: infection-shot mask, 1: lung-shot mask, 2: infection total mask
cudaMalloc((void**)&d_lung_array, m_DrrMemSize);
cudaMalloc((void**)&d_value_array, m_DrrMemSize);
if (d_mask_array==NULL || d_drr_array==NULL || d_lung_array==NULL|| d_value_array==NULL)
//printf("Memory allocation error!\n");
std::cout << "Memory allocation error!\n" << std::endl;
cudaMalloc((void**)&d_source, 3 * sizeof(float));
cudaMalloc((void**)&d_DestArray, m_DestMemSize);
// Copy source and destination to device
cudaMemcpy(d_DestArray, DestArray, m_DestMemSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_source, source, 3 * sizeof(float), cudaMemcpyHostToDevice);
//std::cout << "DRR generation: GPU memory prepared \n" << std::endl;
// determine number of required blocks
dim3 threads_per_block(m_NumThreadsPerBlock[0], m_NumThreadsPerBlock[1], 1);
dim3 number_of_blocks((m_DRRsize[0] / threads_per_block.x) + 1, (m_DRRsize[1] / threads_per_block.y) + 1, 1);
//// Query GPU device
//cudaDeviceProp prop;
//cudaGetDeviceProperties(&prop, 0);
//std::cout << "Max threads per block " << prop.maxThreadsPerBlock << std::endl;
//cudaGetDeviceProperties(&prop, 0);
//if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) {
// printf("Too many threads per block ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.x > prop.maxThreadsDim[0]) {
// printf("Too many threads in x-direction ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.y > prop.maxThreadsDim[1]) {
// printf("Too many threads in y-direction ... exiting\n");
// goto cleanup;
//}
//if (threads_per_block.z > prop.maxThreadsDim[2]) {
// printf("Too many threads in z-direction ... exiting\n");
// goto cleanup;
//}
// launch kernel
cuda_kernel << <number_of_blocks, threads_per_block >> >(d_drr_array,
d_mask_array,
d_lung_array,
d_value_array,
d_source,
d_DestArray,
m_DRRsize0,
m_d_movImgArray,
m_d_movMaskArray,
m_d_Weights,
m_d_MovSize,
m_d_MovSpacing,
m_X0, m_Y0, m_Z0);
// Check for errors in Kernel launch
ierrSync = cudaGetLastError();
ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish
if (ierrSync != cudaSuccess) {
//printf("Cuda Sync error: %s\n", cudaGetErrorString(ierrSync));
std::cout << "Cuda Sync error: "<< cudaGetErrorString(ierrSync) << std::endl;
//goto cleanup;
}
if (ierrAsync != cudaSuccess) {
//printf("Cuda Async error: %s\n", cudaGetErrorString(ierrAsync));
std::cout << "Cuda Sync error: "<< cudaGetErrorString(ierrSync) << std::endl;
//goto cleanup;
}
// Copy result to host array
cudaMemcpy(drrArray, d_drr_array, m_DrrMemSize, cudaMemcpyDeviceToHost);
cudaMemcpy(maskArray, d_mask_array, m_DrrMemSize, cudaMemcpyDeviceToHost);
cudaMemcpy(lungArray, d_lung_array, m_DrrMemSize, cudaMemcpyDeviceToHost);
cudaMemcpy(valueArray, d_value_array, m_DrrMemSize, cudaMemcpyDeviceToHost);
// Clean up device DRR array
cleanup:
cudaFree(d_drr_array);
cudaFree(d_mask_array);
cudaFree(d_lung_array);
cudaFree(d_value_array);
cudaFree(d_source);
cudaFree(d_DestArray);
//std::cout << "DRR generation: GPU memory cleared \n" << std::endl;
return;
} |
74bab8de73798ee5c01c0c6e8d2200ae14c65e24.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <random/rng.cuh>
#include <stats/mean.cuh>
#include <stats/mean_center.cuh>
#include "matrix_vector_op.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T, typename IdxType>
struct MeanCenterInputs {
T tolerance, mean;
IdxType rows, cols;
bool sample, rowMajor, bcastAlongRows;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MeanCenterInputs<T, IdxType> &dims) {
return os;
}
template <typename T, typename IdxType>
class MeanCenterTest
: public ::testing::TestWithParam<MeanCenterInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MeanCenterInputs<T, IdxType>>::GetParam();
Random::Rng r(params.seed);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
auto rows = params.rows, cols = params.cols;
auto len = rows * cols;
IdxType vecLen = params.bcastAlongRows ? cols : rows;
allocate(out, len);
allocate(out_ref, len);
allocate(data, len);
allocate(meanVec, vecLen);
r.normal(data, len, params.mean, (T)1.0, stream);
mean(meanVec, data, cols, rows, params.sample, params.rowMajor, stream);
meanCenter(out, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, stream);
LinAlg::naiveMatVec(out_ref, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, (T)-1.0);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(meanVec));
}
protected:
MeanCenterInputs<T, IdxType> params;
T *data, *meanVec, *out, *out_ref;
};
const std::vector<MeanCenterInputs<float, int>> inputsf_i32 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, int> MeanCenterTestF_i32;
TEST_P(MeanCenterTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MeanCenterInputs<float, size_t>> inputsf_i64 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, size_t> MeanCenterTestF_i64;
TEST_P(MeanCenterTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MeanCenterInputs<double, int>> inputsd_i32 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, int> MeanCenterTestD_i32;
TEST_P(MeanCenterTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MeanCenterInputs<double, size_t>> inputsd_i64 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, size_t> MeanCenterTestD_i64;
TEST_P(MeanCenterTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace Stats
} // end namespace MLCommon
| 74bab8de73798ee5c01c0c6e8d2200ae14c65e24.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <random/rng.cuh>
#include <stats/mean.cuh>
#include <stats/mean_center.cuh>
#include "matrix_vector_op.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Stats {
template <typename T, typename IdxType>
struct MeanCenterInputs {
T tolerance, mean;
IdxType rows, cols;
bool sample, rowMajor, bcastAlongRows;
unsigned long long int seed;
};
template <typename T, typename IdxType>
::std::ostream &operator<<(::std::ostream &os,
const MeanCenterInputs<T, IdxType> &dims) {
return os;
}
template <typename T, typename IdxType>
class MeanCenterTest
: public ::testing::TestWithParam<MeanCenterInputs<T, IdxType>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MeanCenterInputs<T, IdxType>>::GetParam();
Random::Rng r(params.seed);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
auto rows = params.rows, cols = params.cols;
auto len = rows * cols;
IdxType vecLen = params.bcastAlongRows ? cols : rows;
allocate(out, len);
allocate(out_ref, len);
allocate(data, len);
allocate(meanVec, vecLen);
r.normal(data, len, params.mean, (T)1.0, stream);
mean(meanVec, data, cols, rows, params.sample, params.rowMajor, stream);
meanCenter(out, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, stream);
LinAlg::naiveMatVec(out_ref, data, meanVec, cols, rows, params.rowMajor,
params.bcastAlongRows, (T)-1.0);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(meanVec));
}
protected:
MeanCenterInputs<T, IdxType> params;
T *data, *meanVec, *out, *out_ref;
};
const std::vector<MeanCenterInputs<float, int>> inputsf_i32 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, int> MeanCenterTestF_i32;
TEST_P(MeanCenterTestF_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i32,
::testing::ValuesIn(inputsf_i32));
const std::vector<MeanCenterInputs<float, size_t>> inputsf_i64 = {
{0.05f, 1.f, 1024, 32, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, true, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, true, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, true, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, true, 1234ULL},
{0.05f, 1.f, 1024, 32, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, false, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, false, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, false, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, false, false, 1234ULL},
{0.05f, 1.f, 1024, 32, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 64, true, true, false, 1234ULL},
{0.05f, 1.f, 1024, 128, true, true, false, 1234ULL},
{0.05f, -1.f, 1024, 32, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 64, false, true, false, 1234ULL},
{0.05f, -1.f, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<float, size_t> MeanCenterTestF_i64;
TEST_P(MeanCenterTestF_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestF_i64,
::testing::ValuesIn(inputsf_i64));
const std::vector<MeanCenterInputs<double, int>> inputsd_i32 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, int> MeanCenterTestD_i32;
TEST_P(MeanCenterTestD_i32, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i32,
::testing::ValuesIn(inputsd_i32));
const std::vector<MeanCenterInputs<double, size_t>> inputsd_i64 = {
{0.05, 1.0, 1024, 32, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, true, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, true, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, true, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, true, 1234ULL},
{0.05, 1.0, 1024, 32, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, false, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, false, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, false, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, false, false, 1234ULL},
{0.05, 1.0, 1024, 32, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 64, true, true, false, 1234ULL},
{0.05, 1.0, 1024, 128, true, true, false, 1234ULL},
{0.05, -1.0, 1024, 32, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 64, false, true, false, 1234ULL},
{0.05, -1.0, 1024, 128, false, true, false, 1234ULL}};
typedef MeanCenterTest<double, size_t> MeanCenterTestD_i64;
TEST_P(MeanCenterTestD_i64, Result) {
ASSERT_TRUE(devArrMatch(out, out_ref, params.cols,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MeanCenterTests, MeanCenterTestD_i64,
::testing::ValuesIn(inputsd_i64));
} // end namespace Stats
} // end namespace MLCommon
|
0eb0fb9c584d44a2b16d1c8a66aec3164e0d1557.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "../include/CudaUtilProj.cu.h"
#define EPSILON 0.01
#define VALIDATION
#define T 32
//{{{KERNELS
__global__ void
d_initTimeline( REAL* d_timeline, const unsigned int numT, const REAL t){
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < numT) {
d_timeline[gid] = t*gid / (numT-1);
}
}
__global__ void
d_initNUM( REAL* d_num, unsigned int num_size, const REAL d, unsigned myIndex, const REAL s){
const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < num_size) {
d_num[gid] = gid*d - myIndex*d + s;
}
}
__global__ void
d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){
const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < x_size) {
REAL dxl, dxu;
if(gid == 0){
// lower boundary
dxl = 0.0;
dxu = d_x[1] - d_x[0];
d_dxx[0] = 0.0;
d_dxx[1] = 0.0;
d_dxx[2] = 0.0;
d_dxx[3] = 0.0;
}else if(gid == x_size-1){
// upper boundary
dxl = d_x[x_size-1] - d_x[x_size-2];
dxu = 0.0;
d_dxx[(x_size-1)*4+0] = 0.0;
d_dxx[(x_size-1)*4+1] = 0.0;
d_dxx[(x_size-1)*4+2] = 0.0;
d_dxx[(x_size-1)*4+3] = 0.0;
}else{
dxl = d_x[gid] - d_x[gid-1];
dxu = d_x[gid+1] - d_x[gid];
d_dxx[gid*4+0] = 2.0/dxl/(dxl+dxu);
d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu);
d_dxx[gid*4+2] = 2.0/dxu/(dxl+dxu);
d_dxx[gid*4+3] = 0.0;
}
}
}
__global__ void
d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size, unsigned int y_size, unsigned int z_size){
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int z = blockDim.z*blockIdx.z + threadIdx.z;
if(x < x_size && y < y_size && z < z_size){
d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0);
}
}
__global__ void
d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g,
REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){
unsigned int j = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int i = blockDim.y*blockIdx.y + threadIdx.y;
if(i >= numX || j >= numY)
return;
d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i])+d_y[j] - 0.5*nu*nu*d_timeline[g]));
d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g]));
}
#define YX(k,j,i) (k*(numY*numX)+j*numX+i)
#define XY(k,j,i) (k*(numY*numX)+j*numY+i)
#define ZID(k,j,i) (k*(numZ*numZ)+j*numZ+i)
#define DID(j,i) (j*4+i)
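// Flat-index helpers for the expanded arrays (layouts inferred from the CPU code below):
//   YX(k,j,i)  -> u as [outer][numY][numX]
//   XY(.,i,j)  -> result/v and varX/varY planes with stride numY; note the macro's formal
//                 parameter names are misleading -- it is always invoked as XY(k,i,j)
//   ZID(k,j,i) -> a/b/c as [outer][numZ][numZ]
//   DID(i,d)   -> rows of four stencil coefficients in myDxx/myDyy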
__global__ void
d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){
//for(k, j, i)
unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; //numY
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //numX
if(k >= outer || j >= numY || i >= numX)
return;
// explicit x
u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) * result[XY(k,i,j)];
if(i > 0) {
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,0)] )
* result[XY(k,i-1,j)];
}
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,1)] )
* result[XY(k,i,j)];
if(i < numX-1) {
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,2)] )
* result[XY(k,i+1,j)];
}
// explicit y ; RAW v, write u
v[XY(k,i,j)] = 0.0;
if(j > 0) {
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,0)] )
* result[XY(k,i,j-1)];
}
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,1)] )
* result[XY(k,i,j)];
if(j < numY-1) {
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,2)] )
* result[XY(k,i,j+1)];
}
u[YX(k,j,i)] += v[XY(k,i,j)];
// implicit x // write a,b,c
a[ZID(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,0)]);
b[ZID(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,1)]);
c[ZID(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,2)]);
}
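// One thread per (k, j, i) element: this kernel fuses the explicit x- and y-contributions of the
// rollback with the assembly of the tridiagonal coefficients (a, b, c) for the implicit x sweep.
// It is meant to be launched over a 3D grid covering numX x numY x outer, e.g. with 8x8x8 thread
// blocks as in the commented-out host code further down (a sketch, not a tuned configuration).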
//}}}
//{{{WRAPPERS
void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu,const REAL t,
const unsigned numX, const unsigned numY, const unsigned numT,
REAL* d_myX, REAL* d_myY, REAL* d_myTimeline, unsigned myXindex,
unsigned myYindex) {
const unsigned int BLOCK_SIZE = 256;
unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE);
hipLaunchKernelGGL(( d_initTimeline), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myTimeline, numT, t);
NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE);
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myX, numX, dx, myXindex, s0);
const REAL stdY = 10.0*nu*sqrt(t);
const REAL dy = stdY/numY;
const REAL logAlpha = log(alpha);
NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE);
hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myY, numY, dy, myYindex, logAlpha);
}
void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){
const unsigned int BLOCK_SIZE = 256;
unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE);
hipLaunchKernelGGL(( d_initOperator), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_x, x_size, d_dxx);
}
//}}}
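// Hedged usage sketch for the wrappers above (assumes d_myX, d_myY, d_myTimeline, d_myDxx and
// d_myDyy were already hipMalloc'ed with numX, numY, numT, 4*numX and 4*numY REALs respectively):
//   initGrid_GPU(s0, alpha, nu, t, numX, numY, numT, d_myX, d_myY, d_myTimeline, myXindex, myYindex);
//   initOperator_GPU(d_myX, numX, d_myDxx);
//   initOperator_GPU(d_myY, numY, d_myDyy);
// This mirrors the commented-out calls in the host code below.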
// read a b c r, write u
inline void tridag(
const vector<REAL>& a, // size [n]
const vector<REAL>& b, // size [n]
const vector<REAL>& c, // size [n]
const vector<REAL>& r, // size [n]
const int n,
vector<REAL>& u, // size [n]
vector<REAL>& uu // size [n] temporary
) {
int i, offset;
REAL beta;
u[0] = r[0];
uu[0] = b[0];
for(i=1; i<n; i++) {
beta = a[i] / uu[i-1];
uu[i] = b[i] - beta*c[i-1];
u[i] = r[i] - beta*u[i-1];
}
#if 1
// X) this is a backward recurrence
u[n-1] = u[n-1] / uu[n-1];
for(i=n-2; i>=0; i--) {
u[i] = (u[i] - c[i]*u[i+1]) / uu[i];
}
#else
// Hint: X) can be written smth like (once you make a non-constant)
for(i=0; i<n; i++) a[i] = u[n-1-i];
a[0] = a[0] / uu[n-1];
for(i=1; i<n; i++) a[i] = (a[i] - c[n-1-i]*a[i-1]) / uu[n-1-i];
for(i=0; i<n; i++) u[i] = a[n-1-i];
#endif
}
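// tridag is the standard Thomas algorithm: a forward elimination that overwrites uu with the
// modified diagonal and u with the modified right-hand side, followed by back substitution.
// It is O(n) but sequential in both sweeps; tridagPar (from TridagPar.h, used in run_OrigCPU)
// is presumably the scan-based formulation that exposes parallelism inside each system.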
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT);
initGrid (s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
// array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY]
vector<vector<vector<REAL> > > myResult;
myResult.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
myResult[i].resize(numX);
for(int j=0; j<numX; j++){
myResult[i][j].resize(numY);
}
}
//myVarX myVarY: [numX][numY]
vector<vector<REAL> > myVarX, myVarY;
myVarX.resize(numX);
myVarY.resize(numX);
for(int i=0; i<numX; i++){
myVarX[i].resize(numY);
myVarY[i].resize(numY);
}
unsigned numZ = max(numX, numY);
// array expansion on a, b, c, y, yy, [outer][numZ][numZ]
vector<vector<vector<REAL> > > a,b,c,y,yy;
a.resize(outer);
b.resize(outer);
c.resize(outer);
y.resize(outer);
yy.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
a[i].resize(numZ);
b[i].resize(numZ);
c[i].resize(numZ);
y[i].resize(numZ);
yy[i].resize(numZ);
for(int j=0; j<numZ; j++){
a[i][j].resize(numZ);
b[i][j].resize(numZ);
c[i][j].resize(numZ);
y[i][j].resize(numZ);
yy[i][j].resize(numZ);
}
}
// array expansion on u and v: u is [outer][numY][numX], v is [outer][numX][numY]
vector<vector<vector<REAL> > > u,v;
u.resize(outer);
v.resize(outer);
for(int k=0; k<outer; k++){
u[k].resize(numY);
for(int i=0; i< numY; i++)
u[k][i].resize(numX);
v[k].resize(numX);
for(int i=0; i< numX; i++)
v[k][i].resize(numY);
}
// setPayoff(strike, globs); it is parallel, so it can be loop-distributed over the outermost loop;
// this also requires array expansion on globs.myResult, i.e. myResult
#pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D
for( unsigned k = 0; k < outer; ++ k ) { // outmost loop
// modified setPayoff function below
for(unsigned i=0;i<globs.myX.size();++i)
{
//REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization
for(unsigned j=0;j<globs.myY.size();++j)
// globs.myResult[i][j] = payoff; // note that payoff is just a scalar variable,
myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0);
}
}
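// Each outer iteration k uses its own strike (0.001*k), so myResult[k] now holds the initial
// payoff surface max(myX[i] - strike_k, 0) over the [numX][numY] grid for that strike.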
//--- original code:
// for(int i = globs.myTimeline.size()-2;i>=0;--i)
// {
// updateParams(i,alpha,beta,nu,globs);
// rollback(i, globs);
// }
//--- use loop interchange and loop distribution
//modified updateParams(g,alpha,beta,nu,globs);
// Kernel-2: 3D
for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g
#pragma omp parallel for default(shared) schedule(static) // Kernel-2: 2D
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j) {
myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
// rollback Part 1, write u,v, a, b, c
#pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par
for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop
for(unsigned i=0;i<numX;i++) {
// explicit x
u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j];
if(i > 0) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] )
* myResult[k][i-1][j];
}
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] )
* myResult[k][i][j];
if(i < numX-1) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] )
* myResult[k][i+1][j];
}
// explicit y ; RAW v, write u
v[k][i][j] = 0.0;
if(j > 0) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] )
* myResult[k][i][j-1];
}
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] )
* myResult[k][i][j];
if(j < numY-1) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] )
* myResult[k][i][j+1];
}
u[k][j][i] += v[k][i][j];
// implicit x // write a,b,c
a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]);
b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]);
c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]);
}
}
}
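// The time step is an ADI-style operator split: Part 1 above accumulates the explicit x and y
// contributions into u (and v) and builds the x-direction tridiagonal coefficients; Part 2 solves
// those numX-sized systems for each (k, j); Parts 3 and 4 then build and solve the implicit
// y-direction systems for each (k, i), writing the updated myResult.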
//Part 2 : read a,b,c,u to write u
#pragma omp parallel for default(shared) schedule(static) //kernel-4: 2D Kernel or can be merged with the last one to make a 2D kernel
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned j=0;j<numY;j++) { // Par
tridagPar(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]);
}
}
//Part 3, write a b c y reading from u,v // implicit y,
#pragma omp parallel for default(shared) schedule(static) // Kernel-5: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
for(unsigned j=0;j<numY;j++) {
a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]);
b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]);
c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]);
y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j];
}
}
}
//Part 4: write myResult reading from a b c y
#pragma omp parallel for default(shared) schedule(static) //kernel-6
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
tridagPar(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]);
}
}
}
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) //outermost loop k
res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k];
//// ---------- GPU version -------------------- ////
// globs vars for gpu
REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy;
REAL *d_result;// *d_varX, *d_varY;
// REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v;
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
unsigned myXindex = static_cast<unsigned>(s0/dx) % numX;
unsigned myYindex = static_cast<unsigned>(numY/2.0);
// printf("myXindex : %d, myYindex: %d \n", myXindex, myYindex );
int memsize_X = numX * sizeof(REAL);
int memsize_Y = numY * sizeof(REAL);
int memsize_T = numT * sizeof(REAL);
int memsize_OXY = outer * numX * numY * sizeof (REAL);
hipMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY]
// hipMalloc((void**)&d_varX, numX*numY*sizeof(REAL)); //[numX][numY]
// hipMalloc((void**)&d_varY, numX*numY*sizeof(REAL)); //[numX][numY]
hipMalloc((void**)&d_x, memsize_X); //[numX]
hipMalloc((void**)&d_y, memsize_Y); //[numY]
hipMalloc((void**)&d_timeline, memsize_T); //[numT]
hipMalloc((void**)&d_dxx, 4 * memsize_X); //[numX][4]
hipMalloc((void**)&d_dyy, 4 * memsize_Y); //[numY][4]
// a b c yy yyy: [outer][numZ][numZ]
// hipMalloc((void**)&d_a , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((void**)&d_b , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((void**)&d_c , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((void**)&d_yy , outer*numZ*numZ*sizeof(REAL)); //y in seq code
// hipMalloc((void**)&d_yyy, outer*numZ*numZ*sizeof(REAL)); //yy in seq code
// hipMalloc((void**)&d_u , outer*numY*numX*sizeof(REAL)); //d_u : [outer][numY][numX]
// hipMalloc((void**)&d_v , outer*numX*numY*sizeof(REAL)); //d_v : [outer][numX][numY]
//GPU init
// set myXindex and myYindex, both are scalars
REAL * h_timeline;
// REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy;
// h_x = (REAL *) malloc (memsize_X );
// h_y = (REAL *) malloc (memsize_Y );
h_timeline = (REAL *) malloc (memsize_T );
// for(int i = 0; i<numT; i++)
// h_timeline[i] = 0;
//hipMemcpy(d_timeline, h_timeline_in, memsize_T , hipMemcpyHostToDevice);
// initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex);
// initOperator_GPU( d_x, numX, d_dxx);
// initOperator_GPU( d_y, numY, d_dyy);
// unsigned int block_size = T*T;
// unsigned int num_blocks_numT = ceil(numT / (float)block_size);
for(unsigned i = 0; i< numT; i++)
h_timeline[i] = t*i / (numT-1); // host-side mirror of d_initTimeline
// printf ("num_blocks_numT :%d block_size: %d", num_blocks_numT, block_size);
// d_initTimeline<<< num_blocks_numT, block_size >>>(d_timeline, numT, t);
// unsigned int num_blocks_numX = ceil(numX / (float)block_size);
// d_initNUM<<<num_blocks_numX,block_size>>>(d_x, numX, dx, myXindex, s0);
// const REAL stdY = 10.0*nu*sqrt(t);
// const REAL dy = stdY/numY;
// const REAL logAlpha = log(alpha);
// unsigned int num_blocks_numY = ceil(numY / (float)block_size);
// d_initNUM<<<num_blocks_numY,block_size>>>(d_y, numY, dy, myYindex, logAlpha);
// h_dxx = (REAL *) malloc (numX*4*sizeof(REAL) );
// h_dyy = (REAL *) malloc (numY*4*sizeof(REAL) );
// hipMemcpy( h_x , d_x , numX*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_y , d_y , numY*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_timeline, d_timeline, memsize_T , hipMemcpyDeviceToHost);
// hipMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , hipMemcpyDeviceToHost);
bool valid = true;
// for(int i = 0; i < numX; i++){
// if(abs(h_x[i]-globs.myX[i]) > EPSILON){
// valid = false;
// printf("\n** invalid h_x %f %f**\n",
// h_x[i], globs.myX[i]);
// break;
// }
// }
// for(int i = 0; i < numY; i++){
// if(abs(h_y[i]-globs.myY[i]) > EPSILON){
// valid = false;
// printf("\n** invalid h_y **\n");
// break;
// }
// }
for(int i = 0; i < numT; i++){
if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){
valid = false;
printf("\n** invalid h_timeline %d %d**\n",
h_timeline[i], globs.myTimeline[i]);
break;
}
}
// for(int i = 0; i < numX*4; i++){
// if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){
// valid = false;
// printf("\n** Invalid h_dxx **\n");
// break;
// }
// }
// for(int i = 0; i < numY*4; i++){
// if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){
// valid = false;
// printf("\n** Invalid h_dyy **\n");
// break;
// }
// }
if(!valid){
printf("\n**Initialization did not validate**\n");
//return;
}
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0));
// d_setPayoff<<<gridSize, blockSize>>>(d_result, d_x, numY, numX, outer);
// REAL *h_result;//, *h_varX, *h_varY,
// h_result = (REAL*) malloc (memsize_OXY);
// h_varX = (REAL*) malloc (numX*numY*sizeof(REAL) );
// h_varY = (REAL*) malloc (numX*numY*sizeof(REAL) );
// hipMemcpy( h_result , d_result , numX*numY*outer*sizeof(REAL), hipMemcpyDeviceToHost);
// for(int k = 0; k < outer; k++)
// for(int i = 0; i < globs.myX.size(); i++)
// for(int j = 0; j < globs.myY.size(); j++){
// if(abs(h_result[k*numX*numY+i*numY+j]-myResult[k][i][j]) > EPSILON){
// printf("\n**SetPayOff did not validate %f %f**\n",
// h_result[k*numX*numY+i*numY+j], myResult[k][i][j]);
// break;
// }
// }
// hipFree(d_timeline); hipFree(d_result);
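// Free the device and host buffers allocated above (a conservative cleanup; the rest of the
// GPU path is still commented out).
hipFree(d_result); hipFree(d_x); hipFree(d_y);
hipFree(d_timeline); hipFree(d_dxx); hipFree(d_dyy);
free(h_timeline);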
}
//#endif // PROJ_CORE_ORIG
/*Generic Validation function on vectors
template<class T >
bool validate_real_arrs(REAL* arr, T check){
}
*/
void run_OrigCPU_(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
)
{
//globs vars for gpu
// REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy;
// REAL *d_result, *d_varX, *d_varY;
// REAL xIndex, yIndex;
// REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v;
// const unsigned int result_size = numX*numY*outer*sizeof(REAL);
// hipMalloc((REAL**)&d_result, numX*numY*outer*sizeof(REAL)); //[outer][numX][numY]
// hipMalloc((REAL**)&d_varX, numX*numY*sizeof(REAL)); //[numX][numY]
// hipMalloc((REAL**)&d_varY, numX*numY*sizeof(REAL)); //[numX][numY]
// hipMalloc((REAL**)&d_x, numX*sizeof(REAL)); //[numX]
// hipMalloc((REAL**)&d_y, numY*sizeof(REAL)); //[numY]
// hipMalloc((REAL**)&d_timeline, numT*sizeof(REAL)); //[numT]
// hipMalloc((REAL**)&d_dxx, numX*4*sizeof(REAL)); //[numX][4]
// hipMalloc((REAL**)&d_dyy, numY*4*sizeof(REAL)); //[numY][4]
//Needed in validation as well.
unsigned numZ = max(numX, numY);
// a b c yy yyy: [outer][numZ][numZ]
// hipMalloc((REAL**)&d_a , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((REAL**)&d_b , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((REAL**)&d_c , outer*numZ*numZ*sizeof(REAL));
// hipMalloc((REAL**)&d_yy , outer*numZ*numZ*sizeof(REAL)); //y in seq code
// hipMalloc((REAL**)&d_yyy, outer*numZ*numZ*sizeof(REAL)); //yy in seq code
// hipMalloc((REAL**)&d_u , outer*numY*numX*sizeof(REAL)); //d_u : [outer][numY][numX]
// hipMalloc((REAL**)&d_v , outer*numX*numY*sizeof(REAL)); //d_v : [outer][numX][numY]
//#ifdef VALIDATION
// array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY]
vector<vector<vector<REAL> > > myResult;
myResult.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
myResult[i].resize(numX);
for(int j=0; j<numX; j++){
myResult[i][j].resize(numY);
}
}
//myVarX myVarY: [numX][numY]
vector<vector<REAL> > myVarX, myVarY;
myVarX.resize(numX); myVarY.resize(numX);
for(int i=0; i<numX; i++){
myVarX[i].resize(numY); myVarY[i].resize(numY);
}
// array expansion on a, b, c, y, yy, [outer][numZ][numZ]
vector<vector<vector<REAL> > > a,b,c,y,yy;
a.resize(outer); b.resize(outer); c.resize(outer); y.resize(outer); yy.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
a[i].resize(numZ); b[i].resize(numZ); c[i].resize(numZ); y[i].resize(numZ); yy[i].resize(numZ);
for(int j=0; j<numZ; j++){
a[i][j].resize(numZ); b[i][j].resize(numZ); c[i][j].resize(numZ); y[i][j].resize(numZ); yy[i][j].resize(numZ);
}
}
// array expansion on u and v: u is [outer][numY][numX], v is [outer][numX][numY]
vector<vector<vector<REAL> > > u,v;
u.resize(outer); v.resize(outer);
for(int k=0; k<outer; k++){
u[k].resize(numY);
for(int i=0; i< numY; i++)
u[k][i].resize(numX);
v[k].resize(numX);
for(int i=0; i< numX; i++)
v[k][i].resize(numY);
}
//#endif
//GPU init
// initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, xIndex, yIndex);
// initOperator_GPU( d_x, numX, d_dxx);
// initOperator_GPU( d_y, numY, d_dyy);
// test initGird_GPU and initOperator_GPU
// #ifdef VALIDATION
PrivGlobs globs(numX, numY, numT);
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
// REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy;
// h_x = (REAL*) malloc (numX*sizeof(REAL) );
// h_y = (REAL*) malloc (numY*sizeof(REAL) );
// h_timeline = (REAL*) malloc (numT*sizeof(REAL) );
// h_dxx = (REAL*) malloc (numX*4*sizeof(REAL) );
// h_dyy = (REAL*) malloc (numY*4*sizeof(REAL) );
// hipMemcpy( h_x , d_x , numX*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_y , d_y , numY*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_timeline , d_timeline, numT*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , hipMemcpyDeviceToHost);
// bool valid = true;
// for(int i = 0; i < numX; i++){
// if(abs(h_x[i]-globs.myX[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numY; i++){
// if(abs(h_y[i]-globs.myY[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numT; i++){
// if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numX*4; i++){
// if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numY*4; i++){
// if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){
// valid = false;
// break;
// }
// }
// if(!valid){
// printf("\n**Initialization did not validate**\n");
// //return;
// }
// #endif
// REAL *h_result, *h_varX, *h_varY,
// h_result = (REAL*) malloc (numX*numY*outer*sizeof(REAL));
// h_varX = (REAL*) malloc (numX*numY*sizeof(REAL) );
// h_varY = (REAL*) malloc (numX*numY*sizeof(REAL) );
// // Test setPayoff
// #ifdef VALIDATION
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0));
// d_setPayoff<<<gridSize, blockSize>>>(d_result, d_x, numY, numX, outer);
// CPU setPayoff
#pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D
for( unsigned k = 0; k < outer; ++ k ) { // outmost loop
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j){
myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0);
}
}
}
// hipMemcpy( h_result , d_result , numX*numY*outer*sizeof(REAL), hipMemcpyDeviceToHost);
// if(globs.myX.size() != numX && globs.myY.size())
// printf("Numx not myX.size()");
// for(int k = 0; k < outer; k++)
// for(int i = 0; i < globs.myX.size(); i++)
// for(int j = 0; j < globs.myY.size(); j++)
// if(abs(h_result[k*numX*numY+i*numY+j]-myResult[k][i][j]) > EPSILON){
// printf("\n**SetPayOff did not validate %f %f**\n",
// h_result[k*numX*numY+i*numY+j], myResult[k][i][j]);
// return;
// }
for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g
// { //GPU updateParams
// const dim3 blockSize(8, 8, 1);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), 1);
// d_updateParams<<<gridSize, blockSize>>>(d_varX, d_varY, d_x, d_y, d_timeline, g, alpha, beta, nu, numX, numY);
// }
//CPU updateParams(g,alpha,beta,nu,globs)
#pragma omp parallel for default(shared) schedule(static)
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j) {
myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
// hipMemcpy( h_varX , d_varX , numX*numY*sizeof(REAL) , hipMemcpyDeviceToHost);
// hipMemcpy( h_varY , d_varY , numX*numY*sizeof(REAL) , hipMemcpyDeviceToHost);
// // check the result of CPU and GPU
// for(int i = 0; i < numX*numY; i++){
// if(abs(h_varX[i] - myVarX[i/numY][i%numY]) > EPSILON || abs(h_varY[i] - myVarY[i/numY][i%numY]) > EPSILON){
// printf("\n**Update Params did not validate %f=%f and %f=%f**\n",
// h_varX[i], myVarX[i/numY][i%numY], h_varY[i], myVarY[i/numY][i%numY]);
// break;
// }
// }
// { // GPU rollback Part_1
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numX/8.0), ceil(numY/8.0), ceil(outer/8.0));
// d_explicit_xy_implicit_x<<<gridSize, blockSize>>>(d_u,d_v,d_a,d_b,d_c,d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, g, numX, numY, outer, numZ);
// }
// #ifdef VALIDATION
// rollback Part 1, write u,v, a, b, c
#pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par
for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop
for(unsigned i=0;i<numX;i++) {
// explicit x
u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j];
if(i > 0) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] )
* myResult[k][i-1][j];
}
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] )
* myResult[k][i][j];
if(i < numX-1) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] )
* myResult[k][i+1][j];
}
// explicit y ; RAW v, write u
v[k][i][j] = 0.0;
if(j > 0) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] )
* myResult[k][i][j-1];
}
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] )
* myResult[k][i][j];
if(j < numY-1) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] )
* myResult[k][i][j+1];
}
u[k][j][i] += v[k][i][j];
// implicit x // write a,b,c
a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]);
b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]);
c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]);
}
}
}
//Part 2 : read a,b,c,u to write u
#pragma omp parallel for default(shared) schedule(static) //2D Kernel or can be merged with the last one to make a 2D kernel
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned j=0;j<numY;j++) { // Par
tridag(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]);
}
}
//Part 3, write a b c y reading from u,v // implicit y,
#pragma omp parallel for default(shared) schedule(static) // Kernel-4: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
for(unsigned j=0;j<numY;j++) {
a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]);
b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]);
c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]);
y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j];
}
}
}
//Part 4: write myResult reading from a b c y
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
tridag(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]);
}
}
}
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) //outermost loop k
res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k];
// Should perhaps be initialized on the GPU instead to save PCI bandwidth; the difference is probably negligible.
/*
* setPayOff:
* input: globs.myX
* output: myResult
*
* updateParams:
* input: globs.myTimeline, globs.myX, globs.myY, alpha, beta, nu
* output: myVarX, myVarY
*
* rollback-1 (explicit x/y, implicit-x coefficients):
* input: globs.myTimeline, globs.myDxx, globs.myDyy, myVarX, myVarY, myResult
* output: u, v, a, b, c
*
* tridag / tridagPar (x direction):
* input: a, b, c, u
* output: u (yy is scratch)
*
* rollback-2 (implicit-y coefficients):
* input: globs.myTimeline, globs.myDyy, myVarY, u, v
* output: a, b, c, y
*
* tridag / tridagPar (y direction):
* input: a, b, c, y
* output: myResult (yy is scratch)
* */
// #endif
}
| 0eb0fb9c584d44a2b16d1c8a66aec3164e0d1557.cu | #include "ProjHelperFun.h"
#include "Constants.h"
#include "TridagPar.h"
#include "../include/CudaUtilProj.cu.h"
#define EPSILON 0.01
#define VALIDATION
#define T 32
//{{{KERNELS
__global__ void
d_initTimeline( REAL* d_timeline, const unsigned int numT, const REAL t){
const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < numT) {
d_timeline[gid] = t*gid / (numT-1);
}
}
__global__ void
d_initNUM( REAL* d_num, unsigned int num_size, const REAL d, unsigned myIndex, const REAL s){
const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < num_size) {
d_num[gid] = gid*d - myIndex*d + s;
}
}
__global__ void
d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){
const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x;
if(gid < x_size) {
REAL dxl, dxu;
if(gid == 0){
// lower boundary
dxl = 0.0;
dxu = d_x[1] - d_x[0];
d_dxx[0] = 0.0;
d_dxx[1] = 0.0;
d_dxx[2] = 0.0;
d_dxx[3] = 0.0;
}else if(gid == x_size-1){
// upper boundary
dxl = d_x[x_size-1] - d_x[x_size-2];
dxu = 0.0;
d_dxx[(x_size-1)*4+0] = 0.0;
d_dxx[(x_size-1)*4+1] = 0.0;
d_dxx[(x_size-1)*4+2] = 0.0;
d_dxx[(x_size-1)*4+3] = 0.0;
}else{
dxl = d_x[gid] - d_x[gid-1];
dxu = d_x[gid+1] - d_x[gid];
d_dxx[gid*4+0] = 2.0/dxl/(dxl+dxu);
d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu);
d_dxx[gid*4+2] = 2.0/dxu/(dxl+dxu);
d_dxx[gid*4+3] = 0.0;
}
}
}
__global__ void
d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size, unsigned int y_size, unsigned int z_size){
unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int z = blockDim.z*blockIdx.z + threadIdx.z;
if(x < x_size && y < y_size && z < z_size){
d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0);
}
}
__global__ void
d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g,
REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){
unsigned int j = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int i = blockDim.y*blockIdx.y + threadIdx.y;
if(i >= numX || j >= numY)
return;
d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i])+d_y[j] - 0.5*nu*nu*d_timeline[g]));
d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g]));
}
#define YX(k,j,i) (k*(numY*numX)+j*numX+i)
#define XY(k,j,i) (k*(numY*numX)+j*numY+i)
#define ZID(k,j,i) (k*(numZ*numZ)+j*numZ+i)
#define DID(j,i) (j*4+i)
__global__ void
d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){
//for(k, j, i)
unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer
unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; //numY
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //numX
if(k >= outer || j >= numY || i >= numX)
return;
// explicit x
u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) * result[XY(k,i,j)];
if(i > 0) {
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,0)] )
* result[XY(k,i-1,j)];
}
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,1)] )
* result[XY(k,i,j)];
if(i < numX-1) {
u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[DID(i,2)] )
* result[XY(k,i+1,j)];
}
// explicit y ; RAW v, write u
v[XY(k,i,j)] = 0.0;
if(j > 0) {
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,0)] )
* result[XY(k,i,j-1)];
}
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,1)] )
* result[XY(k,i,j)];
if(j < numY-1) {
v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[DID(j,2)] )
* result[XY(k,i,j+1)];
}
u[YX(k,j,i)] += v[XY(k,i,j)];
// implicit x // write a,b,c
a[ZID(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,0)]);
b[ZID(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,1)]);
c[ZID(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[DID(i,2)]);
}
//}}}
//{{{WRAPPERS
void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu,const REAL t,
const unsigned numX, const unsigned numY, const unsigned numT,
REAL* d_myX, REAL* d_myY, REAL* d_myTimeline, unsigned myXindex,
unsigned myYindex) {
const unsigned int BLOCK_SIZE = 256;
unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE);
d_initTimeline<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myTimeline, numT, t);
NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE);
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myX, numX, dx, myXindex, s0);
const REAL stdY = 10.0*nu*sqrt(t);
const REAL dy = stdY/numY;
const REAL logAlpha = log(alpha);
NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE);
d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myY, numY, dy, myYindex, logAlpha);
}
void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){
const unsigned int BLOCK_SIZE = 256;
unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE);
d_initOperator<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_x, x_size, d_dxx);
}
//}}}
// read a b c r, write u
inline void tridag(
const vector<REAL>& a, // size [n]
const vector<REAL>& b, // size [n]
const vector<REAL>& c, // size [n]
const vector<REAL>& r, // size [n]
const int n,
vector<REAL>& u, // size [n]
vector<REAL>& uu // size [n] temporary
) {
int i, offset;
REAL beta;
u[0] = r[0];
uu[0] = b[0];
for(i=1; i<n; i++) {
beta = a[i] / uu[i-1];
uu[i] = b[i] - beta*c[i-1];
u[i] = r[i] - beta*u[i-1];
}
#if 1
// X) this is a backward recurrence
u[n-1] = u[n-1] / uu[n-1];
for(i=n-2; i>=0; i--) {
u[i] = (u[i] - c[i]*u[i+1]) / uu[i];
}
#else
// Hint: X) can be written smth like (once you make a non-constant)
for(i=0; i<n; i++) a[i] = u[n-1-i];
a[0] = a[0] / uu[n-1];
for(i=1; i<n; i++) a[i] = (a[i] - c[n-1-i]*a[i-1]) / uu[n-1-i];
for(i=0; i<n; i++) u[i] = a[n-1-i];
#endif
}
void run_OrigCPU(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
) {
PrivGlobs globs(numX, numY, numT);
initGrid (s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
// array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY]
vector<vector<vector<REAL> > > myResult;
myResult.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
myResult[i].resize(numX);
for(int j=0; j<numX; j++){
myResult[i][j].resize(numY);
}
}
//myVarX myVarY: [numX][numY]
vector<vector<REAL> > myVarX, myVarY;
myVarX.resize(numX);
myVarY.resize(numX);
for(int i=0; i<numX; i++){
myVarX[i].resize(numY);
myVarY[i].resize(numY);
}
unsigned numZ = max(numX, numY);
// array expansion on a, b, c, y, yy, [outer][numZ][numZ]
vector<vector<vector<REAL> > > a,b,c,y,yy;
a.resize(outer);
b.resize(outer);
c.resize(outer);
y.resize(outer);
yy.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
a[i].resize(numZ);
b[i].resize(numZ);
c[i].resize(numZ);
y[i].resize(numZ);
yy[i].resize(numZ);
for(int j=0; j<numZ; j++){
a[i][j].resize(numZ);
b[i][j].resize(numZ);
c[i][j].resize(numZ);
y[i][j].resize(numZ);
yy[i][j].resize(numZ);
}
}
// array expansion on u and v: u is [outer][numY][numX], v is [outer][numX][numY]
vector<vector<vector<REAL> > > u,v;
u.resize(outer);
v.resize(outer);
for(int k=0; k<outer; k++){
u[k].resize(numY);
for(int i=0; i< numY; i++)
u[k][i].resize(numX);
v[k].resize(numX);
for(int i=0; i< numX; i++)
v[k][i].resize(numY);
}
// setPayoff(strike, globs); it is parallel, so it can be loop-distributed over the outermost loop;
// this also requires array expansion on globs.myResult, i.e. myResult
#pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D
for( unsigned k = 0; k < outer; ++ k ) { // outmost loop
// modified setPayoff function below
for(unsigned i=0;i<globs.myX.size();++i)
{
//REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization
for(unsigned j=0;j<globs.myY.size();++j)
// globs.myResult[i][j] = payoff; // note that payoff is just a scalar variable,
myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0);
}
}
//--- original code:
// for(int i = globs.myTimeline.size()-2;i>=0;--i)
// {
// updateParams(i,alpha,beta,nu,globs);
// rollback(i, globs);
// }
//--- use loop interchange and loop distribution
//modified updateParams(g,alpha,beta,nu,globs);
// Kernel-2: 3D
for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g
#pragma omp parallel for default(shared) schedule(static) // Kernel-2: 2D
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j) {
myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
// rollback Part 1, write u,v, a, b, c
#pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par
for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop
for(unsigned i=0;i<numX;i++) {
// explicit x
u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j];
if(i > 0) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] )
* myResult[k][i-1][j];
}
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] )
* myResult[k][i][j];
if(i < numX-1) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] )
* myResult[k][i+1][j];
}
// explicit y ; RAW v, write u
v[k][i][j] = 0.0;
if(j > 0) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] )
* myResult[k][i][j-1];
}
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] )
* myResult[k][i][j];
if(j < numY-1) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] )
* myResult[k][i][j+1];
}
u[k][j][i] += v[k][i][j];
// implicit x // write a,b,c
a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]);
b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]);
c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]);
}
}
}
//Part 2 : read a,b,c,u to write u
#pragma omp parallel for default(shared) schedule(static) //kernel-4: 2D Kernel or can be merged with the last one to make a 2D kernel
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned j=0;j<numY;j++) { // Par
tridagPar(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]);
}
}
//Part 3, write a b c y reading from u,v // implicit y,
#pragma omp parallel for default(shared) schedule(static) // Kernel-5: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
for(unsigned j=0;j<numY;j++) {
a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]);
b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]);
c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]);
y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j];
}
}
}
//Part 4: write myResult reading from a b c y
#pragma omp parallel for default(shared) schedule(static) //kernel-6
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
tridagPar(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]);
}
}
}
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) //outermost loop k
res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k];
//// ---------- GPU version -------------------- ////
// globs vars for gpu
REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy;
REAL *d_result;// *d_varX, *d_varY;
// REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v;
const REAL stdX = 20.0*alpha*s0*sqrt(t);
const REAL dx = stdX/numX;
unsigned myXindex = static_cast<unsigned>(s0/dx) % numX;
unsigned myYindex = static_cast<unsigned>(numY/2.0);
// printf("myXindex : %d, myYindex: %d \n", myXindex, myYindex );
int memsize_X = numX * sizeof(REAL);
int memsize_Y = numY * sizeof(REAL);
int memsize_T = numT * sizeof(REAL);
int memsize_OXY = outer * numX * numY * sizeof (REAL);
cudaMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY]
// cudaMalloc((void**)&d_varX, numX*numY*sizeof(REAL)); //[numX][numY]
// cudaMalloc((void**)&d_varY, numX*numY*sizeof(REAL)); //[numX][numY]
cudaMalloc((void**)&d_x, memsize_X); //[numX]
cudaMalloc((void**)&d_y, memsize_Y); //[numY]
cudaMalloc((void**)&d_timeline, memsize_T); //[numT]
cudaMalloc((void**)&d_dxx, 4 * memsize_X); //[numX][4]
cudaMalloc((void**)&d_dyy, 4 * memsize_Y); //[numY][4]
// a b c yy yyy: [outer][numZ][numZ]
// cudaMalloc((void**)&d_a , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((void**)&d_b , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((void**)&d_c , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((void**)&d_yy , outer*numZ*numZ*sizeof(REAL)); //y in seq code
// cudaMalloc((void**)&d_yyy, outer*numZ*numZ*sizeof(REAL)); //yy in seq code
// cudaMalloc((void**)&d_u , outer*numY*numX*sizeof(REAL)); //d_u : [outer][numY][numX]
// cudaMalloc((void**)&d_v , outer*numX*numY*sizeof(REAL)); //d_v : [outer][numX][numY]
//GPU init
// set myXindex and myYindex, both are scalars
REAL * h_timeline;
// REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy;
// h_x = (REAL *) malloc (memsize_X );
// h_y = (REAL *) malloc (memsize_Y );
h_timeline = (REAL *) malloc (memsize_T );
// for(int i = 0; i<numT; i++)
// h_timeline[i] = 0;
//cudaMemcpy(d_timeline, h_timeline_in, memsize_T , cudaMemcpyHostToDevice);
// initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex);
// initOperator_GPU( d_x, numX, d_dxx);
// initOperator_GPU( d_y, numY, d_dyy);
// unsigned int block_size = T*T;
// unsigned int num_blocks_numT = ceil(numT / (float)block_size);
for(unsigned i = 0; i< numT; i++)
h_timeline[i] = t*i / (numT-1); // host-side mirror of d_initTimeline
// printf ("num_blocks_numT :%d block_size: %d", num_blocks_numT, block_size);
// d_initTimeline<<< num_blocks_numT, block_size >>>(d_timeline, numT, t);
// unsigned int num_blocks_numX = ceil(numX / (float)block_size);
// d_initNUM<<<num_blocks_numX,block_size>>>(d_x, numX, dx, myXindex, s0);
// const REAL stdY = 10.0*nu*sqrt(t);
// const REAL dy = stdY/numY;
// const REAL logAlpha = log(alpha);
// unsigned int num_blocks_numY = ceil(numY / (float)block_size);
// d_initNUM<<<num_blocks_numY,block_size>>>(d_y, numY, dy, myYindex, logAlpha);
// h_dxx = (REAL *) malloc (numX*4*sizeof(REAL) );
// h_dyy = (REAL *) malloc (numY*4*sizeof(REAL) );
// cudaMemcpy( h_x , d_x , numX*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_y , d_y , numY*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_timeline, d_timeline, memsize_T , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , cudaMemcpyDeviceToHost);
bool valid = true;
// for(int i = 0; i < numX; i++){
// if(abs(h_x[i]-globs.myX[i]) > EPSILON){
// valid = false;
// printf("\n** invalid h_x %f %f**\n",
// h_x[i], globs.myX[i]);
// break;
// }
// }
// for(int i = 0; i < numY; i++){
// if(abs(h_y[i]-globs.myY[i]) > EPSILON){
// valid = false;
// printf("\n** invalid h_y **\n");
// break;
// }
// }
for(int i = 0; i < numT; i++){
if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){
valid = false;
printf("\n** invalid h_timeline %d %d**\n",
h_timeline[i], globs.myTimeline[i]);
break;
}
}
// for(int i = 0; i < numX*4; i++){
// if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){
// valid = false;
// printf("\n** Invalid h_dxx **\n");
// break;
// }
// }
// for(int i = 0; i < numY*4; i++){
// if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){
// valid = false;
// printf("\n** Invalid h_dyy **\n");
// break;
// }
// }
if(!valid){
printf("\n**Initialization did not validate**\n");
//return;
}
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0));
// d_setPayoff<<<gridSize, blockSize>>>(d_result, d_x, numY, numX, outer);
// REAL *h_result;//, *h_varX, *h_varY,
// h_result = (REAL*) malloc (memsize_OXY);
// h_varX = (REAL*) malloc (numX*numY*sizeof(REAL) );
// h_varY = (REAL*) malloc (numX*numY*sizeof(REAL) );
// cudaMemcpy( h_result , d_result , numX*numY*outer*sizeof(REAL), cudaMemcpyDeviceToHost);
// for(int k = 0; k < outer; k++)
// for(int i = 0; i < globs.myX.size(); i++)
// for(int j = 0; j < globs.myY.size(); j++){
// if(abs(h_result[k*numX*numY+i*numY+j]-myResult[k][i][j]) > EPSILON){
// printf("\n**SetPayOff did not validate %f %f**\n",
// h_result[k*numX*numY+i*numY+j], myResult[k][i][j]);
// break;
// }
// }
// cudaFree(d_timeline); cudaFree(d_result);
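// Free the device and host buffers allocated above (a conservative cleanup; the rest of the
// GPU path is still commented out).
cudaFree(d_result); cudaFree(d_x); cudaFree(d_y);
cudaFree(d_timeline); cudaFree(d_dxx); cudaFree(d_dyy);
free(h_timeline);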
}
//#endif // PROJ_CORE_ORIG
/*Generic Validation function on vectors
template<class T >
bool validate_real_arrs(REAL* arr, T check){
}
*/
void run_OrigCPU_(
const unsigned int& outer,
const unsigned int& numX,
const unsigned int& numY,
const unsigned int& numT,
const REAL& s0,
const REAL& t,
const REAL& alpha,
const REAL& nu,
const REAL& beta,
REAL* res // [outer] RESULT
)
{
//globs vars for gpu
// REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy;
// REAL *d_result, *d_varX, *d_varY;
// REAL xIndex, yIndex;
// REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v;
// const unsigned int result_size = numX*numY*outer*sizeof(REAL);
// cudaMalloc((REAL**)&d_result, numX*numY*outer*sizeof(REAL)); //[outer][numX][numY]
// cudaMalloc((REAL**)&d_varX, numX*numY*sizeof(REAL)); //[numX][numY]
// cudaMalloc((REAL**)&d_varY, numX*numY*sizeof(REAL)); //[numX][numY]
// cudaMalloc((REAL**)&d_x, numX*sizeof(REAL)); //[numX]
// cudaMalloc((REAL**)&d_y, numY*sizeof(REAL)); //[numY]
// cudaMalloc((REAL**)&d_timeline, numT*sizeof(REAL)); //[numT]
// cudaMalloc((REAL**)&d_dxx, numX*4*sizeof(REAL)); //[numX][4]
// cudaMalloc((REAL**)&d_dyy, numY*4*sizeof(REAL)); //[numY][4]
//Needed in validation as well.
unsigned numZ = max(numX, numY);
// a b c yy yyy: [outer][numZ][numZ]
// cudaMalloc((REAL**)&d_a , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((REAL**)&d_b , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((REAL**)&d_c , outer*numZ*numZ*sizeof(REAL));
// cudaMalloc((REAL**)&d_yy , outer*numZ*numZ*sizeof(REAL)); //y in seq code
// cudaMalloc((REAL**)&d_yyy, outer*numZ*numZ*sizeof(REAL)); //yy in seq code
// cudaMalloc((REAL**)&d_u , outer*numY*numX*sizeof(REAL)); //d_u : [outer][numY][numX]
// cudaMalloc((REAL**)&d_v , outer*numX*numY*sizeof(REAL)); //d_v : [outer][numX][numY]
//#ifdef VALIDATION
// array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY]
vector<vector<vector<REAL> > > myResult;
myResult.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
myResult[i].resize(numX);
for(int j=0; j<numX; j++){
myResult[i][j].resize(numY);
}
}
//myVarX myVarY: [numX][numY]
vector<vector<REAL> > myVarX, myVarY;
myVarX.resize(numX); myVarY.resize(numX);
for(int i=0; i<numX; i++){
myVarX[i].resize(numY); myVarY[i].resize(numY);
}
// array expansion on a, b, c, y, yy, [outer][numZ][numZ]
vector<vector<vector<REAL> > > a,b,c,y,yy;
a.resize(outer); b.resize(outer); c.resize(outer); y.resize(outer); yy.resize(outer);
#pragma omp parallel for default(shared) schedule(static)
for(int i=0; i<outer; i++) {
a[i].resize(numZ); b[i].resize(numZ); c[i].resize(numZ); y[i].resize(numZ); yy[i].resize(numZ);
for(int j=0; j<numZ; j++){
a[i][j].resize(numZ); b[i][j].resize(numZ); c[i][j].resize(numZ); y[i][j].resize(numZ); yy[i][j].resize(numZ);
}
}
// array expansion on u and v: u is [outer][numY][numX], v is [outer][numX][numY]
vector<vector<vector<REAL> > > u,v;
u.resize(outer); v.resize(outer);
for(int k=0; k<outer; k++){
u[k].resize(numY);
for(int i=0; i< numY; i++)
u[k][i].resize(numX);
v[k].resize(numX);
for(int i=0; i< numX; i++)
v[k][i].resize(numY);
}
//#endif
//GPU init
// initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, xIndex, yIndex);
// initOperator_GPU( d_x, numX, d_dxx);
// initOperator_GPU( d_y, numY, d_dyy);
// test initGird_GPU and initOperator_GPU
// #ifdef VALIDATION
PrivGlobs globs(numX, numY, numT);
initGrid(s0,alpha,nu,t, numX, numY, numT, globs);
initOperator(globs.myX,globs.myDxx);
initOperator(globs.myY,globs.myDyy);
// REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy;
// h_x = (REAL*) malloc (numX*sizeof(REAL) );
// h_y = (REAL*) malloc (numY*sizeof(REAL) );
// h_timeline = (REAL*) malloc (numT*sizeof(REAL) );
// h_dxx = (REAL*) malloc (numX*4*sizeof(REAL) );
// h_dyy = (REAL*) malloc (numY*4*sizeof(REAL) );
// cudaMemcpy( h_x , d_x , numX*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_y , d_y , numY*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_timeline , d_timeline, numT*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , cudaMemcpyDeviceToHost);
// bool valid = true;
// for(int i = 0; i < numX; i++){
// if(abs(h_x[i]-globs.myX[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numY; i++){
// if(abs(h_y[i]-globs.myY[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numT; i++){
// if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numX*4; i++){
// if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){
// valid = false;
// break;
// }
// }
// for(int i = 0; i < numY*4; i++){
// if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){
// valid = false;
// break;
// }
// }
// if(!valid){
// printf("\n**Initialization did not validate**\n");
// //return;
// }
// #endif
// REAL *h_result, *h_varX, *h_varY,
// h_result = (REAL*) malloc (numX*numY*outer*sizeof(REAL));
// h_varX = (REAL*) malloc (numX*numY*sizeof(REAL) );
// h_varY = (REAL*) malloc (numX*numY*sizeof(REAL) );
// // Test setPayoff
// #ifdef VALIDATION
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0));
// d_setPayoff<<<gridSize, blockSize>>>(d_result, d_x, numY, numX, outer);
// CPU setPayoff
#pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D
for( unsigned k = 0; k < outer; ++ k ) { // outmost loop
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j){
myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0);
}
}
}
// cudaMemcpy( h_result , d_result , numX*numY*outer*sizeof(REAL), cudaMemcpyDeviceToHost);
// if(globs.myX.size() != numX && globs.myY.size())
// printf("Numx not myX.size()");
// for(int k = 0; k < outer; k++)
// for(int i = 0; i < globs.myX.size(); i++)
// for(int j = 0; j < globs.myY.size(); j++)
// if(abs(h_result[k*numX*numY+i*numY+j]-myResult[k][i][j]) > EPSILON){
// printf("\n**SetPayOff did not validate %f %f**\n",
// h_result[k*numX*numY+i*numY+j], myResult[k][i][j]);
// return;
// }
for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g
// { //GPU updateParams
// const dim3 blockSize(8, 8, 1);
// const dim3 gridSize(ceil(numY/8.0), ceil(numX/8.0), 1);
// d_updateParams<<<gridSize, blockSize>>>(d_varX, d_varY, d_x, d_y, d_timeline, g, alpha, beta, nu, numX, numY);
// }
//CPU updateParams(g,alpha,beta,nu,globs)
#pragma omp parallel for default(shared) schedule(static)
for(unsigned i=0;i<globs.myX.size();++i){
for(unsigned j=0;j<globs.myY.size();++j) {
myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
);
myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i])
+ globs.myY[j]
- 0.5*nu*nu*globs.myTimeline[g] )
); // nu*nu
}
}
// cudaMemcpy( h_varX , d_varX , numX*numY*sizeof(REAL) , cudaMemcpyDeviceToHost);
// cudaMemcpy( h_varY , d_varY , numX*numY*sizeof(REAL) , cudaMemcpyDeviceToHost);
// // check the result of CPU and GPU
// for(int i = 0; i < numX*numY; i++){
// if(abs(h_varX[i] - myVarX[i/numY][i%numY]) > EPSILON || abs(h_varY[i] - myVarY[i/numY][i%numY]) > EPSILON){
// printf("\n**Update Params did not validate %f=%f and %f=%f**\n",
// h_varX[i], myVarX[i/numY][i%numY], h_varY[i], myVarY[i/numY][i%numY]);
// break;
// }
// }
// { // GPU rollback Part_1
// const dim3 blockSize(8, 8, 8);
// const dim3 gridSize(ceil(numX/8.0), ceil(numY/8.0), ceil(outer/8.0));
// d_explicit_xy_implicit_x<<<gridSize, blockSize>>>(d_u,d_v,d_a,d_b,d_c,d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, g, numX, numY, outer, numZ);
// }
// #ifdef VALIDATION
// rollback Part 1, write u,v, a, b, c
#pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par
for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop
for(unsigned i=0;i<numX;i++) {
// explicit x
u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j];
if(i > 0) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] )
* myResult[k][i-1][j];
}
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] )
* myResult[k][i][j];
if(i < numX-1) {
u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] )
* myResult[k][i+1][j];
}
// explicit y ; RAW v, write u
v[k][i][j] = 0.0;
if(j > 0) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] )
* myResult[k][i][j-1];
}
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] )
* myResult[k][i][j];
if(j < numY-1) {
v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] )
* myResult[k][i][j+1];
}
u[k][j][i] += v[k][i][j];
// implicit x // write a,b,c
a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]);
b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]);
c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]);
}
}
}
//Part 2 : read a,b,c,u to write u
#pragma omp parallel for default(shared) schedule(static) //2D Kernel or can be merged with the last one to make a 2D kernel
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned j=0;j<numY;j++) { // Par
tridag(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]);
}
}
//Part 3, write a b c y reading from u,v // implicit y,
#pragma omp parallel for default(shared) schedule(static) // Kernel-4: 3D
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
for(unsigned j=0;j<numY;j++) {
a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]);
b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]);
c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]);
y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j];
}
}
}
//Part 4: write myResult reading from a b c y
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par
for(unsigned i=0;i<numX;i++) {
tridag(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]);
}
}
}
#pragma omp parallel for default(shared) schedule(static)
for( unsigned k = 0; k < outer; ++ k ) //outermost loop k
res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k];
    //Should perhaps be initialized on the GPU instead to save PCI bandwidth. Possibly negligible.
/*
* setPayOff:
* INPUT: globs.myX
* Output: myResult
*
* updateParams:
* input: globs.myTimeline, globs.myX, globs.myY, alpha, beta,
* output: myVarX, myVarY
*
* rollback-1:
* input: globs.myTimeLine, myResult,
* output:
*
* tridagPar:
*
* rollback-2:
* input:
* output:
* */
// #endif
}
|
00a02b4b9b662b23fe2865a376d7113dcf84e62e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__extractmat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
long long *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
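		// iXSIZE/iYSIZE are rounded up to the next multiple of the block size so
		// that the grid computed below covers the whole XSIZE x YSIZE matrix.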
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
__extractmat), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
__extractmat), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
__extractmat), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 00a02b4b9b662b23fe2865a376d7113dcf84e62e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__extractmat.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
long long *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__extractmat<<<gridBlock,threadBlock>>>(a,b,c,n);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__extractmat<<<gridBlock,threadBlock>>>(a,b,c,n);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__extractmat<<<gridBlock,threadBlock>>>(a,b,c,n);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
934a267453f7bbf89f940901ab57cddfd12b5af7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cmath>
#include "AD.cuh"
#if defined (__HIPCC__)
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
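// Usage sketch (assumption: hipcc accepts the CUDA triple-chevron launch syntax,
// which is why the __HIPCC__ branch above can mirror the CUDA definitions):
//   my_kernel KERNEL_ARGS2(dim3(grid), dim3(block)) (arg0, arg1);
// expands to
//   my_kernel<<<dim3(grid), dim3(block)>>>(arg0, arg1);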
# define N 3
template <size_t NUMVAR, class Precision>
__host__ __device__ void Print(const Dual<NUMVAR, Precision> &a)
{
//printf("Ez a print lefutott \n");
printf(" real: %.3f \n", (float)a.real);
for (int i = 0; i < N; i++)
{
printf(" dual[%d]: %.3f \n", i, (float)a.dual[i]);
}
printf("\n");
}
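// Dual<N, P> (from AD.cuh) is used here as a forward-mode dual number: one real
// value plus N partial-derivative slots, so Print() shows f and df/dx_i for the
// N seeded variables. (Description inferred from usage; see AD.cuh for details.)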
__host__ __device__ void TryVariableInitialization()
{
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#host
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryVariableInitialization(): Function is called on the device \n");
#else
// Host code here
printf(" TryVariableInitialization(): Function is called on the host \n");
#endif
// Variable initialized with int
Dual<N, double> a1 = { 2 };
Dual<N, float> a2 = { 2 };
Dual<N, int> a3 = { 2 };
printf("Dual double (a1)\n");
Print(a1);
printf("Dual float (a2) \n");
Print(a2);
printf("Dual int (a3)\n");
Print(a3);
Dual<N, double> b1 = { 1, 0 };
Dual<N, float> b2 = { 2, 1 };
Dual<N, int> b3 = { 3, 2 };
printf("Dual double (b1) \n");
Print(b1);
printf("Dual float (b2) \n");
Print(b2);
printf("Dual int (b3) \n");
Print(b3);
}
__host__ __device__ void TryAddition()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryAddition(): Function is called on the device \n");
#else
// Host code here
printf(" TryAddition(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 2, 1 };
Dual<N, double> b1 = { 10, 0 };
int b2 = 2;
float b3 = 3.0f;
double b4 = 5.0;
printf("Dual + Dual (a1+b1)\n");
Print(a1 + b1);
printf("Dual + Dual (b1+a1)\n");
Print(b1 + a1);
printf("Dual + int (a1+b2) \n");
Print(a1 + b2);
printf("int + Dual (b2+a1) \n");
Print(b2 + a1);
printf("Dual + float (a1+b3) \n");
Print(a1 + b3);
printf("float + Dual (b3+a1) \n");
Print(b3 + a1);
printf("Dual + double (a1+b4) \n");
Print(a1 + b4);
printf("double + Dual (a1+b4) \n");
Print(b4 + a1);
printf("Mixed: b2 + a1 + b3 + b4 + b1 + b3\n");
Print(b2 + a1 + b3 + b4 + b1);
printf("Mixed: b4 + b1 + b2 + a1 + b3 + b2\n");
Print(b4 + b1 + b2 + a1 + b3);
}
__host__ __device__ void TrySubtraction()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TrySubtraction(): Function is called on the device \n");
#else
// Host code here
printf(" TrySubtraction(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 2, 2 };
Dual<N, double> b1 = { 10, 1 };
Dual<N, double> c1 = { 6, 0 };
int b2 = 3;
float b3 = 3.0f;
double b4 = 5.0;
printf("Dual - Dual (a1-b1)\n");
Print(a1 - b1);
printf("Dual - Dual (b1-a1)\n");
Print(b1 - a1);
printf("Dual - Dual - Dual (a1-b1-c1)\n");
Print(a1 - b1 - c1);
printf("Dual - Dual - Dual (b1-c1-a1)\n");
Print(b1 - c1 - a1);
printf("Dual - int (a1-b2) \n");
Print(a1 - b2);
printf("int - Dual (b2-a1) \n");
Print(b2 - a1);
printf("Dual - float (a1-b3) \n");
Print(a1 - b3);
printf("float - Dual (b3-a1) \n");
Print(b3 - a1);
printf("Dual - double (a1-b4) \n");
Print(a1 - b4);
printf("double - Dual (a1-b4) \n");
Print(b4 - a1);
printf("Mixed: b2 + a1 + b3 + b4 + b1 + b3\n");
Print(b2 - a1 - b3 - b4 - b1);
printf("Mixed: b4 - b1 - b2 - a1 - b3 - b2\n");
Print(b4 - b1 - b2 - a1 - b3);
}
__host__ __device__ void TryMultiplication()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryMultiplication(): Function is called on the device \n");
#else
// Host code here
printf(" TryMultiplication(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 5, 0 };
Dual<N, double> b1 = { 5, 1 };
Dual<N, double> c1 = { 2, 2 };
printf("Dual*Dual: a1*b1\n");
Print(a1*b1);
printf("Dual*Dual: b1*a1\n");
Print(b1*a1);
printf("b1*(a1+c1)\n");
Print(b1*(a1+c1));
printf("(b1*a1)+c1)\n");
Print((b1*a1)+c1);
double d1 = 5.6;
float d2 = 6.1f;
int d3 = 4;
printf("Dual*double: a1 * d1 \n");
Print(a1*d1);
printf("Dual*double: d1 * a1 \n");
Print(d1*a1);
printf("Dual*float: a1 * d2 \n");
Print(a1*d2);
printf("Dual*float: d2 * a1 \n");
Print(d2*a1);
printf("Dual*int: a1 * d3 \n");
Print(a1*d3);
printf("Dual*int: d3 * a1 \n");
Print(d3*a1);
}
__host__ __device__ void TryDivision()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryDivision(): Function is called on the device \n");
#else
// Host code here
printf(" TryDivision(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 5, 0 };
Dual<N, double> b1 = { 2.5, 1 };
Dual<N, double> c1 = { 2, 2 };
printf("Dual/Dual: a1/b1\n");
Print(a1/b1);
printf("Dual/Dual: b1/a1\n");
Print(b1/a1);
printf("b1/(a1/c1)\n");
Print(b1/(a1/c1));
printf("(b1/a1)/c1)\n");
Print((b1/a1)/c1);
double d1 = 5.6;
float d2 = 6.1f;
int d3 = 4;
printf("Dual/double: a1 / d1 \n");
Print(a1/d1);
printf("double/Dual: d1 / a1 \n");
Print(d1/a1);
printf("Dual/float: a1 / d2 \n");
Print(a1/d2);
printf("float/Dual: d2 / a1 \n");
Print(d2/a1);
printf("Dual/int: a1 / d3 \n");
Print(a1/d3);
printf("int/Dual: d3 / a1 \n");
Print(d3/a1);
}
__global__ void TryDualNumbers()
{
TryVariableInitialization();
TryAddition();
TrySubtraction();
TryMultiplication();
TryDivision();
}
int main()
{
TryVariableInitialization();
TryAddition();
TrySubtraction();
TryMultiplication();
TryDivision();
TryDualNumbers KERNEL_ARGS2(1, 1)();
return 0;
} | 934a267453f7bbf89f940901ab57cddfd12b5af7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <cmath>
#include "AD.cuh"
#if defined (__CUDACC__)
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif
# define N 3
template <size_t NUMVAR, class Precision>
__host__ __device__ void Print(const Dual<NUMVAR, Precision> &a)
{
//printf("Ez a print lefutott \n");
printf(" real: %.3f \n", (float)a.real);
for (int i = 0; i < N; i++)
{
printf(" dual[%d]: %.3f \n", i, (float)a.dual[i]);
}
printf("\n");
}
__host__ __device__ void TryVariableInitialization()
{
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#host
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryVariableInitialization(): Function is called on the device \n");
#else
// Host code here
printf(" TryVariableInitialization(): Function is called on the host \n");
#endif
// Variable initialized with int
Dual<N, double> a1 = { 2 };
Dual<N, float> a2 = { 2 };
Dual<N, int> a3 = { 2 };
printf("Dual double (a1)\n");
Print(a1);
printf("Dual float (a2) \n");
Print(a2);
printf("Dual int (a3)\n");
Print(a3);
Dual<N, double> b1 = { 1, 0 };
Dual<N, float> b2 = { 2, 1 };
Dual<N, int> b3 = { 3, 2 };
printf("Dual double (b1) \n");
Print(b1);
printf("Dual float (b2) \n");
Print(b2);
printf("Dual int (b3) \n");
Print(b3);
}
__host__ __device__ void TryAddition()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryAddition(): Function is called on the device \n");
#else
// Host code here
printf(" TryAddition(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 2, 1 };
Dual<N, double> b1 = { 10, 0 };
int b2 = 2;
float b3 = 3.0f;
double b4 = 5.0;
printf("Dual + Dual (a1+b1)\n");
Print(a1 + b1);
printf("Dual + Dual (b1+a1)\n");
Print(b1 + a1);
printf("Dual + int (a1+b2) \n");
Print(a1 + b2);
printf("int + Dual (b2+a1) \n");
Print(b2 + a1);
printf("Dual + float (a1+b3) \n");
Print(a1 + b3);
printf("float + Dual (b3+a1) \n");
Print(b3 + a1);
printf("Dual + double (a1+b4) \n");
Print(a1 + b4);
printf("double + Dual (a1+b4) \n");
Print(b4 + a1);
printf("Mixed: b2 + a1 + b3 + b4 + b1 + b3\n");
Print(b2 + a1 + b3 + b4 + b1);
printf("Mixed: b4 + b1 + b2 + a1 + b3 + b2\n");
Print(b4 + b1 + b2 + a1 + b3);
}
__host__ __device__ void TrySubtraction()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TrySubtraction(): Function is called on the device \n");
#else
// Host code here
printf(" TrySubtraction(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 2, 2 };
Dual<N, double> b1 = { 10, 1 };
Dual<N, double> c1 = { 6, 0 };
int b2 = 3;
float b3 = 3.0f;
double b4 = 5.0;
printf("Dual - Dual (a1-b1)\n");
Print(a1 - b1);
printf("Dual - Dual (b1-a1)\n");
Print(b1 - a1);
printf("Dual - Dual - Dual (a1-b1-c1)\n");
Print(a1 - b1 - c1);
printf("Dual - Dual - Dual (b1-c1-a1)\n");
Print(b1 - c1 - a1);
printf("Dual - int (a1-b2) \n");
Print(a1 - b2);
printf("int - Dual (b2-a1) \n");
Print(b2 - a1);
printf("Dual - float (a1-b3) \n");
Print(a1 - b3);
printf("float - Dual (b3-a1) \n");
Print(b3 - a1);
printf("Dual - double (a1-b4) \n");
Print(a1 - b4);
printf("double - Dual (a1-b4) \n");
Print(b4 - a1);
printf("Mixed: b2 + a1 + b3 + b4 + b1 + b3\n");
Print(b2 - a1 - b3 - b4 - b1);
printf("Mixed: b4 - b1 - b2 - a1 - b3 - b2\n");
Print(b4 - b1 - b2 - a1 - b3);
}
__host__ __device__ void TryMultiplication()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryMultiplication(): Function is called on the device \n");
#else
// Host code here
printf(" TryMultiplication(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 5, 0 };
Dual<N, double> b1 = { 5, 1 };
Dual<N, double> c1 = { 2, 2 };
printf("Dual*Dual: a1*b1\n");
Print(a1*b1);
printf("Dual*Dual: b1*a1\n");
Print(b1*a1);
printf("b1*(a1+c1)\n");
Print(b1*(a1+c1));
printf("(b1*a1)+c1)\n");
Print((b1*a1)+c1);
double d1 = 5.6;
float d2 = 6.1f;
int d3 = 4;
printf("Dual*double: a1 * d1 \n");
Print(a1*d1);
printf("Dual*double: d1 * a1 \n");
Print(d1*a1);
printf("Dual*float: a1 * d2 \n");
Print(a1*d2);
printf("Dual*float: d2 * a1 \n");
Print(d2*a1);
printf("Dual*int: a1 * d3 \n");
Print(a1*d3);
printf("Dual*int: d3 * a1 \n");
Print(d3*a1);
}
__host__ __device__ void TryDivision()
{
#if defined (__CUDA_ARCH__)
// Device code here
printf(" TryDivision(): Function is called on the device \n");
#else
// Host code here
printf(" TryDivision(): Function is called on the host \n");
#endif
Dual<N, double> a1 = { 5, 0 };
Dual<N, double> b1 = { 2.5, 1 };
Dual<N, double> c1 = { 2, 2 };
printf("Dual/Dual: a1/b1\n");
Print(a1/b1);
printf("Dual/Dual: b1/a1\n");
Print(b1/a1);
printf("b1/(a1/c1)\n");
Print(b1/(a1/c1));
printf("(b1/a1)/c1)\n");
Print((b1/a1)/c1);
double d1 = 5.6;
float d2 = 6.1f;
int d3 = 4;
printf("Dual/double: a1 / d1 \n");
Print(a1/d1);
printf("double/Dual: d1 / a1 \n");
Print(d1/a1);
printf("Dual/float: a1 / d2 \n");
Print(a1/d2);
printf("float/Dual: d2 / a1 \n");
Print(d2/a1);
printf("Dual/int: a1 / d3 \n");
Print(a1/d3);
printf("int/Dual: d3 / a1 \n");
Print(d3/a1);
}
__global__ void TryDualNumbers()
{
TryVariableInitialization();
TryAddition();
TrySubtraction();
TryMultiplication();
TryDivision();
}
int main()
{
TryVariableInitialization();
TryAddition();
TrySubtraction();
TryMultiplication();
TryDivision();
TryDualNumbers KERNEL_ARGS2(1, 1)();
return 0;
} |
51891e00c8c46b5e5a1ef66975622a564f9c6dca.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
//removing __global__ specifier:
//error: a host function call cannot be configured vectorAdd.cu
//we cannot callhipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
//without global function defined
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
//DATA STRUCTURE HOST:
// Print the vector length to be used, and compute its size
    //set size to 2^25 elements (2 << 24):
int numElements = 2 << 24;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
/*
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
*/
//DATA STRUCTURE DEVICE:
//now it's managed memory allocation (both device and host have access to it)
// Allocate the device input vector A
float *d_A = NULL;
//replace hipMalloc with hipMallocManaged
err = hipMallocManaged((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
//replace hipMalloc with hipMallocManaged
err = hipMallocManaged((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
//replace hipMalloc with hipMallocManaged
err = hipMallocManaged((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//using managed memory we do not have the need to copy objects to the device
/*
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
//DEFINING GRID LAYOUT:
// Launch the Vector Add CUDA Kernel
// make grid size 1x1:
//choose 5 configs of grid:
int threadsPerBlock = 32 << 2; //max: 1024
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
//if we use kernel as a normal function then we get an error:
//a __global__ function call must be configured vectorAdd.cu
//vectorAdd(d_A, d_B, d_C, numElements);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
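    // The verification loop below reads the managed buffers from the host, so
    // wait for the asynchronous kernel launch to finish before touching them.
    err = hipDeviceSynchronize();
    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to synchronize after vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }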
//using managed memory we do not have the need to copy objects to the device
/*
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
*/
// Verify that the result vector in managed memory is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(d_A[i] + d_B[i] - d_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
// Free host memory
free(h_A);
free(h_B);
free(h_C);
*/
printf("Done\n");
return 0;
}
| 51891e00c8c46b5e5a1ef66975622a564f9c6dca.cu | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
//removing __global__ specifier:
//error: a host function call cannot be configured vectorAdd.cu
//we cannot call vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
//without global function defined
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
//DATA STRUCTURE HOST:
// Print the vector length to be used, and compute its size
//set size to 2^24:
int numElements = 2 << 24;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
/*
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
*/
//DATA STRUCTURE DEVICE:
//now it's managed memory allocation (both device and host have access to it)
// Allocate the device input vector A
float *d_A = NULL;
//replace cudaMalloc with cudaMallocManaged
err = cudaMallocManaged((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
//replace cudaMalloc with cudaMallocManaged
err = cudaMallocManaged((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
//replace cudaMalloc with cudaMallocManaged
err = cudaMallocManaged((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//using managed memory we do not have the need to copy objects to the device
/*
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
*/
//DEFINING GRID LAYOUT:
// Launch the Vector Add CUDA Kernel
// make grid size 1x1:
//choose 5 configs of grid:
int threadsPerBlock = 32 << 2; //max: 1024
int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
//if we use kernel as a normal function then we get an error:
//a __global__ function call must be configured vectorAdd.cu
//vectorAdd(d_A, d_B, d_C, numElements);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
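    // The verification loop below reads the managed buffers from the host, so
    // wait for the asynchronous kernel launch to finish before touching them.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize after vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }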
//using managed memory we do not have the need to copy objects to the device
/*
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
*/
// Verify that the result vector in managed memory is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(d_A[i] + d_B[i] - d_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*
// Free host memory
free(h_A);
free(h_B);
free(h_C);
*/
printf("Done\n");
return 0;
}
|
e0671523ccd4d96bc7627b8fc95e5c18532e3d58.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
// Ninja
#include "local_pooling_cpu.cpp"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
std::pair<at::Tensor, at::Tensor> LocalPoolingForwardGPU(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));
// create an output coordinate map
if (!p_out_map_key->is_key_set()) {
coordinate_map_key_type out_key =
std::get<0>(p_map_manager->stride(in_key, kernel_stride));
p_out_map_key->set_key(out_key);
}
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
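  // in_out is the kernel map: for each kernel offset, the list of
  // (input row, output row) index pairs that the pooling kernels below
  // iterate over when reducing input features into output features.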
auto const out_nrows = p_map_manager->size(p_out_map_key->get_key());
at::Tensor out_feat =
torch::zeros({out_nrows, in_feat.size(1)}, in_feat.options());
LOG_DEBUG("Allocated", out_nrows, "x", in_feat.size(1), "features.");
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
at::Tensor max_index = torch::empty({0}, torch::TensorOptions()
.device(in_feat.device())
.dtype(torch::kInt)
.requires_grad(false));
max_index.resize_({out_nrows, in_feat.size(1)});
max_index.zero_();
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(),
out_feat.template data_ptr<scalar_t>(), out_nrows,
max_index.data_ptr<int>(), in_feat.size(1), in_out,
byte_allocator, stream);
});
return std::make_pair(out_feat, max_index);
} else {
at::Tensor num_nonzero =
torch::empty({0}, in_feat.options().requires_grad(false));
if (pooling_mode == PoolingMode::LOCAL_AVG_POOLING) {
num_nonzero.resize_({out_nrows});
num_nonzero.zero_();
}
hipsparseHandle_t handle = getCurrentCUDASparseHandle();
hipsparseSetStream(handle, stream);
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
TemplatedAllocator<char> byte_allocator;
NonzeroAvgPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
out_feat.template data_ptr<scalar_t>(), out_nrows,
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_out, pooling_mode == PoolingMode::LOCAL_AVG_POOLING,
byte_allocator, handle, stream);
});
return std::make_pair(out_feat, num_nonzero);
}
}
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
at::Tensor LocalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(grad_out_feat.is_contiguous(), "grad_out_feat must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
coordinate_map_key_type out_key = p_out_map_key->get_key();
ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
at::Tensor grad_in_feat =
torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
MaxPoolingBackwardKernelGPU<scalar_t>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.data_ptr<int>(),
in_feat.size(1));
});
} else {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
NonzeroAvgPoolingBackwardKernelGPU<
scalar_t, default_types::index_type, TemplatedAllocator<char>>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.template data_ptr<scalar_t>(),
in_feat.size(1), in_out,
pooling_mode == PoolingMode::LOCAL_AVG_POOLING, stream);
});
}
return grad_in_feat;
}
// Forward
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
// Backward
template at::Tensor LocalPoolingBackwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor
LocalPoolingBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
| e0671523ccd4d96bc7627b8fc95e5c18532e3d58.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
// Ninja
#include "local_pooling_cpu.cpp"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
std::pair<at::Tensor, at::Tensor> LocalPoolingForwardGPU(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));
// create an output coordinate map
if (!p_out_map_key->is_key_set()) {
coordinate_map_key_type out_key =
std::get<0>(p_map_manager->stride(in_key, kernel_stride));
p_out_map_key->set_key(out_key);
}
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
auto const out_nrows = p_map_manager->size(p_out_map_key->get_key());
at::Tensor out_feat =
torch::zeros({out_nrows, in_feat.size(1)}, in_feat.options());
LOG_DEBUG("Allocated", out_nrows, "x", in_feat.size(1), "features.");
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
at::Tensor max_index = torch::empty({0}, torch::TensorOptions()
.device(in_feat.device())
.dtype(torch::kInt)
.requires_grad(false));
max_index.resize_({out_nrows, in_feat.size(1)});
max_index.zero_();
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(),
out_feat.template data_ptr<scalar_t>(), out_nrows,
max_index.data_ptr<int>(), in_feat.size(1), in_out,
byte_allocator, stream);
});
return std::make_pair(out_feat, max_index);
} else {
at::Tensor num_nonzero =
torch::empty({0}, in_feat.options().requires_grad(false));
if (pooling_mode == PoolingMode::LOCAL_AVG_POOLING) {
num_nonzero.resize_({out_nrows});
num_nonzero.zero_();
}
cusparseHandle_t handle = getCurrentCUDASparseHandle();
cusparseSetStream(handle, stream);
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
TemplatedAllocator<char> byte_allocator;
NonzeroAvgPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
out_feat.template data_ptr<scalar_t>(), out_nrows,
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_out, pooling_mode == PoolingMode::LOCAL_AVG_POOLING,
byte_allocator, handle, stream);
});
return std::make_pair(out_feat, num_nonzero);
}
}
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
at::Tensor LocalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(grad_out_feat.is_contiguous(), "grad_out_feat must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(grad_out_feat.is_cuda(), "grad_out_feat must be on CUDA");
ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
coordinate_map_key_type out_key = p_out_map_key->get_key();
ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
at::Tensor grad_in_feat =
torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
MaxPoolingBackwardKernelGPU<scalar_t>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.data_ptr<int>(),
in_feat.size(1));
});
} else {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
NonzeroAvgPoolingBackwardKernelGPU<
scalar_t, default_types::index_type, TemplatedAllocator<char>>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.template data_ptr<scalar_t>(),
in_feat.size(1), in_out,
pooling_mode == PoolingMode::LOCAL_AVG_POOLING, stream);
});
}
return grad_in_feat;
}
// Forward
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
// Backward
template at::Tensor LocalPoolingBackwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor
LocalPoolingBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
|
4772c1006f981a623b61304da3eff202261caa84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "find_ellipse_kernel.h"
#include <cutil.h>
#include <stdio.h>
// The number of sample points in each ellipse (stencil)
#define NPOINTS 150
// The maximum radius of a sample ellipse
#define MAX_RAD 20
// The total number of sample ellipses
#define NCIRCLES 7
// The size of the structuring element used in dilation
#define STREL_SIZE (12 * 2 + 1)
// Matrix used to store the maximal GICOV score at each pixels
// Produced by the GICOV kernel and consumed by the dilation kernel
float *device_gicov;
// Constant device arrays holding the stencil parameters used by the GICOV kernel
__constant__ float c_sin_angle[NPOINTS];
__constant__ float c_cos_angle[NPOINTS];
__constant__ int c_tX[NCIRCLES * NPOINTS];
__constant__ int c_tY[NCIRCLES * NPOINTS];
// Texture references to the gradient matrices used by the GICOV kernel
texture<float, 1, hipReadModeElementType> t_grad_x;
texture<float, 1, hipReadModeElementType> t_grad_y;
// Kernel to find the maximal GICOV value at each pixel of a
// video frame, based on the input x- and y-gradient matrices
__global__ void GICOV_kernel(int grad_m, float *gicov)
{
int i, j, k, n, x, y;
// Determine this thread's pixel
i = blockIdx.x + MAX_RAD + 2;
j = threadIdx.x + MAX_RAD + 2;
// Initialize the maximal GICOV score to 0
float max_GICOV = 0.0f;
// Iterate across each stencil
for (k = 0; k < NCIRCLES; k++) {
// Variables used to compute the mean and variance
// of the gradients along the current stencil
float sum = 0.0f, M2 = 0.0f, mean = 0.0f;
// Iterate across each sample point in the current stencil
for (n = 0; n < NPOINTS; n++) {
// Determine the x- and y-coordinates of the current sample point
y = j + c_tY[(k * NPOINTS) + n];
x = i + c_tX[(k * NPOINTS) + n];
// Compute the combined gradient value at the current sample point
int addr = x * grad_m + y;
float p = tex1Dfetch(t_grad_x,addr) * c_cos_angle[n] +
tex1Dfetch(t_grad_y,addr) * c_sin_angle[n];
// Update the running total
sum += p;
// Partially compute the variance
float delta = p - mean;
mean = mean + (delta / (float) (n + 1));
M2 = M2 + (delta * (p - mean));
}
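		// (The running mean/M2 update above is Welford's online algorithm; the
		// GICOV score taken below is mean^2 / variance over the stencil samples.)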
// Finish computing the mean
mean = sum / ((float) NPOINTS);
// Finish computing the variance
float var = M2 / ((float) (NPOINTS - 1));
// Keep track of the maximal GICOV value seen so far
if (((mean * mean) / var) > max_GICOV) max_GICOV = (mean * mean) / var;
}
// Store the maximal GICOV value
gicov[(i * grad_m) + j] = max_GICOV;
}
// Sets up and invokes the GICOV kernel and returns its output
float *GICOV_CUDA(int grad_m, int grad_n, float *host_grad_x, float *host_grad_y)
{
int MaxR = MAX_RAD + 2;
// Allocate device memory
unsigned int grad_mem_size = sizeof(float) * grad_m * grad_n;
float *device_grad_x, *device_grad_y;
hipMalloc((void**) &device_grad_x, grad_mem_size);
hipMalloc((void**) &device_grad_y, grad_mem_size);
// Copy the input gradients to the device
hipMemcpy(device_grad_x, host_grad_x, grad_mem_size, hipMemcpyHostToDevice);
hipMemcpy(device_grad_y, host_grad_y, grad_mem_size, hipMemcpyHostToDevice);
// Bind the device arrays to texture references
hipBindTexture(0, t_grad_x, device_grad_x, grad_mem_size);
hipBindTexture(0, t_grad_y, device_grad_y, grad_mem_size);
// Allocate & initialize device memory for result
// (some elements are not assigned values in the kernel)
hipMalloc((void**) &device_gicov, grad_mem_size);
hipMemset(device_gicov, 0, grad_mem_size);
// Setup execution parameters
int num_blocks = grad_n - (2 * MaxR);
int threads_per_block = grad_m - (2 * MaxR);
// Execute the GICOV kernel
hipLaunchKernelGGL(( GICOV_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, grad_m, device_gicov);
// Check for kernel errors
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("GICOV kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy the result to the host
float *host_gicov = (float *) malloc(grad_mem_size);
hipMemcpy(host_gicov, device_gicov, grad_mem_size, hipMemcpyDeviceToHost);
// Cleanup memory
hipUnbindTexture(t_grad_x);
hipUnbindTexture(t_grad_y);
hipFree(device_grad_x);
hipFree(device_grad_y);
return host_gicov;
}
// Constant device array holding the structuring element used by the dilation kernel
__constant__ float c_strel[STREL_SIZE * STREL_SIZE];
// Texture reference to the GICOV matrix used by the dilation kernel
texture<float, 1, hipReadModeElementType> t_img;
// Kernel to compute the dilation of the GICOV matrix produced by the GICOV kernel
// Each element (i, j) of the output matrix is set equal to the maximal value in
// the neighborhood surrounding element (i, j) in the input matrix
// Here the neighborhood is defined by the structuring element (c_strel)
__global__ void dilate_kernel(int img_m, int img_n, int strel_m, int strel_n, float *dilated)
{
// Find the center of the structuring element
int el_center_i = strel_m / 2;
int el_center_j = strel_n / 2;
// Determine this thread's location in the matrix
int thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = thread_id % img_m;
int j = thread_id / img_m;
// Initialize the maximum GICOV score seen so far to zero
float max = 0.0;
// Iterate across the structuring element in one dimension
int el_i, el_j, x, y;
for(el_i = 0; el_i < strel_m; el_i++) {
y = i - el_center_i + el_i;
// Make sure we have not gone off the edge of the matrix
if( (y >= 0) && (y < img_m) ) {
// Iterate across the structuring element in the other dimension
for(el_j = 0; el_j < strel_n; el_j++) {
x = j - el_center_j + el_j;
// Make sure we have not gone off the edge of the matrix
// and that the current structuring element value is not zero
if( (x >= 0) && (x < img_n) && (c_strel[(el_i * strel_n) + el_j] != 0) ) {
// Determine if this is maximal value seen so far
int addr = (x * img_m) + y;
float temp = tex1Dfetch(t_img, addr);
if (temp > max) max = temp;
}
}
}
}
// Store the maximum value found
dilated[(i * img_n) + j] = max;
}
// Sets up and invokes the dilation kernel and returns its output
float *dilate_CUDA(int max_gicov_m, int max_gicov_n, int strel_m, int strel_n)
{
// Allocate device memory for result
unsigned int max_gicov_mem_size = sizeof(float) * max_gicov_m * max_gicov_n;
float* device_img_dilated;
hipMalloc( (void**) &device_img_dilated, max_gicov_mem_size);
// Bind the input matrix of GICOV values to a texture reference
hipBindTexture(0, t_img, device_gicov, max_gicov_mem_size);
// Setup execution parameters
int num_threads = max_gicov_m * max_gicov_n;
int threads_per_block = 176;
int num_blocks = (int) (((float) num_threads / (float) threads_per_block) + 0.5);
// Execute the dilation kernel
hipLaunchKernelGGL(( dilate_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, max_gicov_m, max_gicov_n, strel_m, strel_n, device_img_dilated);
// Check for kernel errors
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("Dilation kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy the result to the host
float *host_img_dilated = (float*) malloc(max_gicov_mem_size);
hipMemcpy(host_img_dilated, device_img_dilated, max_gicov_mem_size, hipMemcpyDeviceToHost);
// Cleanup memory
hipUnbindTexture(t_img);
hipFree(device_gicov);
hipFree(device_img_dilated);
return host_img_dilated;
}
// Chooses the most appropriate GPU on which to execute
void select_device()
{
// Figure out how many devices exist
int num_devices, device;
hipGetDeviceCount(&num_devices);
// Choose the device with the largest number of multiprocessors
if (num_devices > 0) {
int max_multiprocessors = 0, max_device = -1;
for (device = 0; device < num_devices; device++) {
hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
hipSetDevice(max_device);
}
// The following is to remove the API initialization overhead from the runtime measurements
hipFree(0);
}
// Transfers pre-computed constants used by the two kernels to the GPU
void transfer_constants(float *host_sin_angle, float *host_cos_angle, int *host_tX, int *host_tY, int strel_m, int strel_n, float *host_strel)
{
// Compute the sizes of the matrices
unsigned int angle_mem_size = sizeof(float) * NPOINTS;
unsigned int t_mem_size = sizeof(int) * NCIRCLES * NPOINTS;
unsigned int strel_mem_size = sizeof(float) * strel_m * strel_n;
// Copy the matrices from host memory to device constant memory
hipMemcpyToSymbol("c_sin_angle", host_sin_angle, angle_mem_size, 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol("c_cos_angle", host_cos_angle, angle_mem_size, 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol("c_tX", host_tX, t_mem_size, 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol("c_tY", host_tY, t_mem_size, 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol("c_strel", host_strel, strel_mem_size, 0, hipMemcpyHostToDevice);
}
| 4772c1006f981a623b61304da3eff202261caa84.cu | #include "find_ellipse_kernel.h"
#include <cutil.h>
#include <stdio.h>
// The number of sample points in each ellipse (stencil)
#define NPOINTS 150
// The maximum radius of a sample ellipse
#define MAX_RAD 20
// The total number of sample ellipses
#define NCIRCLES 7
// The size of the structuring element used in dilation
#define STREL_SIZE (12 * 2 + 1)
// Matrix used to store the maximal GICOV score at each pixels
// Produced by the GICOV kernel and consumed by the dilation kernel
float *device_gicov;
// Constant device arrays holding the stencil parameters used by the GICOV kernel
__constant__ float c_sin_angle[NPOINTS];
__constant__ float c_cos_angle[NPOINTS];
__constant__ int c_tX[NCIRCLES * NPOINTS];
__constant__ int c_tY[NCIRCLES * NPOINTS];
// Texture references to the gradient matrices used by the GICOV kernel
texture<float, 1, cudaReadModeElementType> t_grad_x;
texture<float, 1, cudaReadModeElementType> t_grad_y;
// Kernel to find the maximal GICOV value at each pixel of a
// video frame, based on the input x- and y-gradient matrices
__global__ void GICOV_kernel(int grad_m, float *gicov)
{
int i, j, k, n, x, y;
// Determine this thread's pixel
i = blockIdx.x + MAX_RAD + 2;
j = threadIdx.x + MAX_RAD + 2;
// Initialize the maximal GICOV score to 0
float max_GICOV = 0.0f;
// Iterate across each stencil
for (k = 0; k < NCIRCLES; k++) {
// Variables used to compute the mean and variance
// of the gradients along the current stencil
float sum = 0.0f, M2 = 0.0f, mean = 0.0f;
// Iterate across each sample point in the current stencil
for (n = 0; n < NPOINTS; n++) {
// Determine the x- and y-coordinates of the current sample point
y = j + c_tY[(k * NPOINTS) + n];
x = i + c_tX[(k * NPOINTS) + n];
// Compute the combined gradient value at the current sample point
int addr = x * grad_m + y;
float p = tex1Dfetch(t_grad_x,addr) * c_cos_angle[n] +
tex1Dfetch(t_grad_y,addr) * c_sin_angle[n];
// Update the running total
sum += p;
// Partially compute the variance
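			// (This running mean / M2 update is Welford's online algorithm: M2
			// accumulates the sum of squared deviations from the running mean, so the
			// sample variance is M2 / (NPOINTS - 1) once all points are folded in.)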
float delta = p - mean;
mean = mean + (delta / (float) (n + 1));
M2 = M2 + (delta * (p - mean));
}
// Finish computing the mean
mean = sum / ((float) NPOINTS);
// Finish computing the variance
float var = M2 / ((float) (NPOINTS - 1));
// Keep track of the maximal GICOV value seen so far
if (((mean * mean) / var) > max_GICOV) max_GICOV = (mean * mean) / var;
}
// Store the maximal GICOV value
gicov[(i * grad_m) + j] = max_GICOV;
}
// Sets up and invokes the GICOV kernel and returns its output
float *GICOV_CUDA(int grad_m, int grad_n, float *host_grad_x, float *host_grad_y)
{
int MaxR = MAX_RAD + 2;
// Allocate device memory
unsigned int grad_mem_size = sizeof(float) * grad_m * grad_n;
float *device_grad_x, *device_grad_y;
cudaMalloc((void**) &device_grad_x, grad_mem_size);
cudaMalloc((void**) &device_grad_y, grad_mem_size);
// Copy the input gradients to the device
cudaMemcpy(device_grad_x, host_grad_x, grad_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(device_grad_y, host_grad_y, grad_mem_size, cudaMemcpyHostToDevice);
// Bind the device arrays to texture references
cudaBindTexture(0, t_grad_x, device_grad_x, grad_mem_size);
cudaBindTexture(0, t_grad_y, device_grad_y, grad_mem_size);
// Allocate & initialize device memory for result
// (some elements are not assigned values in the kernel)
cudaMalloc((void**) &device_gicov, grad_mem_size);
cudaMemset(device_gicov, 0, grad_mem_size);
// Setup execution parameters
int num_blocks = grad_n - (2 * MaxR);
int threads_per_block = grad_m - (2 * MaxR);
// Execute the GICOV kernel
GICOV_kernel <<< num_blocks, threads_per_block >>> (grad_m, device_gicov);
// Check for kernel errors
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("GICOV kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy the result to the host
float *host_gicov = (float *) malloc(grad_mem_size);
cudaMemcpy(host_gicov, device_gicov, grad_mem_size, cudaMemcpyDeviceToHost);
// Cleanup memory
cudaUnbindTexture(t_grad_x);
cudaUnbindTexture(t_grad_y);
cudaFree(device_grad_x);
cudaFree(device_grad_y);
return host_gicov;
}
// Constant device array holding the structuring element used by the dilation kernel
__constant__ float c_strel[STREL_SIZE * STREL_SIZE];
// Texture reference to the GICOV matrix used by the dilation kernel
texture<float, 1, cudaReadModeElementType> t_img;
// Kernel to compute the dilation of the GICOV matrix produced by the GICOV kernel
// Each element (i, j) of the output matrix is set equal to the maximal value in
// the neighborhood surrounding element (i, j) in the input matrix
// Here the neighborhood is defined by the structuring element (c_strel)
__global__ void dilate_kernel(int img_m, int img_n, int strel_m, int strel_n, float *dilated)
{
// Find the center of the structuring element
int el_center_i = strel_m / 2;
int el_center_j = strel_n / 2;
// Determine this thread's location in the matrix
int thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;
int i = thread_id % img_m;
int j = thread_id / img_m;
// Initialize the maximum GICOV score seen so far to zero
float max = 0.0;
// Iterate across the structuring element in one dimension
int el_i, el_j, x, y;
for(el_i = 0; el_i < strel_m; el_i++) {
y = i - el_center_i + el_i;
// Make sure we have not gone off the edge of the matrix
if( (y >= 0) && (y < img_m) ) {
// Iterate across the structuring element in the other dimension
for(el_j = 0; el_j < strel_n; el_j++) {
x = j - el_center_j + el_j;
// Make sure we have not gone off the edge of the matrix
// and that the current structuring element value is not zero
if( (x >= 0) && (x < img_n) && (c_strel[(el_i * strel_n) + el_j] != 0) ) {
// Determine if this is the maximal value seen so far
int addr = (x * img_m) + y;
float temp = tex1Dfetch(t_img, addr);
if (temp > max) max = temp;
}
}
}
}
// Store the maximum value found
dilated[(i * img_n) + j] = max;
}
// Sets up and invokes the dilation kernel and returns its output
float *dilate_CUDA(int max_gicov_m, int max_gicov_n, int strel_m, int strel_n)
{
// Allocate device memory for result
unsigned int max_gicov_mem_size = sizeof(float) * max_gicov_m * max_gicov_n;
float* device_img_dilated;
cudaMalloc( (void**) &device_img_dilated, max_gicov_mem_size);
// Bind the input matrix of GICOV values to a texture reference
cudaBindTexture(0, t_img, device_gicov, max_gicov_mem_size);
// Setup execution parameters
int num_threads = max_gicov_m * max_gicov_n;
int threads_per_block = 176;
int num_blocks = (int) (((float) num_threads / (float) threads_per_block) + 0.5);
// Execute the dilation kernel
dilate_kernel <<< num_blocks, threads_per_block >>> (max_gicov_m, max_gicov_n, strel_m, strel_n, device_img_dilated);
// Check for kernel errors
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Dilation kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy the result to the host
float *host_img_dilated = (float*) malloc(max_gicov_mem_size);
cudaMemcpy(host_img_dilated, device_img_dilated, max_gicov_mem_size, cudaMemcpyDeviceToHost);
// Cleanup memory
cudaUnbindTexture(t_img);
cudaFree(device_gicov);
cudaFree(device_img_dilated);
return host_img_dilated;
}
// Chooses the most appropriate GPU on which to execute
void select_device()
{
// Figure out how many devices exist
int num_devices, device;
cudaGetDeviceCount(&num_devices);
// Choose the device with the largest number of multiprocessors
if (num_devices > 0) {
int max_multiprocessors = 0, max_device = -1;
for (device = 0; device < num_devices; device++) {
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, device);
if (max_multiprocessors < properties.multiProcessorCount) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
}
}
cudaSetDevice(max_device);
}
// The following is to remove the API initialization overhead from the runtime measurements
cudaFree(0);
}
// Transfers pre-computed constants used by the two kernels to the GPU
void transfer_constants(float *host_sin_angle, float *host_cos_angle, int *host_tX, int *host_tY, int strel_m, int strel_n, float *host_strel)
{
// Compute the sizes of the matrices
unsigned int angle_mem_size = sizeof(float) * NPOINTS;
unsigned int t_mem_size = sizeof(int) * NCIRCLES * NPOINTS;
unsigned int strel_mem_size = sizeof(float) * strel_m * strel_n;
// Copy the matrices from host memory to device constant memory
cudaMemcpyToSymbol("c_sin_angle", host_sin_angle, angle_mem_size, 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol("c_cos_angle", host_cos_angle, angle_mem_size, 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol("c_tX", host_tX, t_mem_size, 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol("c_tY", host_tY, t_mem_size, 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol("c_strel", host_strel, strel_mem_size, 0, cudaMemcpyHostToDevice);
}
|
33e67abf6f7ca8bcf29665da623c74634087171e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), \
0, context->cuda_stream(), \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##SinCos), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( _Kernel_##T##_##Funcname), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
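  // Because cuBLAS assumes column-major storage, the row-major C (M x N) is
  // handed to it as the column-major matrix C^T (N x M). Since C^T = B^T * A^T,
  // passing B first with the dimensions given as (N, M, K) and ldc = N computes
  // the row-major product without any explicit transposition; the same trick is
  // used by the other Gemm/GemmEx variants below.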
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call hipblasHgemm
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
} // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// 3 options:
// 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16
// 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx
// 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, hipblasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(A_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), A_size, (half*)A, A_fp32);
hipLaunchKernelGGL(( HalfToFloatKernel), dim3(CAFFE_GET_BLOCKS(B_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
hipLaunchKernelGGL(( FloatToHalfKernel),
dim3(CAFFE_GET_BLOCKS(batch_size * M * N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float,CUDAContext>(TransA,
TransB,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_TENSOR_OP_MATH));
}
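  // A/B/C are stored as fp16 (HIP_R_16F) while the compute type is fp32
  // (HIP_R_32F), so the tensor-core GEMM accumulates in single precision; alpha
  // and beta are plain floats to match the compute type.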
CUBLAS_CHECK(hipblasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T,float>(x[i]);
Y[i] = convert::To<float,T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / hipblasHgemm
int m = (cuTransA == HIPBLAS_OP_N) ? N : M;
int k = (cuTransA == HIPBLAS_OP_N) ? M : N;
int LDA = (cuTransA == HIPBLAS_OP_N) ? m : k;
int LDC = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
LDA,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(hipblasHgemm(
context->cublas_handle(),
cuTransA,
HIPBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
hipLaunchKernelGGL(( SetKernel), \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
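  // Mapping a uniform 32-bit sample into [min, max] with a modulo carries a
  // slight bias whenever range does not evenly divide 2^32; for small ranges the
  // bias is negligible.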
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
&result,
HIP_R_16F,
HIP_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] +=
reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] +
reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
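  // hipcub follows CUB's two-pass convention: the first call, with a null
  // temporary-storage pointer, only reports the scratch size needed in
  // memRequired; the second call then performs the reduction using that buffer.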
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float16>),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
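// The *DeviceAlpha variants read the scale factor through a device pointer (e.g.
// when alpha is itself produced on the GPU), so it must be dereferenced inside
// the kernel instead of being passed by value from the host.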
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( PowKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernel<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float>),
dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(),
n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
hipLaunchKernelGGL(( ScaleKernelDeviceAlpha<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_16F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpyKernel<float16>),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( AxpbyKernel<float>), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context->cuda_stream(), n, a, x, b, y);
}
namespace {
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
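    // Each thread fills the kernel_h x kernel_w column entries for one
    // (channel_in, h_out, w_out) triple; index enumerates these triples in
    // row-major order, i.e.
    //   index == (channel_in * height_col + h_out) * width_col + w_out,
    // which the divisions/modulos below invert.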
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i * dilation_h;
int w = w_in + j * dilation_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < dkernel_h; i += dilation_h) {
int h = h_in + i;
for (int j = 0; j < dkernel_w; j += dilation_w) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
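    // An output column w_col touches (padded) input position w iff
    //   w_col * stride_w <= w <= w_col * stride_w + dpatch_w - 1,
    // i.e. ceil((w - dpatch_w + 1) / stride_w) <= w_col <= floor(w / stride_w);
    // the integer expressions below implement exactly that (w_col_end is
    // exclusive and both bounds are clamped to [0, width_col)), and the same
    // reasoning gives h_col_start / h_col_end.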
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index =
(((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) *
width_col +
w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int c_col = (h_k * patch_w + w_k) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
}
data_im[index] = val;
}
}
// Ported from caffe1
template <typename T, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,
const T* data_im,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_col) {
int d_offset[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
int kernel_size = 1;
for (i = 0; i < num_axes; ++i) {
kernel_size *= shared_kernel_shape[i];
}
CUDA_1D_KERNEL_LOOP(index, n) {
if (index >= col_shape[0]) {
break;
}
// Initialize offset, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int offset = index;
for (i = num_axes - 1; i >= 0; --i) {
if (i < num_axes - 1) {
offset /= shared_kernel_shape[i + 1];
}
d_offset[i] = offset % shared_kernel_shape[i];
}
for (i = 0; i < num_axes; ++i) {
d_iter[i] = 0;
}
bool incremented;
do {
int index_col = index;
int index_im = index / kernel_size;
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d = d_iter[i];
const int d_im = d * shared_stride[i] - shared_pad[i] +
d_offset[i] * shared_dilation[i];
in_range &= (d_im >= 0 && d_im < shared_im_shape[i + 1]);
index_col *= shared_col_shape[i + 1];
index_col += d;
index_im *= shared_im_shape[i + 1];
index_im += d_im;
}
if (in_range) {
// data_col[index_col] = 0;
data_col[index_col] = data_im[index_im];
// T temp = data_im[index_im];
} else {
data_col[index_col] = 0;
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
// const int d_max = shared_kernel_shape[i];
const int d_max = shared_col_shape[i + 1];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename T, int num_axes>
__global__ void col2im_nd_gpu_kernel(
const int n,
const T* data_col,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
CUDA_1D_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent)
? 0
: (d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
T val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  // We are going to launch channels * height_col * width_col threads, each
  // responsible for copying a single channel's kernel-sized patch at one output location.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
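  // Illustrative example: height = 32, pad_t = pad_b = 1, kernel_h = 3,
  // dilation_h = 2 and stride_h = 1 give dkernel_h = 2 * (3 - 1) + 1 = 5 and
  // height_col = (32 + 1 + 1 - 5) / 1 + 1 = 30.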
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
  // We are going to launch height_col * width_col * channels threads, each
  // responsible for copying a single channel's kernel-sized patch at one output location.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( im2col_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel_nchw<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, height, width, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
  // To avoid atomic operations, we launch one thread per input (bottom)
  // element and let each thread accumulate every column (top) entry that
  // contributes to it.
hipLaunchKernelGGL(( col2im_gpu_kernel_nhwc<float>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context->cuda_stream(),
num_kernels, data_col, width, channels, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
template <>
void Col2imNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_img,
CUDAContext* context) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
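  // The ND im2col/col2im kernels stage the shape, pad, stride and dilation
  // arrays in shared memory indexed by threadIdx.x, so num_axes + 1 must not
  // exceed the block size; the check above guarantees that.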
#define COL2IM_ND_KERNEL(n)                         \
  /* NOLINT_NEXT_LINE(whitespace/operators) */      \
  hipLaunchKernelGGL(                               \
      (col2im_nd_gpu_kernel<float, n>),             \
      dim3(CAFFE_GET_BLOCKS(img_size)),             \
      dim3(CAFFE_CUDA_NUM_THREADS),                 \
      0,                                            \
      context->cuda_stream(),                       \
      img_size,                                     \
      data_col,                                     \
      img_shape,                                    \
      col_shape,                                    \
      kernel_shape,                                 \
      pad,                                          \
      stride,                                       \
      dilation,                                     \
      data_img)
switch (N) {
case 1:
COL2IM_ND_KERNEL(1);
break;
case 2:
COL2IM_ND_KERNEL(2);
break;
case 3:
COL2IM_ND_KERNEL(3);
break;
case 4:
COL2IM_ND_KERNEL(4);
break;
case 5:
COL2IM_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Col2imNd does not support computation with ", N, " spatial axes");
}
}
template <>
void Im2colNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_img,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_col,
CUDAContext* context,
    bool /*accumulate_output*/) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define IM2COL_ND_KERNEL(n)                         \
  /* NOLINT_NEXT_LINE(whitespace/operators) */      \
  hipLaunchKernelGGL(                               \
      (im2col_nd_gpu_kernel<float, n>),             \
      dim3(CAFFE_GET_BLOCKS(col_size)),             \
      dim3(CAFFE_CUDA_NUM_THREADS),                 \
      0,                                            \
      context->cuda_stream(),                       \
      col_size,                                     \
      data_img,                                     \
      img_shape,                                    \
      col_shape,                                    \
      kernel_shape,                                 \
      pad,                                          \
      stride,                                       \
      dilation,                                     \
      data_col)
switch (N) {
case 1:
IM2COL_ND_KERNEL(1);
break;
case 2:
IM2COL_ND_KERNEL(2);
break;
case 3:
IM2COL_ND_KERNEL(3);
break;
case 4:
IM2COL_ND_KERNEL(4);
break;
case 5:
IM2COL_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Im2colNd does not support computation with ", N, " spatial axes");
}
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
hipMemcpyDeviceToDevice, context->cuda_stream());
}
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
std::vector<int> MakeTransposeAxes(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes) {
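  // Builds a permutation of the axes that moves the reduced axes (sorted) to
  // the innermost positions while keeping the remaining axes in order.
  // E.g. (hypothetical) num_dims = 4, axes = {1, 2} -> {0, 3, 1, 2}.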
std::vector<int> transpose_axes(num_dims);
const int d = num_dims - num_axes;
std::copy_n(axes, num_axes, transpose_axes.begin() + d);
std::sort(transpose_axes.begin() + d, transpose_axes.end());
int p = 0;
int q = d;
for (int i = 0; i < num_dims; ++i) {
if (q < num_dims && i == transpose_axes[q]) {
++q;
} else {
transpose_axes[p++] = i;
}
}
return transpose_axes;
}
template <int D>
void ComputeTransposedStrides(
const int* X_dims,
const int* axes,
int* X_strides) {
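  // X_strides[i] becomes the stride of X along the transposed axis axes[i].
  // E.g. (hypothetical) X_dims = {2, 3, 4} has row-major strides {12, 4, 1};
  // with axes = {2, 0, 1} this yields X_strides = {1, 12, 4}.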
int buff[D];
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= X_dims[i];
}
for (int i = 0; i < D; ++i) {
X_strides[i] = buff[axes[i]];
}
}
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( ReduceTensorCUDAKernel<T, Reducer, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
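  // Fast path: if the first reduced axis equals the pivot, the sorted reduced
  // axes are exactly the innermost ones, the permutation is the identity, and
  // a contiguous row-wise reduction suffices.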
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseReduceKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
}
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
hipcub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, hipcub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
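  // Broadcast dimensions (size 1 in X, or the d leading dims X does not have)
  // get a stride of 0, so every Y index along that dimension reads the same
  // X element.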
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), Y_size, X_strides_array, Y_dims_array, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
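// Both moments kernels accumulate the sum and the sum of squares of each
// reduced group and derive the variance as E[X^2] - E[X]^2.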
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, hipcub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, hipcub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
hipLaunchKernelGGL(( MomentsCUDAKernel<T, D>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
hipLaunchKernelGGL(( RowwiseMomentsCUDAKernel<T>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
hipLaunchKernelGGL(( TransposeCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
} // namespace math
} // namespace caffe2
| 33e67abf6f7ca8bcf29665da623c74634087171e.cu | // Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
#define DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(T, Funcname, function) \
__global__ \
void _Kernel_##T##_##Funcname(const int N, const T* x, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
y[i] = function(x[i]); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* x, T* y, \
CUDAContext* context) { \
_Kernel_##T##_##Funcname<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, \
0, context->cuda_stream()>>>( \
N, x, y); \
}
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Exp, expf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Log, logf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Cos, cosf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sin, sinf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Abs, fabsf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqrt, sqrtf);
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, InvSqrt, rsqrtf);
__device__ float cuda_sqrf(const float x) { return x * x; }
DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION(float, Sqr, cuda_sqrf);
#undef DELEGATE_SIMPLE_CUDA_UNARY_FUNCTION
#define DELEGATE_SINCOS_CUDA_FUNCTION(T) \
__global__ void _Kernel_##T##_##SinCos( \
const int N, const T* x, T* ys, T* yc) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
sincos(x[i], ys + i, yc + i); \
} \
} \
template <> \
void SinCos<T, CUDAContext>( \
const int N, const T* x, T* ys, T* yc, CUDAContext* context) { \
_Kernel_##T##_##SinCos<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, x, ys, yc); \
}
DELEGATE_SINCOS_CUDA_FUNCTION(float)
DELEGATE_SINCOS_CUDA_FUNCTION(double)
#undef DELEGATE_SINCOS_CUDA_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(T, Funcname, expr) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = convert::To<T, float>(a[i]) expr convert::To<T, float>(b[i]); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(int32_t, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float, Div, /);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Add, +);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Sub, -);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Mul, *);
DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION(float16, Div, /);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(T, Funcname, func) \
__global__ void _Kernel_##T##_##Funcname( \
const int N, const T* a, const T* b, T* y) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
float r = \
func(convert::To<T, float>(a[i]), convert::To<T, float>(b[i])); \
y[i] = convert::To<float, T>(r); \
} \
} \
template <> \
void Funcname<T, CUDAContext>( \
const int N, const T* a, const T* b, T* y, CUDAContext* context) { \
_Kernel_##T##_##Funcname<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, a, b, y); \
}
DELEGATE_SIMPLE_CUDA_BINARY_PREFIX_FUNCTION(float, ElemwiseMax, fmaxf);
#undef DELEGATE_SIMPLE_CUDA_BINARY_INFIX_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor<CUDAContext>* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<TIndex>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
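// cuBLAS assumes column-major storage. A row-major M x N matrix has the same
// memory layout as a column-major N x M matrix, so C = A * B (row-major) is
// computed as C^T = B^T * A^T; this is why the operand order and the M/N
// arguments are swapped in the cublas calls below.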
template <>
void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
template <>
void Gemm<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
// call cublasHgemm
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
(const __half*)A,
lda,
&beta_fp16,
(__half*)C,
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
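  // cublasSgemmStridedBatched is only available from CUDA 8.0 on, so older
  // toolkits fall back to a loop of per-matrix GEMM calls.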
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
b_stride,
A,
lda,
a_stride,
&beta,
C,
N,
c_stride,
batch_size));
#endif
}
namespace {
__global__ void FloatToHalfKernel(const int N, const float* X, half* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __float2half(X[i]);
}
}
__global__ void HalfToFloatKernel(const int N, const half* X, float* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = __half2float(X[i]);
}
}
} // namespace
template <>
void GemmBatched<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
const int a_stride = M * K;
const int b_stride = K * N;
const int c_stride = M * N;
#if __CUDACC_VER_MAJOR__ < 8
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
#else
// 3 options:
// 1) scratch != null = cast to fp32, SgemmStridedBatched, cast result to fp16
// 2) math_type == FLOAT, scratch == nullptr = looped SgemmEx
// 3) math_type == FLOAT16, scratch == nullptr = batched Hgemm
if (scratch != nullptr) {
const int A_size = a_stride * batch_size;
const int B_size = b_stride * batch_size;
// cast, cublasSgemmStridedBatched, cast
size_t in_elems = A_size + B_size;
size_t out_elems = c_stride * batch_size;
scratch->Resize(in_elems + out_elems);
float* scratch_ptr = scratch->mutable_data<float>();
float* A_fp32 = scratch_ptr;
float* B_fp32 = scratch_ptr + A_size;
float* C_fp32 = scratch_ptr + A_size + B_size;
// cast A, B into fp32
HalfToFloatKernel<<<CAFFE_GET_BLOCKS(A_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(A_size, (half*)A, A_fp32);
HalfToFloatKernel<<<CAFFE_GET_BLOCKS(B_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(B_size, (half*)B, B_fp32);
// run fp32 batched Gemm
GemmBatched<float, CUDAContext>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A_fp32,
B_fp32,
beta,
C_fp32,
context);
// cast result back to fp16
FloatToHalfKernel<<<
CAFFE_GET_BLOCKS(batch_size * M * N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(batch_size * M * N, C_fp32, (half*)C);
} else {
if (math_type == TensorProto_DataType_FLOAT) {
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
math::Gemm<float16, CUDAContext>(
TransA,
TransB,
M,
N,
K,
alpha,
A + a_stride * i,
B + b_stride * i,
beta,
C + c_stride * i,
context);
}
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (TransA == CblasNoTrans) ? K : M;
const int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// convert alpha, beta from float -> __half
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha_fp16,
(const __half*)B,
ldb,
b_stride,
(const __half*)A,
lda,
a_stride,
&beta_fp16,
(__half*)C,
N,
c_stride,
batch_size));
}
}
#endif
}
#if CUDA_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float,CUDAContext>(TransA,
TransB,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
math_type);
}
template <>
void Gemm<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_CHECK(cublasGemmEx(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(cublasSetMathMode(
context->cublas_handle(),
CUBLAS_DEFAULT_MATH));
}
}
template <>
void GemmBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
template <>
void GemmBatched<float16, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float16* A,
const float16* B,
const float beta,
float16* C,
CUDAContext* context,
Tensor<CUDAContext>* scratch,
TensorProto::DataType math_type) {
return GemmBatched<float16, CUDAContext, DefaultEngine>(
TransA,
TransB,
batch_size,
M,
N,
K,
alpha,
A,
B,
beta,
C,
context,
scratch,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cuTransB,
cuTransA,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cuTransA,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
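// Adds `batch` consecutive slices of `first`, each `stripe` elements apart,
// element-wise into Y; the accumulation is performed in float even for
// float16 data.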
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T,float>(x[i]);
Y[i] = convert::To<float,T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T><<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float16);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
void Gemv<float16, CUDAContext>(
const CBLAS_TRANSPOSE TransA,
const int M,
const int N,
const float alpha,
const float16* A,
const float16* x,
const float beta,
float16* y,
CUDAContext* context,
TensorProto::DataType math_type) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / cublasHgemm
int m = (cuTransA == CUBLAS_OP_N) ? N : M;
int k = (cuTransA == CUBLAS_OP_N) ? M : N;
int LDA = (cuTransA == CUBLAS_OP_N) ? m : k;
int LDC = m;
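  // cuBLAS does not provide a half-precision GEMV, so the matrix-vector
  // product is expressed as a GEMM with a single column; m, k, LDA and LDC
  // above are the shapes of that equivalent GEMM.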
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_CHECK(cublasSgemmEx(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
LDA,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
LDC));
} else if (math_type == TensorProto_DataType_FLOAT16) {
auto alpha_fp16 = convert::floatToHalf(alpha);
auto beta_fp16 = convert::floatToHalf(beta);
CUBLAS_CHECK(cublasHgemm(
context->cublas_handle(),
cuTransA,
CUBLAS_OP_N,
m,
1,
k,
&alpha_fp16,
(const __half*)A,
LDA,
(const __half*)x,
k,
&beta_fp16,
(__half*)y,
LDC));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void SetKernel(const int N, const T alpha, T* Y) {
CUDA_1D_KERNEL_LOOP(i, N) {
Y[i] = alpha;
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_SET(T) \
template <> \
void Set<T, CUDAContext>( \
const size_t N, const T alpha, T* Y, CUDAContext* context) { \
SetKernel<<< \
CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, alpha, Y); \
}
CAFFE2_SPECIALIZED_CUDA_SET(float);
CAFFE2_SPECIALIZED_CUDA_SET(double);
CAFFE2_SPECIALIZED_CUDA_SET(bool);
CAFFE2_SPECIALIZED_CUDA_SET(int8_t);
CAFFE2_SPECIALIZED_CUDA_SET(int16_t);
CAFFE2_SPECIALIZED_CUDA_SET(float16);
CAFFE2_SPECIALIZED_CUDA_SET(int);
CAFFE2_SPECIALIZED_CUDA_SET(int64_t);
CAFFE2_SPECIALIZED_CUDA_SET(char);
CAFFE2_SPECIALIZED_CUDA_SET(uint8_t);
CAFFE2_SPECIALIZED_CUDA_SET(uint16_t);
#undef CAFFE2_SPECIALIZED_CUDA_SET
namespace {
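// The curand generators fill x with uniform values in (0, 1]; UniformShift
// rescales them in place so they span the (min, max] range.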
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
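// UniformIntFit maps raw 32-bit random words into [min, max] with a modulo;
// this introduces a slight bias whenever (max - min + 1) does not divide 2^32.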
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
math::Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
float result;
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, &result));
context->Copy<float, CPUContext, CUDAContext>(1, &result, y);
}
template <>
void Dot<float16, CUDAContext>(
const int n,
const float16* a,
const float16* b,
float16* y,
CUDAContext* context) {
float16 result;
// execute with 32-bit math
CUBLAS_CHECK(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
&result,
CUDA_R_16F,
CUDA_R_32F));
context->Copy<float16, CPUContext, CUDAContext>(1, &result, y);
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
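// SumKernel is always launched as a single block of SUM_KERNEL_NTHREADS
// threads (see the <<<1, SUM_KERNEL_NTHREADS, ...>>> launches below), so the
// shared reduction buffer covers the entire grid.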
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] +=
reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] +
reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
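// cub::DeviceReduce follows a two-pass convention: a first call with a null
// workspace only reports the required temporary storage size, which is then
// taken from the scratch tensor before the second call does the reduction.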
template <typename T, typename IterT>
void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<TIndex>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<TIndex>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<TIndex>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(float16)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor<CUDAContext>* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor<CUDAContext>* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(float16)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void SelectKernel(
const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
void Select<float, CUDAContext>(
const int N, const int D, const float* x, const int* idx, float* y,
CUDAContext* context) {
SelectKernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
void Select<float16, CUDAContext>(
const int N,
const int D,
const float16* x,
const int* idx,
float16* y,
CUDAContext* context) {
SelectKernel<float16><<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
namespace {
template <typename T>
__global__ void ScaleKernel(const int n, const float alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
// y[i] = convert::To<float,T>(convert::To<T, float>(x[i]) * alpha);
y[i] = convert::Get<T>(convert::Get<float>(x[i]) * alpha);
}
}
template <typename T>
__global__ void
ScaleKernelDeviceAlpha(const int n, const float* alpha, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = x[i] * (*alpha);
}
}
template <typename T>
__global__ void PowKernel(const int n, const T* x, const T exponent, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = powf(x[i], exponent);
}
}
// fp16 specialization
template <>
__global__ void ScaleKernelDeviceAlpha(
const int n,
const float* alpha,
const float16* x,
float16* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = convert::To<float, float16>(
convert::To<float16, float>(x[i]) * (*alpha));
}
}
} // namespace
template <>
void Powx<float, CUDAContext>(
const int N,
const float* a,
const float b,
float* y,
CUDAContext* context) {
PowKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, a, b, y);
}
template <>
void Scale<float, CUDAContext>(
const int n,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
ScaleKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernel<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Scale<float, CUDAContext>(
const int n, const float* alpha, const float *x, float* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float><<<
CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(
n, alpha, x, y);
}
template <>
void Scale<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* x,
float16* y,
CUDAContext* context) {
ScaleKernelDeviceAlpha<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, x, y);
}
template <>
void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
void Axpy<float16, CUDAContext>(
const int N,
const float alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
CUBLAS_CHECK(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_16F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
}
namespace {
template <typename T>
__global__ void AxpyKernel(const int n, const float* a, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = convert::Get<T>(
convert::Get<float>(x[index]) * (*a) + convert::Get<float>(y[index]));
}
}
} // namespace
template <>
void Axpy<float, CUDAContext>(
const int n, const float* alpha, const float* X,
float* Y, CUDAContext* context) {
AxpyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, alpha, X, Y);
}
template <>
void Axpy<float16, CUDAContext>(
const int n,
const float* alpha,
const float16* X,
float16* Y,
CUDAContext* context) {
AxpyKernel<float16><<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, alpha, X, Y);
}
namespace {
template <typename T>
__global__ void AxpbyKernel(const int n, const T a, const T* x,
const T b, T* y) {
CUDA_1D_KERNEL_LOOP(index, n) {
y[index] = x[index] * a + y[index] * b;
}
}
} // namespace
template <>
void Axpby<float, CUDAContext>(
const int n, const float a, const float* x, const float b, float* y,
CUDAContext* context) {
AxpbyKernel<float><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS,
0, context->cuda_stream()>>>(n, a, x, b, y);
}
namespace {
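// In the NCHW im2col kernel each thread owns one (channel, h_out, w_out)
// output location and writes its kernel_h * kernel_w patch values, spaced
// height_col * width_col apart in data_col; taps that fall outside the image
// are written as 0.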
template <typename T>
__global__ void im2col_gpu_kernel_nchw(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_col) {
CUDA_1D_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
int channel_out = channel_in * kernel_h * kernel_w;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const T* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h = h_in + i * dilation_h;
int w = w_in + j * dilation_w;
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
template <typename T>
__global__ void im2col_gpu_kernel_nhwc(const int n, const T* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int width_col, const int channels,
T* data_col) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
int channel_in = index % channels;
int w_out = index / channels % width_col;
int h_out = index / channels / width_col;
int h_in = h_out * stride_h - pad_t;
int w_in = w_out * stride_w - pad_l;
T* local_data_col = data_col +
((h_out * width_col) + w_out) * channels * kernel_h * kernel_w
+ channel_in;
for (int i = 0; i < dkernel_h; i += dilation_h) {
int h = h_in + i;
for (int j = 0; j < dkernel_w; j += dilation_w) {
int w = w_in + j;
*local_data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[(h * width + w) * channels + channel_in] : 0;
local_data_col += channels;
}
}
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nchw(const int n, const T* data_col,
const int height, const int width,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int w = index % width + pad_l;
int h = (index / width) % height + pad_t;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int data_col_index =
(((c * patch_h + h_k) * patch_w + w_k) * height_col + h_col) *
width_col +
w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
template <typename T>
__global__ void col2im_gpu_kernel_nhwc(const int n, const T* data_col,
const int width, const int channels,
const int patch_h, const int patch_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
T* data_im) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
int c = index % channels;
int w = index / channels % width + pad_l;
int h = index / channels / width + pad_t;
// compute the start and end of the output
int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
int w_col_end = min(w / stride_w + 1, width_col);
int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
int h_col_end = min(h / stride_h + 1, height_col);
int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
int c_col = (h_k * patch_w + w_k) * channels + c;
val += data_col[(h_col * width_col + w_col) * channels_col + c_col];
}
}
}
data_im[index] = val;
}
}
// Ported from caffe1
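// The N-d kernels below cache the shape/stride arrays in shared memory (hence
// the num_axes-smaller-than-block-size requirement at their call sites) and
// use an odometer-style loop to enumerate the column locations each thread
// touches.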
template <typename T, int num_axes>
__global__ void im2col_nd_gpu_kernel(
const int n,
const T* data_im,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_col) {
int d_offset[num_axes]; // NOLINT(runtime/arrays)
int d_iter[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
int i;
int kernel_size = 1;
for (i = 0; i < num_axes; ++i) {
kernel_size *= shared_kernel_shape[i];
}
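  // Each thread handles one column-buffer "channel" (a channel / kernel-offset
  // combination, index < col_shape[0]); the do-while below walks every output
  // spatial position like an odometer (d_iter) and copies the matching input
  // element into data_col, writing 0 where the window falls into the padding.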
CUDA_1D_KERNEL_LOOP(index, n) {
if (index >= col_shape[0]) {
break;
}
// Initialize offset, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int offset = index;
for (i = num_axes - 1; i >= 0; --i) {
if (i < num_axes - 1) {
offset /= shared_kernel_shape[i + 1];
}
d_offset[i] = offset % shared_kernel_shape[i];
}
for (i = 0; i < num_axes; ++i) {
d_iter[i] = 0;
}
bool incremented;
do {
int index_col = index;
int index_im = index / kernel_size;
bool in_range = true;
for (i = 0; i < num_axes; ++i) {
const int d = d_iter[i];
const int d_im = d * shared_stride[i] - shared_pad[i] +
d_offset[i] * shared_dilation[i];
in_range &= (d_im >= 0 && d_im < shared_im_shape[i + 1]);
index_col *= shared_col_shape[i + 1];
index_col += d;
index_im *= shared_im_shape[i + 1];
index_im += d_im;
}
if (in_range) {
// data_col[index_col] = 0;
data_col[index_col] = data_im[index_im];
// T temp = data_im[index_im];
} else {
data_col[index_col] = 0;
}
incremented = false;
for (i = num_axes - 1; i >= 0; --i) {
// const int d_max = shared_kernel_shape[i];
const int d_max = shared_col_shape[i + 1];
if (d_iter[i] == d_max - 1) {
d_iter[i] = 0;
} else { // d_iter[i] < d_max - 1
++d_iter[i];
incremented = true;
break;
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented); // do
} // CUDA_KERNEL_LOOP(index, n)
}
template <typename T, int num_axes>
__global__ void col2im_nd_gpu_kernel(
const int n,
const T* data_col,
const int* im_shape,
const int* col_shape,
const int* kernel_shape,
const int* pad,
const int* stride,
const int* dilation,
T* data_im) {
int d_im[num_axes]; // NOLINT(runtime/arrays)
int d_col_iter[num_axes]; // NOLINT(runtime/arrays)
int d_col_start[num_axes]; // NOLINT(runtime/arrays)
int d_col_end[num_axes]; // NOLINT(runtime/arrays)
__shared__ int shared_dilation[num_axes];
__shared__ int shared_kernel_shape[num_axes];
__shared__ int shared_pad[num_axes];
__shared__ int shared_stride[num_axes];
__shared__ int shared_col_shape[num_axes + 1];
__shared__ int shared_im_shape[num_axes + 1];
if (threadIdx.x < num_axes) {
shared_dilation[threadIdx.x] = dilation[threadIdx.x];
shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x];
shared_pad[threadIdx.x] = pad[threadIdx.x];
shared_stride[threadIdx.x] = stride[threadIdx.x];
}
if (threadIdx.x < num_axes + 1) {
shared_col_shape[threadIdx.x] = col_shape[threadIdx.x];
shared_im_shape[threadIdx.x] = im_shape[threadIdx.x];
}
__syncthreads();
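  // Each thread handles one image element: it first computes, per axis, the
  // range of column positions whose receptive fields cover this element
  // (d_col_start / d_col_end), then accumulates their contributions, skipping
  // kernel offsets that do not line up with the dilation.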
CUDA_1D_KERNEL_LOOP(index, n) {
// Initialize channel_in, computed in the loop below, with intermediate
// computations used to compute the spatial indices.
int c_im = index;
// Calculate d_im (image dimensions).
for (int i = num_axes - 1; i >= 0; --i) {
d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i];
c_im /= shared_im_shape[i + 1];
}
// Calculate col start/end indices.
bool done = false;
for (int i = 0; i < num_axes; ++i) {
const int kernel_extent =
shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1;
d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent)
? 0
: (d_im[i] - kernel_extent) / shared_stride[i] + 1;
d_col_end[i] =
min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]);
if (d_col_start[i] >= d_col_end[i]) {
// Skip computation if the dimension is 0 at any spatial axis --
// final val will be 0.
data_im[index] = 0;
done = true;
break; // for (int i = 0; i < num_axes; ++i)
}
}
if (done) {
continue; // CUDA_KERNEL_LOOP(index, n)
}
// Loop over the col to compute the output val.
T val = 0;
bool incremented = true;
bool skip = false;
do {
// Compute the final offset.
int final_offset = 0;
int kernel_shape_prod = 1;
int kernel_index;
for (int i = num_axes - 1; i >= 0; --i) {
kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i];
if (kernel_index % shared_dilation[i]) {
skip = true;
break;
} else {
kernel_index /= shared_dilation[i];
final_offset += kernel_index * kernel_shape_prod;
kernel_shape_prod *= shared_kernel_shape[i];
}
}
if (!skip) {
final_offset += kernel_shape_prod * c_im;
for (int i = 0; i < num_axes; ++i) {
final_offset *= shared_col_shape[i + 1];
final_offset += d_col_iter[i];
}
val += data_col[final_offset];
}
skip = false;
incremented = false;
for (int i = num_axes - 1; i >= 0; --i) {
const int d_max = d_col_end[i];
if (d_col_iter[i] == d_max - 1) {
d_col_iter[i] = d_col_start[i];
} else { // d_col_iter[i] < d_max - 1
++d_col_iter[i];
incremented = true;
break; // for (int i = num_axes - 1; i >= 0; --i)
}
} // for (int i = num_axes - 1; i >= 0; --i)
} while (incremented);
data_im[index] = val;
} // CUDA_KERNEL_LOOP(index, n)
}
} // namespace
template <>
void Im2col<float, CUDAContext, StorageOrder::NCHW>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height_col * width_col;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_col);
}
template <>
void Im2col<float, CUDAContext, StorageOrder::NHWC>(
const float* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_col, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
// We are going to launch height_col * width_col * channels kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height_col * width_col * channels;
// NOLINT_NEXT_LINE(whitespace/operators)
im2col_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_im, height, width, kernel_h, kernel_w,
dilation_h, dilation_w, pad_t, pad_l, stride_h, stride_w,
width_col, channels, data_col);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_gpu_kernel_nchw<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, height, width, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w,
height_col, width_col, data_im);
}
template <>
void Col2im<float, CUDAContext, StorageOrder::NHWC>(
const float* data_col, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int pad_t, const int pad_l, const int pad_b, const int pad_r,
const int stride_h,
const int stride_w, float* data_im, CUDAContext* context) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int num_kernels = height * width * channels;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_gpu_kernel_nhwc<float><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS, 0,
context->cuda_stream()>>>(
num_kernels, data_col, width, channels, kernel_h, kernel_w,
dilation_h, dilation_w,
pad_t, pad_l, stride_h, stride_w, height_col, width_col, data_im);
}
template <>
void Col2imNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_col,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_img,
CUDAContext* context) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define COL2IM_ND_KERNEL(n) \
col2im_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \
<<<CAFFE_GET_BLOCKS(img_size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
img_size, \
data_col, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_img)
switch (N) {
case 1:
COL2IM_ND_KERNEL(1);
break;
case 2:
COL2IM_ND_KERNEL(2);
break;
case 3:
COL2IM_ND_KERNEL(3);
break;
case 4:
COL2IM_ND_KERNEL(4);
break;
case 5:
COL2IM_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Col2imNd does not support computation with ", N, " spatial axes");
}
}
template <>
void Im2colNd<float, CUDAContext, StorageOrder::NCHW>(
const float* data_img,
const int* img_shape,
const int* col_shape,
const int img_size,
const int col_size,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const int N,
float* data_col,
CUDAContext* context,
    bool /*accumulate_output*/) {
CAFFE_ENFORCE_LT(
N, CAFFE_CUDA_NUM_THREADS, "num_axes should be smaller than block size.");
#define IM2COL_ND_KERNEL(n) \
im2col_nd_gpu_kernel<float, n> /* NOLINT_NEXT_LINE(whitespace/operators) */ \
<<<CAFFE_GET_BLOCKS(col_size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
col_size, \
data_img, \
img_shape, \
col_shape, \
kernel_shape, \
pad, \
stride, \
dilation, \
data_col)
switch (N) {
case 1:
IM2COL_ND_KERNEL(1);
break;
case 2:
IM2COL_ND_KERNEL(2);
break;
case 3:
IM2COL_ND_KERNEL(3);
break;
case 4:
IM2COL_ND_KERNEL(4);
break;
case 5:
IM2COL_ND_KERNEL(5);
break;
default:
CAFFE_THROW(
"Im2colNd does not support computation with ", N, " spatial axes");
}
}
template <>
void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::TypedCopy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(B, ldb * itemsize, A, lda * itemsize, N * itemsize, M,
cudaMemcpyDeviceToDevice, context->cuda_stream());
}
template <>
void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
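// RowwiseReduceKernel / ColwiseReduceKernel: blocks stride over the rows
// (columns) of X; within a block, threads stride over the reduced dimension,
// partial results are combined with cub::BlockReduce, and thread 0 writes the
// final value.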
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
std::vector<int> MakeTransposeAxes(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes) {
std::vector<int> transpose_axes(num_dims);
const int d = num_dims - num_axes;
std::copy_n(axes, num_axes, transpose_axes.begin() + d);
std::sort(transpose_axes.begin() + d, transpose_axes.end());
int p = 0;
int q = d;
for (int i = 0; i < num_dims; ++i) {
if (q < num_dims && i == transpose_axes[q]) {
++q;
} else {
transpose_axes[p++] = i;
}
}
return transpose_axes;
}
template <int D>
void ComputeTransposedStrides(
const int* X_dims,
const int* axes,
int* X_strides) {
int buff[D];
int cur_stride = 1;
for (int i = D - 1; i >= 0; --i) {
buff[i] = cur_stride;
cur_stride *= X_dims[i];
}
for (int i = 0; i < D; ++i) {
X_strides[i] = buff[axes[i]];
}
}
template <typename T, class Reducer, int D>
__global__ void ReduceTensorCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const Reducer reducer,
const T init,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
val = reducer(val, __ldg(X + X_index));
#else
val = reducer(val, X[X_index]);
#endif
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val;
}
__syncthreads();
}
}
template <typename T, class Reducer, int D>
void ReduceTensorCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
ReduceTensorCUDAKernel<T, Reducer, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, reducer, init, X, Y);
}
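// ReduceTensorCUDA permutes the axes so the reduced axes form the trailing
// (inner) block. If the reduced axes already are the trailing dimensions of X,
// the data is laid out as [outer_size, inner_size] and a plain rowwise
// reduction suffices; otherwise ReduceTensorCUDAKernel gathers through the
// permuted strides, dispatched on the tensor rank.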
template <typename T, class Reducer>
void ReduceTensorCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const Reducer& reducer,
const T& init,
const T* X,
T* Y,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
RowwiseReduceKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, reducer, init, X, Y);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_2(
num_dims,
ReduceTensorCUDAImpl,
T,
Reducer,
outer_size,
inner_size,
dims,
transpose_axes.data(),
reducer,
init,
X,
Y,
context);
}
template <typename T>
void ReduceMeanCUDAImpl(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
ReduceTensorCUDA(
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context);
const int X_size =
std::accumulate(dims, dims + num_dims, 1, std::multiplies<int>());
int scale = 1;
for (int i = 0; i < num_axes; ++i) {
scale *= dims[axes[i]];
}
const int Y_size = X_size / scale;
Scale<T, CUDAContext>(
Y_size, 1.0f / static_cast<float>(scale), Y, Y, context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(T) \
template <> \
void ReduceMin<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Min(), \
std::numeric_limits<T>::max(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MIN
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(T) \
template <> \
void ReduceMax<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, \
dims, \
num_axes, \
axes, \
cub::Max(), \
std::numeric_limits<T>::lowest(), \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MAX
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(T) \
template <> \
void ReduceSum<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceTensorCUDA( \
num_dims, dims, num_axes, axes, cub::Sum(), T(0), X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(float)
CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM(double)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_SUM
#define CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(T) \
template <> \
void ReduceMean<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
ReduceMeanCUDAImpl<T>(num_dims, dims, num_axes, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN(float)
#undef CAFFE2_SPECIALIZED_CUDA_REDUCE_MEAN
namespace {
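// Broadcasting is implemented as a gather: broadcast dimensions (missing
// leading dims and dims of size 1 in X) get stride 0, so every output index
// decodes to a valid input offset and the kernel simply copies that element.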
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += X_strides.data[i] == 0
? 0
: (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<int, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
std::copy_n(Y_dims, D, Y_dims_array.data);
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(Y_size, X_strides_array, Y_dims_array, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, BroadcastCUDAImpl, T, X_ndim, X_dims, Y_dims, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
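// The moments kernels accumulate the sum and the sum of squares per output
// element with two block reductions, then derive
//   mean = sum / n,  variance = sum_sq / n - mean * mean.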
template <typename T>
__global__ void RowwiseMomentsCUDAKernel(
const int rows,
const int cols,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
const int X_index = i * cols + j;
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(cols);
variance[i] = v_val / static_cast<T>(cols) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
__global__ void MomentsCUDAKernel(
const int outer_size,
const int inner_size,
SimpleArray<int, D> X_strides,
SimpleArray<int, D> Y_dims,
const T* X,
T* mean,
T* variance) {
__shared__ typename BlockReduce<T>::TempStorage m_storage;
__shared__ typename BlockReduce<T>::TempStorage v_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
T m_val = 0;
T v_val = 0;
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int X_index = 0;
int Y_index = i * inner_size + j;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index % Y_dims.data[i]) * X_strides.data[i];
Y_index /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
m_val += __ldg(X + X_index);
v_val += __ldg(X + X_index) * __ldg(X + X_index);
#else
m_val += X[X_index];
v_val += X[X_index] * X[X_index];
#endif
}
m_val = BlockReduce<T>(m_storage).Reduce(m_val, cub::Sum());
v_val = BlockReduce<T>(v_storage).Reduce(v_val, cub::Sum());
if (threadIdx.x == 0) {
mean[i] = m_val / static_cast<T>(inner_size);
variance[i] = v_val / static_cast<T>(inner_size) - mean[i] * mean[i];
}
__syncthreads();
}
}
template <typename T, int D>
void MomentsCUDAImpl(
const int outer_size,
const int inner_size,
const int* dims,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
}
MomentsCUDAKernel<T, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size, inner_size, X_strides, Y_dims, X, mean, variance);
}
template <typename T>
void MomentsCUDA(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* X,
T* mean,
T* variance,
CUDAContext* context) {
CAFFE_ENFORCE_LE(num_axes, num_dims);
const std::vector<int> transpose_axes =
MakeTransposeAxes(num_dims, dims, num_axes, axes);
const int pivot = num_dims - num_axes;
int outer_size = 1;
for (int i = 0; i < pivot; ++i) {
outer_size *= dims[transpose_axes[i]];
}
int inner_size = 1;
for (int i = pivot; i < num_dims; ++i) {
inner_size *= dims[transpose_axes[i]];
}
if (transpose_axes[pivot] == pivot) {
RowwiseMomentsCUDAKernel<T>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(outer_size, inner_size, X, mean, variance);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
num_dims,
MomentsCUDAImpl,
T,
outer_size,
inner_size,
dims,
transpose_axes.data(),
X,
mean,
variance,
context);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_MOMENTS(T) \
template <> \
void Moments<T, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* X, \
T* mean, \
T* variance, \
CUDAContext* context) { \
MomentsCUDA<T>( \
num_dims, dims, num_axes, axes, X, mean, variance, context); \
}
CAFFE2_SPECIALIZED_CUDA_MOMENTS(float)
#undef CAFFE2_SPECIALIZED_CUDA_MOMENTS
namespace {
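// Transpose is a gather over the output: each output index is decoded into a
// multi-index of Y and mapped back to an input offset through the permuted
// strides computed by ComputeTransposedStrides.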
template <typename T, int D>
__global__ void TransposeCUDAKernel(
const int size,
const SimpleArray<int, D> X_strides,
const SimpleArray<int, D> Y_dims,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
X_index += (Y_index_val % Y_dims.data[i]) * X_strides.data[i];
Y_index_val /= Y_dims.data[i];
}
#if __CUDA_ARCH__ >= 350
Y[Y_index] = __ldg(X + X_index);
#else
Y[Y_index] = X[X_index];
#endif
}
}
template <typename T, int D>
void TransposeCUDAImpl(
const int* dims,
const int* axes,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides;
SimpleArray<int, D> Y_dims;
ComputeTransposedStrides<D>(dims, axes, X_strides.data);
int size = 1;
for (int i = 0; i < D; ++i) {
Y_dims.data[i] = dims[axes[i]];
size *= dims[i];
}
TransposeCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, X_strides, Y_dims, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(T) \
template <> \
void Transpose<T, CUDAContext>( \
const int ndim, \
const int* dims, \
const int* axes, \
const T* X, \
T* Y, \
CUDAContext* context) { \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
ndim, TransposeCUDAImpl, T, dims, axes, X, Y, context); \
}
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(float)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(double)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(int)
CAFFE2_SPECIALIZED_CUDA_TRANSPOSE(TIndex)
#undef CAFFE2_SPECIALIZED_CUDA_TRANSPOSE
} // namespace math
} // namespace caffe2
|
125c71c3f77fff7609a60af5957529149621d81d.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <cmath>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <cusp/io/matrix_market.h>
#include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include <cusp/blas.h>
#include <timer.h>
#include "hipsparse.h"
// -----------------------------------------------------------------------------
// Macro to obtain a random number between two specified values
// -----------------------------------------------------------------------------
#define RAND(L,H) ((L) + ((H)-(L)) * (float)rand()/(float)RAND_MAX)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
#ifdef WIN32
# define ISNAN(A) (_isnan(A))
#else
# define ISNAN(A) (isnan(A))
#endif
// -----------------------------------------------------------------------------
// Typedefs
// -----------------------------------------------------------------------------
typedef double REAL;
typedef double PREC_REAL;
typedef typename cusp::csr_matrix<int, REAL, cusp::device_memory> Matrix;
typedef typename cusp::array1d<REAL, cusp::device_memory> Vector;
typedef typename cusp::array1d<REAL, cusp::host_memory> VectorH;
typedef typename cusp::array1d<PREC_REAL, cusp::device_memory> PrecVector;
// -----------------------------------------------------------------------------
using std::cout;
using std::cerr;
using std::cin;
using std::endl;
using std::string;
using std::vector;
void spmv(cusparseHybMat_t& hybA,
hipsparseHandle_t& handle, hipsparseMatDescr_t& descrA,
const float *x, float *y);
void spmv(cusparseHybMat_t& hybA,
hipsparseHandle_t& handle, hipsparseMatDescr_t& descrA,
const double *x, double *y);
// -----------------------------------------------------------------------------
// MAIN
// -----------------------------------------------------------------------------
int main(int argc, char** argv)
{
// Set up the problem to be solved.
string fileMat;
if (argc < 2) {
cerr << "Usage: ./driver_cusparse_hyb ${MATRIX_FILE_NAME}" << endl;
exit(-1);
}
fileMat = argv[1];
//cout << fileMat << endl;
// Get matrix and rhs.
Matrix A;
Vector b;
Vector x;
cusp::io::read_matrix_market_file(A, fileMat);
b.resize(A.num_rows);
{
VectorH x_h(A.num_rows);
for (int i = 0; i < A.num_rows; i++)
x_h[i] = RAND(2,10) / 2;
x = x_h;
}
hipsparseHandle_t handle;
hipsparseCreate(&handle);
cusparseHybMat_t hybA;
cusparseCreateHybMat(&hybA);
hipsparseMatDescr_t descrA;
hipsparseCreateMatDescr(&descrA);
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatDiagType(descrA, HIPSPARSE_DIAG_TYPE_NON_UNIT);
hipsparseSetMatIndexBase(descrA, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatFillMode(descrA, HIPSPARSE_FILL_MODE_LOWER);
cusparseDcsr2hyb(handle, A.num_rows, A.num_rows, descrA, thrust::raw_pointer_cast(&A.values[0]), thrust::raw_pointer_cast(&A.row_offsets[0]), thrust::raw_pointer_cast(&A.column_indices[0]), hybA, A.num_entries, CUSPARSE_HYB_PARTITION_AUTO);
CUDATimer timer;
int counter = 0;
double elapsed = 0.0;
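  // Run the SpMV 10 times; the first iteration is treated as a warm-up and
  // excluded from the averaged timing.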
for (int i = 0; i < 10; i++) {
timer.Start();
spmv(hybA, handle, descrA, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&b[0]));
timer.Stop();
if (i > 0) {
counter ++;
elapsed += timer.getElapsed();
}
}
elapsed /= counter;
//cout << "cuSparse HYB: " << elapsed << endl;
long int rows,cols,temp,NNZ=0;
std::ifstream infile(fileMat.c_str());
string line;
bool flag=false;
while (std::getline(infile, line)){
if(!flag && line.substr(0,1).compare("%")!=0){
std::istringstream iss(line);
iss>>rows>>cols>>temp;
flag=true;
}
if(flag)
NNZ++;
}
cout<<"name = SPMV"<<endl;
while(fileMat.find("/")!=string::npos){
fileMat = fileMat.substr(fileMat.find("/")+1,fileMat.length());
}
cout<<"input = "<<fileMat<<endl;
cout<<"datatype = double"<<endl;
cout<<"dim_x = "<<rows<<endl;
cout<<"dim_y = "<<cols<<endl;
cout<<"NNZ = "<<NNZ-1<<endl;
cout<<"value_type = GFLOPS"<<endl;
cout<<"value = "<<(2*NNZ*1e-9)/(elapsed*1e-3)<<endl;
hipsparseDestroyMatDescr(descrA);
cusparseDestroyHybMat(hybA);
hipsparseDestroy(handle);
return 0;
}
void spmv(cusparseHybMat_t& hybA,
hipsparseHandle_t& handle, hipsparseMatDescr_t& descrA,
const float *x, float *y)
{
float one = 1.f, zero = 0.f;
cusparseShybmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &one, descrA, hybA, x, &zero, y);
}
void spmv(cusparseHybMat_t& hybA,
hipsparseHandle_t& handle, hipsparseMatDescr_t& descrA,
const double *x, double *y)
{
double one = 1.0, zero = 0.0;
cusparseDhybmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &one, descrA, hybA, x, &zero, y);
}
| 125c71c3f77fff7609a60af5957529149621d81d.cu | #include <algorithm>
#include <fstream>
#include <cmath>
#include <map>
#include <stdio.h>
#include <stdlib.h>
#include <cusp/io/matrix_market.h>
#include <cusp/csr_matrix.h>
#include <cusp/multiply.h>
#include <cusp/blas.h>
#include <timer.h>
#include "cusparse.h"
// -----------------------------------------------------------------------------
// Macro to obtain a random number between two specified values
// -----------------------------------------------------------------------------
#define RAND(L,H) ((L) + ((H)-(L)) * (float)rand()/(float)RAND_MAX)
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
#ifdef WIN32
# define ISNAN(A) (_isnan(A))
#else
# define ISNAN(A) (isnan(A))
#endif
// -----------------------------------------------------------------------------
// Typedefs
// -----------------------------------------------------------------------------
typedef double REAL;
typedef double PREC_REAL;
typedef typename cusp::csr_matrix<int, REAL, cusp::device_memory> Matrix;
typedef typename cusp::array1d<REAL, cusp::device_memory> Vector;
typedef typename cusp::array1d<REAL, cusp::host_memory> VectorH;
typedef typename cusp::array1d<PREC_REAL, cusp::device_memory> PrecVector;
// -----------------------------------------------------------------------------
using std::cout;
using std::cerr;
using std::cin;
using std::endl;
using std::string;
using std::vector;
void spmv(cusparseHybMat_t& hybA,
cusparseHandle_t& handle, cusparseMatDescr_t& descrA,
const float *x, float *y);
void spmv(cusparseHybMat_t& hybA,
cusparseHandle_t& handle, cusparseMatDescr_t& descrA,
const double *x, double *y);
// -----------------------------------------------------------------------------
// MAIN
// -----------------------------------------------------------------------------
int main(int argc, char** argv)
{
// Set up the problem to be solved.
string fileMat;
if (argc < 2) {
cerr << "Usage: ./driver_cusparse_hyb ${MATRIX_FILE_NAME}" << endl;
exit(-1);
}
fileMat = argv[1];
//cout << fileMat << endl;
// Get matrix and rhs.
Matrix A;
Vector b;
Vector x;
cusp::io::read_matrix_market_file(A, fileMat);
b.resize(A.num_rows);
{
VectorH x_h(A.num_rows);
for (int i = 0; i < A.num_rows; i++)
x_h[i] = RAND(2,10) / 2;
x = x_h;
}
cusparseHandle_t handle;
cusparseCreate(&handle);
cusparseHybMat_t hybA;
cusparseCreateHybMat(&hybA);
cusparseMatDescr_t descrA;
cusparseCreateMatDescr(&descrA);
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatDiagType(descrA, CUSPARSE_DIAG_TYPE_NON_UNIT);
cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatFillMode(descrA, CUSPARSE_FILL_MODE_LOWER);
cusparseDcsr2hyb(handle, A.num_rows, A.num_rows, descrA, thrust::raw_pointer_cast(&A.values[0]), thrust::raw_pointer_cast(&A.row_offsets[0]), thrust::raw_pointer_cast(&A.column_indices[0]), hybA, A.num_entries, CUSPARSE_HYB_PARTITION_AUTO);
CUDATimer timer;
int counter = 0;
double elapsed = 0.0;
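  // Run the SpMV 10 times; the first iteration is treated as a warm-up and
  // excluded from the averaged timing.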
for (int i = 0; i < 10; i++) {
timer.Start();
spmv(hybA, handle, descrA, thrust::raw_pointer_cast(&x[0]), thrust::raw_pointer_cast(&b[0]));
timer.Stop();
if (i > 0) {
counter ++;
elapsed += timer.getElapsed();
}
}
elapsed /= counter;
//cout << "cuSparse HYB: " << elapsed << endl;
long int rows,cols,temp,NNZ=0;
std::ifstream infile(fileMat.c_str());
string line;
bool flag=false;
while (std::getline(infile, line)){
if(!flag && line.substr(0,1).compare("%")!=0){
std::istringstream iss(line);
iss>>rows>>cols>>temp;
flag=true;
}
if(flag)
NNZ++;
}
cout<<"name = SPMV"<<endl;
while(fileMat.find("/")!=string::npos){
fileMat = fileMat.substr(fileMat.find("/")+1,fileMat.length());
}
cout<<"input = "<<fileMat<<endl;
cout<<"datatype = double"<<endl;
cout<<"dim_x = "<<rows<<endl;
cout<<"dim_y = "<<cols<<endl;
cout<<"NNZ = "<<NNZ-1<<endl;
cout<<"value_type = GFLOPS"<<endl;
cout<<"value = "<<(2*NNZ*1e-9)/(elapsed*1e-3)<<endl;
cusparseDestroyMatDescr(descrA);
cusparseDestroyHybMat(hybA);
cusparseDestroy(handle);
return 0;
}
void spmv(cusparseHybMat_t& hybA,
cusparseHandle_t& handle, cusparseMatDescr_t& descrA,
const float *x, float *y)
{
float one = 1.f, zero = 0.f;
cusparseShybmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, descrA, hybA, x, &zero, y);
}
void spmv(cusparseHybMat_t& hybA,
cusparseHandle_t& handle, cusparseMatDescr_t& descrA,
const double *x, double *y)
{
double one = 1.0, zero = 0.0;
cusparseDhybmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &one, descrA, hybA, x, &zero, y);
}
|
8b8d2689c863653da3072cdb3ca15f3ef4121daa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "depthwise_conv_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_ROCM)
template <typename T>
__global__ void KernelDepthwiseConv(const T *in_data, int count,
const T *weight_data, const T *bias_data,
int in_c, int in_h, int in_w, int out_h,
int out_w, int kernel_size, int stride,
int pad, int bias_term, T *out_data) {
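  // One thread per output element (n, c, h, w): slide the c-th
  // kernel_size x kernel_size filter over the corresponding (padded) input
  // window, accumulate the products, and add the bias if bias_term is set.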
CUDA_KERNEL_LOOP(globalid, count) {
int w = globalid % out_w;
int h = (globalid / out_w) % out_h;
int c = (globalid / out_w / out_h) % in_c;
int n = globalid / out_w / out_h / in_c;
const T *in_offset_data = in_data + (n * in_c + c) * in_h * in_w;
const T *weight_offset_data = weight_data + c * kernel_size * kernel_size;
int hstart = h * stride - pad, wstart = w * stride - pad;
int hend = min(hstart + kernel_size, in_h + pad);
int wend = min(wstart + kernel_size, in_w + pad);
hstart = max(hstart, 0), wstart = max(wstart, 0);
hend = min(hend, in_h), wend = min(wend, in_w);
int khstart = hend < kernel_size ? (kernel_size - hend) : 0;
int kwstart = wend < kernel_size ? (kernel_size - wend) : 0;
auto sum_val = T(0);
for (int kh = hstart; kh < hend; ++kh) {
for (int kw = wstart; kw < wend; ++kw) {
sum_val += in_offset_data[kh * in_w + kw] *
weight_offset_data[(khstart + kh - hstart) * kernel_size +
kwstart + kw - wstart];
}
}
if (bias_term) {
sum_val += bias_data[c];
}
out_data[globalid] = sum_val;
}
}
template <typename T>
void DepthwiseConv(const T *in_data, const VecInt &in_shape,
const T *weight_data, const T *bias_data, int kernel_size,
int stride, int pad, int bias_term, const VecInt &out_shape,
T *out_data) {
int batch = in_shape[0];
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int count = batch * in_c * out_h * out_w;
hipLaunchKernelGGL(( KernelDepthwiseConv<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0, 0,
in_data, count, weight_data, bias_data, in_c, in_h, in_w, out_h, out_w,
kernel_size, stride, pad, bias_term, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void DepthwiseConv(const float *in_data, const VecInt &in_shape,
const float *weight_data, const float *bias_data,
int kernel_size, int stride, int pad, int bias_term,
const VecInt &out_shape, float *out_data);
#endif
} // namespace Vision
} // namespace Shadow | 8b8d2689c863653da3072cdb3ca15f3ef4121daa.cu | #include "depthwise_conv_op.hpp"
namespace Shadow {
namespace Vision {
#if defined(USE_CUDA)
template <typename T>
__global__ void KernelDepthwiseConv(const T *in_data, int count,
const T *weight_data, const T *bias_data,
int in_c, int in_h, int in_w, int out_h,
int out_w, int kernel_size, int stride,
int pad, int bias_term, T *out_data) {
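  // One thread per output element (n, c, h, w): slide the c-th
  // kernel_size x kernel_size filter over the corresponding (padded) input
  // window, accumulate the products, and add the bias if bias_term is set.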
CUDA_KERNEL_LOOP(globalid, count) {
int w = globalid % out_w;
int h = (globalid / out_w) % out_h;
int c = (globalid / out_w / out_h) % in_c;
int n = globalid / out_w / out_h / in_c;
const T *in_offset_data = in_data + (n * in_c + c) * in_h * in_w;
const T *weight_offset_data = weight_data + c * kernel_size * kernel_size;
int hstart = h * stride - pad, wstart = w * stride - pad;
int hend = min(hstart + kernel_size, in_h + pad);
int wend = min(wstart + kernel_size, in_w + pad);
hstart = max(hstart, 0), wstart = max(wstart, 0);
hend = min(hend, in_h), wend = min(wend, in_w);
int khstart = hend < kernel_size ? (kernel_size - hend) : 0;
int kwstart = wend < kernel_size ? (kernel_size - wend) : 0;
auto sum_val = T(0);
for (int kh = hstart; kh < hend; ++kh) {
for (int kw = wstart; kw < wend; ++kw) {
sum_val += in_offset_data[kh * in_w + kw] *
weight_offset_data[(khstart + kh - hstart) * kernel_size +
kwstart + kw - wstart];
}
}
if (bias_term) {
sum_val += bias_data[c];
}
out_data[globalid] = sum_val;
}
}
template <typename T>
void DepthwiseConv(const T *in_data, const VecInt &in_shape,
const T *weight_data, const T *bias_data, int kernel_size,
int stride, int pad, int bias_term, const VecInt &out_shape,
T *out_data) {
int batch = in_shape[0];
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int out_h = out_shape[2], out_w = out_shape[3];
int count = batch * in_c * out_h * out_w;
KernelDepthwiseConv<T><<<GetBlocks(count), NumThreads>>>(
in_data, count, weight_data, bias_data, in_c, in_h, in_w, out_h, out_w,
kernel_size, stride, pad, bias_term, out_data);
CUDA_CHECK(cudaPeekAtLastError());
}
template void DepthwiseConv(const float *in_data, const VecInt &in_shape,
const float *weight_data, const float *bias_data,
int kernel_size, int stride, int pad, int bias_term,
const VecInt &out_shape, float *out_data);
#endif
} // namespace Vision
} // namespace Shadow |