hip_filename (string, lengths 5-84) | hip_content (string, lengths 79-9.69M) | cuda_filename (string, lengths 4-83) | cuda_content (string, lengths 19-9.69M) |
---|---|---|---|
1c87feba498504f435bf1595eea3097c143c0503.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
__global__ void vec_set (size_t n, double *result, double value)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = value;
}
}
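// Illustrative host-side launch sketch (added for clarity; not part of the original
// JCudaVec source, which loads these kernels from Java via JCuda). It shows the
// launch configuration assumed by the "id < n" guard used in every kernel of this
// file: the grid size is rounded up so that at least n threads are started.
static void example_launch_vec_set (double *d_result, double value, size_t n)
{
    const int blockSize = 256;                                    // assumed block size
    const int gridSize = (int)((n + blockSize - 1) / blockSize);  // ceil(n / blockSize)
    hipLaunchKernelGGL(vec_set, dim3(gridSize), dim3(blockSize), 0, 0,
                       n, d_result, value);
    (void)hipDeviceSynchronize();                                 // wait for completion
}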
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_add (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
extern "C"
__global__ void vec_sub (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mul (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_div (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negate (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAdd (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSub (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMul (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDiv (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_lt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_lte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_eq (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_gte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_gt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_ne (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0:0.0;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_lteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_eqScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_gteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_gtScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_neScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0:0.0;
}
}
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acos(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosh(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asin(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinh(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atan(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanh(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrt(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceil (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceil(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cos(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_cosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosh(x[id]);
}
}
// Calculate the cosine of the input argument × π.
extern "C"
__global__ void vec_cospi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cospi(x[id]);
}
}
// Calculate the complementary error function of the input argument.
extern "C"
__global__ void vec_erfc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfc(x[id]);
}
}
// Calculate the inverse complementary error function of the input argument.
extern "C"
__global__ void vec_erfcinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinv(y[id]);
}
}
// Calculate the scaled complementary error function of the input argument.
extern "C"
__global__ void vec_erfcx (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcx(x[id]);
}
}
// Calculate the error function of the input argument.
extern "C"
__global__ void vec_erf (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erf(x[id]);
}
}
// Calculate the inverse error function of the input argument.
extern "C"
__global__ void vec_erfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfinv(y[id]);
}
}
// Calculate the base 10 exponential of the input argument.
extern "C"
__global__ void vec_exp10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp10(x[id]);
}
}
// Calculate the base 2 exponential of the input argument.
extern "C"
__global__ void vec_exp2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp2(x[id]);
}
}
// Calculate the base e exponential of the input argument.
extern "C"
__global__ void vec_exp (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp(x[id]);
}
}
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
__global__ void vec_expm1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expm1(x[id]);
}
}
// Calculate the absolute value of its argument.
extern "C"
__global__ void vec_fabs (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabs(x[id]);
}
}
// Calculate the largest integer less than or equal to x.
extern "C"
__global__ void vec_floor (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = floor(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
__global__ void vec_j0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j0(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
__global__ void vec_j1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1(x[id]);
}
}
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
__global__ void vec_lgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = lgamma(x[id]);
}
}
// Calculate the base 10 logarithm of the input argument.
extern "C"
__global__ void vec_log10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log10(x[id]);
}
}
// Calculate the value of log_e(1 + x).
extern "C"
__global__ void vec_log1p (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log1p(x[id]);
}
}
// Calculate the base 2 logarithm of the input argument.
extern "C"
__global__ void vec_log2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log2(x[id]);
}
}
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
__global__ void vec_logb (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logb(x[id]);
}
}
// Calculate the natural logarithm of the input argument.
extern "C"
__global__ void vec_log (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log(x[id]);
}
}
// Calculate the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdf (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdf(y[id]);
}
}
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinv(y[id]);
}
}
// Calculate reciprocal cube root function.
extern "C"
__global__ void vec_rcbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rcbrt(x[id]);
}
}
// Round input to nearest integer value in floating-point.
extern "C"
__global__ void vec_rint (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rint(x[id]);
}
}
// Round to nearest integer value in floating-point.
extern "C"
__global__ void vec_round (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = round(x[id]);
}
}
// Calculate the reciprocal of the square root of the input argument.
extern "C"
__global__ void vec_rsqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rsqrt(x[id]);
}
}
// Calculate the sine of the input argument.
extern "C"
__global__ void vec_sin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sin(x[id]);
}
}
// Calculate the hyperbolic sine of the input argument.
extern "C"
__global__ void vec_sinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinh(x[id]);
}
}
// Calculate the sine of the input argument × π.
extern "C"
__global__ void vec_sinpi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinpi(x[id]);
}
}
// Calculate the square root of the input argument.
extern "C"
__global__ void vec_sqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sqrt(x[id]);
}
}
// Calculate the tangent of the input argument.
extern "C"
__global__ void vec_tan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tan(x[id]);
}
}
// Calculate the hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_tanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanh(x[id]);
}
}
// Calculate the gamma function of the input argument.
extern "C"
__global__ void vec_tgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tgamma(x[id]);
}
}
// Truncate input argument to the integral part.
extern "C"
__global__ void vec_trunc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = trunc(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
__global__ void vec_y0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y0(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
__global__ void vec_y1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y1(x[id]);
}
}
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
__global__ void vec_copysign (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = copysign(x[id], y[id]);
}
}
// Compute the positive difference between x and y.
extern "C"
__global__ void vec_fdim (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdim(x[id], y[id]);
}
}
// Divide two floating-point values.
extern "C"
__global__ void vec_fdivide (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdivide(x[id], y[id]);
}
}
// Determine the maximum numeric value of the arguments.
extern "C"
__global__ void vec_fmax (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmax(x[id], y[id]);
}
}
// Determine the minimum numeric value of the arguments.
extern "C"
__global__ void vec_fmin (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmin(x[id], y[id]);
}
}
// Calculate the floating-point remainder of x / y.
extern "C"
__global__ void vec_fmod (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmod(x[id], y[id]);
}
}
// Calculate the square root of the sum of squares of two arguments.
extern "C"
__global__ void vec_hypot (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = hypot(x[id], y[id]);
}
}
// Return the next representable floating-point value after the argument.
extern "C"
__global__ void vec_nextafter (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = nextafter(x[id], y[id]);
}
}
// Calculate the value of first argument to the power of second argument.
extern "C"
__global__ void vec_pow (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = pow(x[id], y[id]);
}
}
// Compute the floating-point remainder.
extern "C"
__global__ void vec_remainder (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = remainder(x[id], y[id]);
}
}
| 1c87feba498504f435bf1595eea3097c143c0503.cu | /*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
__global__ void vec_set (size_t n, double *result, double value)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = value;
}
}
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_add (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
extern "C"
__global__ void vec_sub (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mul (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_div (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negate (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAdd (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSub (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMul (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDiv (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_lt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_lte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_eq (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_gte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_gt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0:0.0;
}
}
extern "C"
__global__ void vec_ne (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0:0.0;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_lteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_eqScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_gteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_gtScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0:0.0;
}
}
extern "C"
__global__ void vec_neScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0:0.0;
}
}
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acos(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosh(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asin(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinh(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atan(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanh(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrt(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceil (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceil(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cos(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_cosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosh(x[id]);
}
}
// Calculate the cosine of the input argument × π.
extern "C"
__global__ void vec_cospi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cospi(x[id]);
}
}
// Calculate the complementary error function of the input argument.
extern "C"
__global__ void vec_erfc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfc(x[id]);
}
}
// Calculate the inverse complementary error function of the input argument.
extern "C"
__global__ void vec_erfcinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinv(y[id]);
}
}
// Calculate the scaled complementary error function of the input argument.
extern "C"
__global__ void vec_erfcx (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcx(x[id]);
}
}
// Calculate the error function of the input argument.
extern "C"
__global__ void vec_erf (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erf(x[id]);
}
}
// Calculate the inverse error function of the input argument.
extern "C"
__global__ void vec_erfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfinv(y[id]);
}
}
// Calculate the base 10 exponential of the input argument.
extern "C"
__global__ void vec_exp10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp10(x[id]);
}
}
// Calculate the base 2 exponential of the input argument.
extern "C"
__global__ void vec_exp2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp2(x[id]);
}
}
// Calculate the base e exponential of the input argument.
extern "C"
__global__ void vec_exp (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = exp(x[id]);
}
}
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
__global__ void vec_expm1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = expm1(x[id]);
}
}
// Calculate the absolute value of its argument.
extern "C"
__global__ void vec_fabs (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fabs(x[id]);
}
}
// Calculate the largest integer less than or equal to x.
extern "C"
__global__ void vec_floor (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = floor(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
__global__ void vec_j0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j0(x[id]);
}
}
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
__global__ void vec_j1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = j1(x[id]);
}
}
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
__global__ void vec_lgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = lgamma(x[id]);
}
}
// Calculate the base 10 logarithm of the input argument.
extern "C"
__global__ void vec_log10 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log10(x[id]);
}
}
// Calculate the value of log_e(1 + x).
extern "C"
__global__ void vec_log1p (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log1p(x[id]);
}
}
// Calculate the base 2 logarithm of the input argument.
extern "C"
__global__ void vec_log2 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log2(x[id]);
}
}
// Calculate the floating-point representation of the exponent of the input argument.
extern "C"
__global__ void vec_logb (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = logb(x[id]);
}
}
// Calculate the natural logarithm of the input argument.
extern "C"
__global__ void vec_log (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = log(x[id]);
}
}
// Calculate the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdf (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdf(y[id]);
}
}
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
__global__ void vec_normcdfinv (size_t n, double *result, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = normcdfinv(y[id]);
}
}
// Calculate reciprocal cube root function.
extern "C"
__global__ void vec_rcbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rcbrt(x[id]);
}
}
// Round input to nearest integer value in floating-point.
extern "C"
__global__ void vec_rint (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rint(x[id]);
}
}
// Round to nearest integer value in floating-point.
extern "C"
__global__ void vec_round (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = round(x[id]);
}
}
// Calculate the reciprocal of the square root of the input argument.
extern "C"
__global__ void vec_rsqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = rsqrt(x[id]);
}
}
// Calculate the sine of the input argument.
extern "C"
__global__ void vec_sin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sin(x[id]);
}
}
// Calculate the hyperbolic sine of the input argument.
extern "C"
__global__ void vec_sinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinh(x[id]);
}
}
// Calculate the sine of the input argument × π.
extern "C"
__global__ void vec_sinpi (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sinpi(x[id]);
}
}
// Calculate the square root of the input argument.
extern "C"
__global__ void vec_sqrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = sqrt(x[id]);
}
}
// Calculate the tangent of the input argument.
extern "C"
__global__ void vec_tan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tan(x[id]);
}
}
// Calculate the hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_tanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tanh(x[id]);
}
}
// Calculate the gamma function of the input argument.
extern "C"
__global__ void vec_tgamma (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = tgamma(x[id]);
}
}
// Truncate input argument to the integral part.
extern "C"
__global__ void vec_trunc (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = trunc(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
__global__ void vec_y0 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y0(x[id]);
}
}
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
__global__ void vec_y1 (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = y1(x[id]);
}
}
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
__global__ void vec_copysign (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = copysign(x[id], y[id]);
}
}
// Compute the positive difference between x and y.
extern "C"
__global__ void vec_fdim (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdim(x[id], y[id]);
}
}
// Divide two floating-point values.
extern "C"
__global__ void vec_fdivide (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fdivide(x[id], y[id]);
}
}
// Determine the maximum numeric value of the arguments.
extern "C"
__global__ void vec_fmax (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmax(x[id], y[id]);
}
}
// Determine the minimum numeric value of the arguments.
extern "C"
__global__ void vec_fmin (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmin(x[id], y[id]);
}
}
// Calculate the floating-point remainder of x / y.
extern "C"
__global__ void vec_fmod (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = fmod(x[id], y[id]);
}
}
// Calculate the square root of the sum of squares of two arguments.
extern "C"
__global__ void vec_hypot (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = hypot(x[id], y[id]);
}
}
// Return the next representable floating-point value after the argument.
extern "C"
__global__ void vec_nextafter (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = nextafter(x[id], y[id]);
}
}
// Calculate the value of first argument to the power of second argument.
extern "C"
__global__ void vec_pow (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = pow(x[id], y[id]);
}
}
// Compute the floating-point remainder.
extern "C"
__global__ void vec_remainder (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = remainder(x[id], y[id]);
}
}
|
e3ccff1e7593c970c4257beabb2965270369d5c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TEST_KERNEL_NEWSEED 0
#define TEST_KERNEL_REFILL 0
#define kThreadBlockMinPtsScale 32
#define GPU_POINT_COORDS(pData,point_id,d) (pData[ARRAY_INDEX((point_id)*D + (d),N*D)])
#define ARRAY_INDEX(a,arrlen) (a) // can be used for checking
#define DBSCAN_WITHIDX_CLUSTER_ID_INIT (0)
#define DBSCAN_WITHIDX_CLUSTER_ID_NOISE (1)
#define DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST (2)
#define DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST (DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST+N)
#define DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(x) (x + N)
#define DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(x) (x - N)
#define INF_32 (0x7fffffff) // ; 32 bit max value, signed
#define MY_ASSERT(x,msg) if (!(x)) { printf("ASSERT FAILED (%s) : %s\n",#x,msg); exit(0); }
//~ #define DBSCAN_ID_LOOKUP_IN_VRAM
//~ #define DBSCAN_ID_LOOKUP_IN_VRAM_CHECK
//~ #define EMU_CHECKBOUNDS(name,index,num)
#define EMU_CHECKBOUNDS(name,index,num) if (index < 0 || index >= num) { printf("EMU_CHECKBOUNDS(%s,%d,%d) failed\n",name,(int)index,(int)num); exit(0); }
#define PROFILE_TIME_ENABLE
#ifdef PROFILE_TIME_ENABLE
#define PROFILE_TIME_SECTION_START() ProfileTimerStartSection()
#define PROFILE_TIME_SECTION_STEP(name) float name = ProfileTimerStartSection()
#define PROFILE_TIME_SECTION_SUM(name) name += ProfileTimerStartSection()
#else
#define PROFILE_TIME_SECTION_START()
#define PROFILE_TIME_SECTION_STEP(name)
#define PROFILE_TIME_SECTION_SUM(name)
#endif
/*
int atomicMin(int* address, int val);
int atomicMax(int* address, int val);
int atomicExch(int* address, int val);
uint atomicExch(uint* address, uint val);
float atomicExch(float* address, float val);
unsigned int atomicInc(unsigned int* address,
unsigned int val);
reads the 32-bit word old located at the address address in global or shared
memory, computes ((old >= val) ? 0 : (old+1)), and stores the result
back to memory at the same address. These three operations are performed in one
atomic transaction. The function returns old.
unsigned int atomicDec(unsigned int* address,
unsigned int val);
int atomicCAS(int* address, int compare, int val);
unsigned int atomicCAS(unsigned int* address,
unsigned int compare,
unsigned int val);
unsigned long long int atomicCAS(unsigned long long int* address,
unsigned long long int compare,
unsigned long long int val);
writes (old == compare ? val : old) , and returns old
Atomic functions operating on shared memory and atomic functions operating on
64-bit words are only available for devices of compute capability 1.2 and above.
*/
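// Illustrative sketch (added for clarity; not part of the original file): the claim
// pattern used by DBScanGPU_TryMarkAsCandidate below combines the atomics documented
// above. atomicCAS returns the previous state, so a thread knows whether it was the
// one that claimed the point; atomicInc returns the pre-increment counter value,
// which doubles as a unique slot index into the candidate list.
__device__ void DBScanGPU_ExampleClaimPattern ( unsigned int* pPointState,
                                                unsigned int iPointID,
                                                unsigned int iCandidateID,
                                                unsigned int* piListLen,
                                                unsigned int* pList,
                                                unsigned int iListMaxLen)
{
    // the claim succeeds only if the state was still INIT; the old value is returned either way
    unsigned int iOldState = atomicCAS(&pPointState[iPointID],DBSCAN_WITHIDX_CLUSTER_ID_INIT,iCandidateID);
    if (iOldState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
        unsigned int iSlot = atomicInc(piListLen,0xffffffff); // returns old value, then increments
        if (iSlot < iListMaxLen) pList[iSlot] = iPointID;     // callers clip over-incrementation afterwards
    }
}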
__constant__ unsigned int gConst_pFinalChainIDs[DBSCAN_NUM_SEEDS];
char gsInfoGPUaux[512] = "";
// ***** ***** ***** ***** ***** main kernel
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
#define DBSCAN_LOOKUP_PARAM ,pClusterIDLookup
#else
#define DBSCAN_LOOKUP_PARAM
#endif
// function used by kernel, inlined automatically
__device__ void DBScanGPU_TryMarkAsCandidate ( const unsigned int iCurPointID,
unsigned int* pPointState,
const unsigned int iCandidateID,
const unsigned int iSeedID,
unsigned int* piSeedState_NotListedLen,
unsigned int* piSeedState_ListLen,
unsigned int* pCandidateLists,
unsigned int* pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,unsigned int* pClusterIDLookup // [N]
#endif
)
{
unsigned int iOldState = atomicCAS(&pPointState[iCurPointID],DBSCAN_WITHIDX_CLUSTER_ID_INIT,iCandidateID); // this also marks a few candidates if ownpoint is an outlier
//~ if (iCurPointID == 126530) printf("DEBUG002:set:old=%d,cid=%d,seed=%d\n",iOldState,iCandidateID,iSeedID);
if (iOldState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) { // otherwise just count and don't do anything
// claimed as candidate, add to list
// pointstate already set, need to add to list now
unsigned int iMyListIndex = atomicInc(piSeedState_ListLen,0xffffffff);
if (iMyListIndex < CANDIDATE_LIST_MAXLEN)
pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN+iMyListIndex] = iCurPointID;
else atomicInc(piSeedState_NotListedLen,0xffffffff);
} else if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_NOISE) { // connection with other cluster detected
// iOldState can be candidate or final, transform to one of them for lookup table
if (iOldState < DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
iOldState = DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iOldState);
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID) &&
iOldState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST &&
iOldState < DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST+N) {
// lookup in global mem.. faster for big seednum
unsigned int iOtherSeedID = pClusterIDLookup[iOldState-DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST];
#else
if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID)) { // this could save us the lookup, rather probably case also
// use lookup table to get seedid for connection matrix
unsigned int iOtherSeedID = 0xffffffff;
#pragma unroll // unroll next loop
for (int d=0;d<DBSCAN_NUM_SEEDS;++d) // warning, slow for big seednum
if (gConst_pFinalChainIDs[d] == iOldState) iOtherSeedID = d;
#endif
// set bit in connection matrix.. atomic not needed as at least one of concurrent writes is guaranteed to succeed
if (iOtherSeedID < DBSCAN_NUM_SEEDS) {
//~ atomicCAS(&pConnectionMatrix[iSeedID*DBSCAN_NUM_SEEDS + iOtherSeedID],0,1);
pConnectionMatrix[iSeedID*DBSCAN_NUM_SEEDS + iOtherSeedID] = 1;
}
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
}
#else
}
#endif
}
}
/// kernel code : this gets executed on the GPU
__global__ static void dbscan_kernel_main (
float* pPointCoords, // N*D
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
float* pIndex, // INDEX_NUM_FLOATS
unsigned int* pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ unsigned int* pFinalChainIDs, // DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
unsigned int* pCandidateLists, // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
unsigned int* pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,unsigned int* pClusterIDLookup // [N]
#endif
)
{
#define NEIGHBOR_BUFFER_SIZE (DBSCAN_PARAM_MINPTS-1)
__shared__ unsigned int iOwnPointID;
__shared__ unsigned int iNeighborCount;
__shared__ unsigned int iNeighbors[NEIGHBOR_BUFFER_SIZE];
__shared__ float fOwnPoint[D];
__shared__ float3 vMin;
__shared__ float3 vMax;
int d,n;
// prepare variables
const unsigned int iSeedID = BLOCKIDX; // in [0;DBSCAN_NUM_SEEDS[
const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ if (threadIdx.x == 0) printf("iSeedID=%d, iCandidateID=%d\n",(int)iSeedID,(int)iCandidateID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int* piSeedState_NeighborCount = &pSeedStates[iSeedID*SEEDSTATEDIM + 2]; // ("atomics not in shared mem for compute 1.1");
// get iOwnPointID from seedstate(cpu) or from seedlist
if (threadIdx.x == 0) {
if (*piSeedState_ListLen > CANDIDATE_LIST_MAXLEN) // cut/limit overincrementation before polling
*piSeedState_ListLen = CANDIDATE_LIST_MAXLEN;
iOwnPointID = pSeedStates[iSeedID*SEEDSTATEDIM + 3];
if (iOwnPointID == INF_32) {
// no seed from cpu, take one from list
unsigned int ll = *piSeedState_ListLen;
if (ll > 0) {
unsigned int llm1 = ll - 1;
*piSeedState_ListLen = llm1;
iOwnPointID = pCandidateLists[ARRAY_INDEX(
iSeedID*CANDIDATE_LIST_MAXLEN + llm1,
DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN)];
} else {
//return; // seed terminated and not revivable, algorithm nearing its finish
// but exit function only after syncthreads
}
} else {
pSeedStates[iSeedID*SEEDSTATEDIM + 3] = INF_32;
}
}
__syncthreads();
if (iOwnPointID == INF_32) return;
// read own point coordinates
if (threadIdx.x < D) fOwnPoint[threadIdx.x] = GPU_POINT_COORDS(pPointCoords,iOwnPointID,threadIdx.x);
__syncthreads();
// calculate epsilon area used for index
if (threadIdx.x == 0) {
vMin.x = fOwnPoint[0] - DBSCAN_PARAM_EPSILON;
vMin.y = fOwnPoint[1] - DBSCAN_PARAM_EPSILON;
vMin.z = fOwnPoint[2] - DBSCAN_PARAM_EPSILON;
vMax.x = fOwnPoint[0] + DBSCAN_PARAM_EPSILON;
vMax.y = fOwnPoint[1] + DBSCAN_PARAM_EPSILON;
vMax.z = fOwnPoint[2] + DBSCAN_PARAM_EPSILON;
iNeighborCount = 0;
*piSeedState_NeighborCount = 0;
}
#pragma unroll // unroll next loop
for (d=0;d<kThreadBlockMinPtsScale;++d) {
if (threadIdx.x*kThreadBlockMinPtsScale + d < NEIGHBOR_BUFFER_SIZE)
iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d] = 0xffffffff; // mark as invalid
}
__syncthreads();
#define K_I_0(a) (pIndex[INDEXPOS_0( ((int)x)+(a))])
#define K_I_1(a) (pIndex[INDEXPOS_1((int)x, ((int)y)+(a))])
#define K_I_2(a) (pIndex[INDEXPOS_2((int)x, (int)y, ((int)z)+(a))])
#define K_INIT_INDEX n = (x)*SX + (y)*SY + (z)*SZ; // oldname : iMyLocalDataIndex
#define GPU_IDX1 (K_I_0(1) >= vMin.x && K_I_0(0) <= vMax.x)
#define GPU_IDX2 (K_I_1(1) >= vMin.y && K_I_1(0) <= vMax.y)
#define GPU_IDX3 (K_I_2(1) >= vMin.z && K_I_2(0) <= vMax.z)
#ifndef ENABLE_GPU_IDX3
#undef GPU_IDX3
#define GPU_IDX3 (1)
#endif
// iteration : 299
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
float* iy;
float* iz;
float* iw;
#ifdef MYTEST_DISABLE_INDEX
for (x=0;x<I0;++x) {
for (y=0;y<I0;++y) {
for (z=0;z<I0;++z) { K_INIT_INDEX // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
#else
for (iy = &pIndex[INDEXPOS_0(0 )], x=0;x<I0;++x,++iy) if (iy[1] >= vMin.x && *iy <= vMax.x) {
for (iz = &pIndex[INDEXPOS_1(x,0 )], y=0;y<I0;++y,++iz) if (iz[1] >= vMin.y && *iz <= vMax.y) {
for (iw = &pIndex[INDEXPOS_2(x,y,0)], z=0;z<I0;++z,++iw) if (iw[1] >= vMin.z && *iw <= vMax.z) { K_INIT_INDEX // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
#endif
/*
// iteration : other try, but was slower .. 305 for 500k
unsigned int n1;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
float* iy;
float* iz;
float* iw;
for (iy = &pIndex[INDEXSTART_0], x=0;x<I0*(I0+1) ;x+=(I0+1),++iy) if (iy[1] >= vMin.x && *iy <= vMax.x) {
for (iz = &pIndex[INDEXSTART_1+x], y=0;y<I0*(I0+1) ;y+=(I0+1),++iz) if (iz[1] >= vMin.y && *iz <= vMax.y) { n1 = (x/(I0+1))*SX + (y/(I0+1))*SY;
for (iw = &pIndex[INDEXSTART_2+y+x*I0], z=0;z<I0*SZ ;z+=SZ,++iw) if (iw[1] >= vMin.z && *iw <= vMax.z) { n = n1 + z; // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
*/
//~ for (unsigned int x=0;x<I0;++x) if (GPU_IDX1) {
//~ for (unsigned int y=0;y<I0;++y) if (GPU_IDX2) {
//~ for (unsigned int z=0;z<I0;++z) if (GPU_IDX3) { K_INIT_INDEX // n init here
//~ for (unsigned int w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
//~ for (unsigned int nend = n + SZ;n<nend;n+=kThreadBlockSize) { // slower than normal, not an improvement
// read current point
unsigned int iCurPointID = n + threadIdx.x;
// calc distance
float fSqDist = 0; // 1110.0 + fOwnPoint[0];
#pragma unroll // unroll next loop
for (d=0;d<D;++d) { float a = GPU_POINT_COORDS(pPointCoords,iCurPointID,d) - fOwnPoint[d]; fSqDist += a*a; }
// check distance
if (fSqDist <= DBSCAN_PARAM_SQEPSILON) {
// self is also counted (iCurPointID == iOwnPointID) here for simplicity
// until we know that self is a core-point, only remember neighbors, don't spread yet (atomic op on shared memory)
unsigned int myNeighborIndex = 0x7fffffff;
if (iNeighborCount < NEIGHBOR_BUFFER_SIZE) { // otherwise there are already minpts-1 OTHER neighbors besides me
myNeighborIndex = atomicInc(piSeedState_NeighborCount,0xffffffff); // inc by 1 and return old
if (myNeighborIndex == DBSCAN_PARAM_MINPTS)
iNeighborCount = DBSCAN_PARAM_MINPTS;
}
if (myNeighborIndex < NEIGHBOR_BUFFER_SIZE) {
// not enough points yet, save in buffer for later
iNeighbors[myNeighborIndex] = iCurPointID;
} else {
// index>=NEIGHBOR_BUFFER_SIZE(=(DBSCAN_PARAM_MINPTS-1)) so iNeighborCount>=DBSCAN_PARAM_MINPTS
DBScanGPU_TryMarkAsCandidate(iCurPointID,pPointState,iCandidateID,iSeedID,
piSeedState_NotListedLen,piSeedState_ListLen,pCandidateLists,pConnectionMatrix DBSCAN_LOOKUP_PARAM);
}
}
#ifdef MYTEST_DISABLE_INDEX
}}}}
#else
}}}}
#endif
#undef K_I_0
#undef K_I_1
#undef K_I_2
#undef K_INIT_INDEX
#undef GPU_IDX1
#undef GPU_IDX2
#undef GPU_IDX3
// wait until all are finished, so we know if it's a corepoint
__syncthreads();
// process stored neighbors
#pragma unroll // unroll next loop:
for (d=0;d<kThreadBlockMinPtsScale;++d) {
if (iNeighborCount >= DBSCAN_PARAM_MINPTS &&
threadIdx.x*kThreadBlockMinPtsScale + d < NEIGHBOR_BUFFER_SIZE &&
iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d] < N) {
DBScanGPU_TryMarkAsCandidate(iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d],pPointState,iCandidateID,iSeedID,
piSeedState_NotListedLen,piSeedState_ListLen,pCandidateLists,pConnectionMatrix DBSCAN_LOOKUP_PARAM);
}
}
__syncthreads();
// mark self as either confirmed-core-point or noise
if (threadIdx.x == 0) {
//~ if (iSeedID == 3) printf("DEBUG002:finalseed:%d>=%d\n",*piSeedState_NeighborCount,(int)DBSCAN_PARAM_MINPTS);
//~ if (iOwnPointID == 126530) printf("DEBUG002:final:%d>=%d\n",*piSeedState_NeighborCount,(int)DBSCAN_PARAM_MINPTS);
if (iNeighborCount >= DBSCAN_PARAM_MINPTS)
pPointState[iOwnPointID] = DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID);
else pPointState[iOwnPointID] = DBSCAN_WITHIDX_CLUSTER_ID_NOISE;
if (*piSeedState_ListLen > CANDIDATE_LIST_MAXLEN)
*piSeedState_ListLen = CANDIDATE_LIST_MAXLEN; // clip if over-incremented and didn't fit into list
}
}
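// Hypothetical host-side launch sketch (an assumption, not taken from the original
// project): dbscan_kernel_main derives its seed id from BLOCKIDX, so the grid is
// expected to hold one block per seed chain, with kThreadBlockSize threads each.
#ifndef DBSCAN_ID_LOOKUP_IN_VRAM
static void ExampleLaunchMainKernel ( float* gpu_pPointCoords, unsigned int* gpu_pPointState,
                                      float* gpu_pIndex, unsigned int* gpu_pSeedStates,
                                      unsigned int* gpu_pCandidateLists, unsigned int* gpu_pConnectionMatrix)
{
    hipLaunchKernelGGL(dbscan_kernel_main,
                       dim3(DBSCAN_NUM_SEEDS), dim3(kThreadBlockSize), 0, 0,
                       gpu_pPointCoords, gpu_pPointState, gpu_pIndex,
                       gpu_pSeedStates, gpu_pCandidateLists, gpu_pConnectionMatrix);
}
#endif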
// ***** ***** ***** ***** ***** newseeds
/// helper kernel, searches new seeds
/// kernel code : this gets executed on the GPU
__global__ static void dbscan_kernel_newseeds (
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
unsigned int* pHelperData, // DBSCAN_NUM_SEEDS + x
unsigned int iNewSeedsNeeded
)
{
#define NEWSEEDSCAN_SECTIONSIZE (N/NUM_THREADS_NEWSEEDSCAN)
unsigned int iMyID = MYID; // #define MYID (BLOCKIDX * blockDim.x + threadIdx.x)
unsigned int* piNextScannedOffset = &pHelperData[DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS+iMyID];
unsigned int* piNumFound = &pHelperData[DBSCAN_NUM_SEEDS+0];
unsigned int i = *piNextScannedOffset;
// use shared mem to allow a quick abort
__shared__ unsigned int iNumFoundCache;
if (threadIdx.x == 0) iNumFoundCache = 0;
__syncthreads();
// check for earlyout
if (i >= NEWSEEDSCAN_SECTIONSIZE) return; // this block is finished
const unsigned int base = iMyID * NEWSEEDSCAN_SECTIONSIZE;
// scanloop
for (;i<NEWSEEDSCAN_SECTIONSIZE;++i) {
if (iNumFoundCache >= iNewSeedsNeeded) break; // stop scanning, this point will be scanned again next time
unsigned int n = i + base;
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
unsigned int iMyIndex = atomicInc(piNumFound,0xffffffff);
if (iMyIndex < iNewSeedsNeeded) {
pHelperData[iMyIndex] = n;
} else {
iNumFoundCache = iNewSeedsNeeded; // abort, we've got enough
break; // couldn't register, this point has to be scanned again next time
}
}
// else point cannot be fresh seed, so index can safely be incremented
}
// increment if point is not init -> cannot be fresh seed ever again
// increment if point is init and could be registered as new seed
// DON'T increment if point is init and could NOT be registered as new seed -> has to be scanned again next time
// abort if we found enough seeds
*piNextScannedOffset = i;
// piNextScannedOffset is unique for every thread, not just for every threadblock, so no synching is necessary.
// the number of threads running in parallel here is rather limited, only 4 * threadblocksize
}
// ***** ***** ***** ***** ***** refill
/// helper kernel, refills candidate lists
/// kernel code : this gets executed on the GPU
__global__ static void dbscan_kernel_refill (
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
unsigned int* pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
unsigned int* pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
)
{
const unsigned int iSeedID = BLOCKIDX; // in [0;DBSCAN_NUM_SEEDS[
const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ if (threadIdx.x == 0) printf("iSeedID=%d, iCandidateID=%d\n",(int)iSeedID,(int)iCandidateID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int iMaxTake = min(*piSeedState_NotListedLen,CANDIDATE_LIST_REFILL);
if (*piSeedState_ListLen > 0) return; // still seeds in list, nothing to do
if (*piSeedState_NotListedLen == 0) return; // no candidates left / fresh seed-chain
__shared__ unsigned int iNumFound;
if (threadIdx.x == 0) iNumFound = 0; // piSeedState_ListLen
__syncthreads();
const unsigned int iStep = kThreadBlockSize; // total number of threads for this seed
// iterate over points
for (int n=threadIdx.x; n<N && iNumFound < iMaxTake ; n+=iStep) {
if (pPointState[n] == iCandidateID) {
unsigned int iMyIndex = atomicInc(piSeedState_ListLen,0xffffffff); // has to be cut down in the end
if (iMyIndex < iMaxTake) {
atomicDec(piSeedState_NotListedLen,INF_32);
pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN + iMyIndex] = n;
if (iMyIndex + 1 >= iMaxTake) iNumFound = iMaxTake; // abort, we've got enough
}
}
}
__syncthreads();
// cut down over-incrementation
if (threadIdx.x == 0) {
if (*piSeedState_ListLen > iMaxTake)
*piSeedState_ListLen = iMaxTake;
}
}
// ***** ***** ***** ***** ***** utils : final id
unsigned int giFinalClusterIDs[N];
unsigned int* gpFinalChainIDs = 0;
unsigned int* gpConnectionMatrix = 0;
unsigned int giLastFinalClusterID = 0;
unsigned int GetFinalClusterIDIndexFromSeedID (int iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
MY_ASSERT(iFinalChainID >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST,"GetFinalClusterIDIndexFromSeedID too low");
unsigned int iMyIndex = iFinalChainID - DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
MY_ASSERT(iMyIndex < N,"GetFinalClusterIDIndexFromSeedID too high");
return iMyIndex;
}
unsigned int GetFinalClusterIDBySeedID (int iSeedID) {
return giFinalClusterIDs[GetFinalClusterIDIndexFromSeedID(iSeedID)];
}
void SetFinalClusterIDBySeedID (int iSeedID,unsigned int iFinalClusterID) {
giFinalClusterIDs[GetFinalClusterIDIndexFromSeedID(iSeedID)] = iFinalClusterID;
}
// ***** ***** ***** ***** ***** utils : connection
bool DBScan_CheckConnection (int i,int j) {
EMU_CHECKBOUNDS("DBScan_CheckConnection",i,DBSCAN_NUM_SEEDS)
EMU_CHECKBOUNDS("DBScan_CheckConnection",j,DBSCAN_NUM_SEEDS)
return gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] ||
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i];
}
void DBScan_ClearConnection (int i) {
for (int j=0;j<DBSCAN_NUM_SEEDS;++j) {
gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] = 0;
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i] = 0;
}
}
void DBScan_SetConnection (int i,int j) {
EMU_CHECKBOUNDS("DBScan_SetConnection i",i,DBSCAN_NUM_SEEDS)
EMU_CHECKBOUNDS("DBScan_SetConnection j",j,DBSCAN_NUM_SEEDS)
gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] = 1;
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i] = 1;
}
void DBScan_SpreadConnection_DebugDump (const char* szMsg) {
return;
printf("########## %s\n",szMsg);
for (int c=0;c<=giLastFinalClusterID;++c) {
bool bFound = false;
for (int x=0;x<DBSCAN_NUM_SEEDS;++x) {
unsigned int cid = GetFinalClusterIDBySeedID(x);
if (cid == c) {
if (!bFound) {bFound = true; printf("c:%5d:",c);}
printf("%d,",x);
}
}
if (bFound) printf("\n");
}
}
int giDBScanClusterDoubleAssignmentCounter = 0;
void DBScan_SpreadConnection_Aux (int i) {
unsigned int iFinalClusterIDA = GetFinalClusterIDBySeedID(i);
MY_ASSERT(iFinalClusterIDA != INF_32,"DBScan_SpreadConnection_Aux on seed without clusterid ?");
EMU_CHECKBOUNDS("cpuspread",i,DBSCAN_NUM_SEEDS)
for (int j=0;j<DBSCAN_NUM_SEEDS;++j) {
if (j == i) continue;
if (DBScan_CheckConnection(i,j)) { // j and i are connected
unsigned int iFinalClusterIDB = GetFinalClusterIDBySeedID(j);
if (iFinalClusterIDB != iFinalClusterIDA) {
if (iFinalClusterIDB != INF_32) {
++giDBScanClusterDoubleAssignmentCounter;
printf("warning : DBScan_SpreadConnection_Aux unexpected double assignment : i=%d,j=%d,a=%d,b=%d\n",(int)i,(int)j,(int)iFinalClusterIDA,(int)iFinalClusterIDB);
//MY_ASSERT(0,"DBScan_SpreadConnection_Aux unexpected double assignment"); // fatal ? only during debug probably
}
SetFinalClusterIDBySeedID(j,iFinalClusterIDA);
DBScan_SpreadConnection_Aux(j); // spread
}
}
}
}
// spreads ClusterID Assignments over direct and indirect connections (a<->b<->c)
void DBScan_SpreadConnection () {
//~ printf("DBScan_SpreadConnection start\n");
for (int i=0;i<DBSCAN_NUM_SEEDS;++i) {
if (GetFinalClusterIDBySeedID(i) != INF_32)
DBScan_SpreadConnection_Aux(i);
}
//~ printf("DBScan_SpreadConnection end\n");
}
// ***** ***** ***** ***** ***** DBScanVerifyCandidates
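// Debug-only consistency check: downloads point states, seed states and candidate lists from
// vram and verifies that each seed's counters (notlisted + listlen + own point) match the number
// of points actually marked with that seed's candidate id. Enable it by defining
// DBSCAN_VERIFY_CANDIDATES as a call to DBScanVerifyCandidates (see the commented macro above).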
//~ #define DBSCAN_VERIFY_CANDIDATES(sectionname) DBScanVerifyCandidates(p,sectionname,gpu_pPointState,gpu_pSeedStates,gpu_pCandidateLists);
#ifndef DBSCAN_VERIFY_CANDIDATES
#define DBSCAN_VERIFY_CANDIDATES(sectionname)
#endif
void DBScanVerifyCandidates (DBScanData* p,const char* szSectionName,unsigned int* gpu_pPointState,unsigned int* gpu_pSeedStates,unsigned int* gpu_pCandidateLists) {
hipError_t myLastErr;
unsigned int* pPointState = (unsigned int*)malloc(sizeof(p->pClusterIDs));
unsigned int* pSeedStates = (unsigned int*)malloc(sizeof(p->pSeedStates));
unsigned int* pCandidateLists = (unsigned int*)malloc(sizeof(p->pCandidateLists));
// download data from vram
CUDA_SAFE_CALL( hipMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pPointState candidate-verify")
CUDA_SAFE_CALL( hipMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pSeedStates candidate-verify")
CUDA_SAFE_CALL( hipMemcpy(pCandidateLists,gpu_pCandidateLists,sizeof(p->pCandidateLists),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pCandidateLists candidate-verify")
// count candidates
int c_candidates[DBSCAN_NUM_SEEDS];
int c_candidates_last[DBSCAN_NUM_SEEDS];
int n,iSeedID;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) c_candidates[iSeedID] = 0;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) c_candidates_last[iSeedID] = -1;
//const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ CUDA_SAFE_CALL( hipMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("hipMemcpy pFinalChainIDs") // const mem
for (n=0;n<N;++n) {
unsigned int iState = pPointState[n]; // iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if ( iState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST) {
} else if (iState >= DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST) {
int iFoundSeedID = -1;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iCandidateID == iState) {
++c_candidates[iSeedID];
c_candidates_last[iSeedID] = n;
iFoundSeedID = iSeedID;
}
}
if (iFoundSeedID == -1) {
printf("DBScanVerifyCandidates(%s) failed to find seed state=%d\n",szSectionName,iState);
exit(0);
}
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE) {
}
}
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iSeedState_NotListedLen = pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int iSeedState_ListLen = pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int iOwnPointID = pSeedStates[iSeedID*SEEDSTATEDIM + 3];
unsigned int iRecordedCount = iSeedState_NotListedLen + iSeedState_ListLen;
unsigned int iState = 0xffffffff;
if (iOwnPointID != INF_32) {
iRecordedCount += 1;
iState = pPointState[iOwnPointID];
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iState != iCandidateID) {
printf("DBScanVerifyCandidates(%s) failed prime candidate bad, iSeedID=%d n=%d state=%d\n",szSectionName,iSeedID,iOwnPointID,iState);
exit(0);
}
}
int iRealCount = c_candidates[iSeedID];
if (iRealCount != iRecordedCount) {
printf("DBScanVerifyCandidates(%s) failed, iSeedID=%d iOwnPointID=%d:%d lastreal=%d realcount=%d : %d=iRecordedCount=NL:%d+L:%d mismatch\n",szSectionName,iSeedID,iOwnPointID,iState,c_candidates_last[iSeedID],iRealCount,iRecordedCount,iSeedState_NotListedLen,iSeedState_ListLen);
exit(0);
}
}
free(pPointState);
free(pSeedStates);
free(pCandidateLists);
}
// ***** ***** ***** ***** ***** cpu main
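// Small host-side helpers: VRAM_WRITE_UINT / VRAM_READ_UINT copy a single 32-bit word between
// host and device; convenient, but note that every call is a separate hipMemcpy.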
//~ unsigned int iVRamWriterUINT;
//~ #define VRAM_WRITE_UINT(p,v) { iVRamWriterUINT = v; CUDA_SAFE_CALL(hipMemcpy(p,&iVRamWriterUINT,sizeof(iVRamWriterUINT),hipMemcpyHostToDevice)); HANDLE_ERROR("VRAM_WRITE_UINT" #p) } // ","##v
void VRAM_WRITE_UINT(unsigned int* p,unsigned int v) {
hipError_t myLastErr;
CUDA_SAFE_CALL(hipMemcpy(p,&v,sizeof(v),hipMemcpyHostToDevice)); HANDLE_ERROR("hipMemcpy VRAM_WRITE_UINT");
}
unsigned int VRAM_READ_UINT(unsigned int* p) {
hipError_t myLastErr;
unsigned int v = 0;
CUDA_SAFE_CALL(hipMemcpy(&v,p,sizeof(v),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy VRAM_READ_UINT");
return v;
}
void DBScanAssignFinalChainID (unsigned int iSeedID,unsigned int iFinalChainID,unsigned int *gpu_pClusterIDLookup) {
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
unsigned int iOldFinalChainID = gpFinalChainIDs[iSeedID];
int oldn = iOldFinalChainID -DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
int newn = iFinalChainID -DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
if (oldn >= 0 && oldn < N) VRAM_WRITE_UINT(&gpu_pClusterIDLookup[oldn],INF_32);
if (newn >= 0 && newn < N) VRAM_WRITE_UINT(&gpu_pClusterIDLookup[newn],iSeedID);
#endif
gpFinalChainIDs[iSeedID] = iFinalChainID;
}
#define VRAM_READWRITE_UNIT_TEST
void dbscan_gpu (DBScanData* p) {
// a few sanity checks for parameters
printf("dbscan_gpu SZ=%d kThreadBlockSize=%d\n",(int)SZ,(int)kThreadBlockSize);
if ((SZ % kThreadBlockSize) != 0) {
printf("##### ERROR, SZ(%d) must be a multiple of kThreadBlockSize(%d)\n",(int)SZ,(int)kThreadBlockSize);
printf("##### try increasing the number of datapoints, or decreasing IO(%d)\n",(int)I0);
// because of kernel : for (unsigned int w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
exit(0);
}
if ((kThreadBlockSize % 64) != 0) {
printf("threadblocksize should be multiple of 64.. 64 is ok if there are enough blocks running in parallel\n");
// cuda manual, number of parallel running threads etc, recommended for register access or so
exit(0);
}
if ((N % kThreadBlockSize) != 0) {
printf("N=%d should be a multiple of kThreadBlockSize=%d\n",(int)N,(int)kThreadBlockSize);
exit(0);
}
//~ if (kThreadBlockSize < DBSCAN_PARAM_MINPTS) {
if (kThreadBlockSize * kThreadBlockMinPtsScale < DBSCAN_PARAM_MINPTS) {
printf("(kThreadBlockSize * kThreadBlockMinPtsScale) must be >= DBSCAN_PARAM_MINPTS, other case not yet implemented (processing stored neightbors)\n");
// kernel : neightbors
exit(0);
}
if (kThreadBlockSize < D) {
printf("kThreadBlockSize must be >= D, other case not yet implemented (reading in mainpoint)\n");
// kernel : reading in mainpoint
exit(0);
}
if (GRIDHEIGHT != 1) {
printf("error, GRIDHEIGHT=1 assumed for MYID and BLOCKIDX implementation\n");
// MYID and BLOCKIDX
exit(0);
}
if ((DBSCAN_NUM_SEEDS % kThreadBlockSize) != 0) {
printf("DBSCAN_NUM_SEEDS(%d) must be a multiple of kThreadBlockSize(%d)\n",(int)DBSCAN_NUM_SEEDS,(int)kThreadBlockSize);
exit(0);
}
//~ if ((DBSCAN_NUM_SEEDS % (GRIDHEIGHT * kThreadBlockSize)) != 0) {
//~ printf("DBSCAN_NUM_SEEDS(%d) must be a multiple of (GRIDHEIGHT(%d) * kThreadBlockSize(%d))\n",(int)GRIDHEIGHT,(int)kThreadBlockSize,(int)kThreadBlockSize);
//~ // grid_size_one_thread_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT / kThreadBlockSize; UNUSED
//~ exit(0);
//~ }
// vars
int i,j,n;
hipError_t myLastErr;
#ifndef __DEVICE_EMULATION__
//~ CUDA_SAFE_CALL(hipSetDevice(0)); /// GT 8500
CUDA_SAFE_CALL(hipSetDevice(1)); /// GTX 280
#endif
// final cluster ids
for (i=0;i<N;++i) giFinalClusterIDs[i] = INF_32; // giFinalClusterIDs[pFinalChainIDs[iSeedID] = iFinalChainID] = iFinalClusterID;
// shortcuts
bool bFreshSeedsLeft = true;
unsigned int* pSeedStates = p->pSeedStates; // (atomicops)
unsigned int* pHelperData = p->pHelperData; // newseed
unsigned int* pPointState = (unsigned int*)p->pClusterIDs; // for final evaluation
gpFinalChainIDs = p->pFinalChainIDs; // old : pFinalChainIDs
gpConnectionMatrix = p->pConnectionMatrix;
// allocate and init gpu buffers
#define ALLOCATE_GPU_BUFFER(type,name,datasize) type name = 0; CUDA_SAFE_CALL(hipMalloc((void**)&name,datasize));
ALLOCATE_GPU_BUFFER(float* ,gpu_pPointCoords, sizeof(p->pPoints)); // N*D
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pPointState, sizeof(p->pClusterIDs));// N (outlier,candidate:chain-id,finished:chain-id)
ALLOCATE_GPU_BUFFER(float* ,gpu_pIndex, sizeof(p->pIndex)); // INDEX_NUM_FLOATS
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pSeedStates, sizeof(p->pSeedStates));// DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pFinalChainIDs, sizeof(p->pFinalChainIDs));// DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pCandidateLists, sizeof(p->pCandidateLists));// DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pConnectionMatrix, sizeof(p->pConnectionMatrix));// DBSCAN_NUM_SEEDS^2
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pHelperData, sizeof(p->pHelperData));// DBSCAN_NUM_SEEDS + x
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pClusterIDLookup, sizeof(unsigned int)*N);
#else
unsigned int* gpu_pClusterIDLookup = 0;
#endif
// init vram data to zero
CUDA_SAFE_CALL( hipMemset(gpu_pPointState, 0, sizeof(p->pClusterIDs)));
CUDA_SAFE_CALL( hipMemset(gpu_pCandidateLists, 0, sizeof(p->pCandidateLists)));
CUDA_SAFE_CALL( hipMemset(gpu_pConnectionMatrix, 0, sizeof(p->pConnectionMatrix)));
CUDA_SAFE_CALL( hipMemset(gpu_pHelperData, 0, sizeof(p->pHelperData)));
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
CUDA_SAFE_CALL( hipMemset(gpu_pClusterIDLookup, 0xFF, sizeof(unsigned int)*N));
printf("gpu_pClusterIDLookup[0]=0x%08x\n",(int)VRAM_READ_UINT(&gpu_pClusterIDLookup[0]));
#endif
#ifdef VRAM_READWRITE_UNIT_TEST
printf("N=%d\n",(int)N);
#define VRAM_READWRITE_UNIT_TEST_ONE(addr,v) VRAM_WRITE_UINT(addr,v); if (VRAM_READ_UINT(addr) != v) { printf("writefail v=%d\n",(int)v); exit(0); } else { printf("vramwriteunit ok v=%d\n",(int)v);}
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],1);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],2);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],1);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],2);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],0);
#endif
// choose initial seeds
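// Pick DBSCAN_NUM_SEEDS distinct random points; each one gets its own fresh chain id, is written
// to vram as the candidate of that chain, and is stored in seed-state slot 3 so the main kernel
// uses it as its first own point.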
printf("start choose initial\n");
unsigned int gNextFinalChainID = DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
int iSeedPoints[DBSCAN_NUM_SEEDS];
int iSeedID;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
// pick one n randomly
bool bAlreadyUsed;
do {
n = rand() % (N-1);
bAlreadyUsed = false;
for (j=0;j<iSeedID;++j) if (iSeedPoints[j] == n) bAlreadyUsed = true;
} while (bAlreadyUsed) ;
iSeedPoints[iSeedID] = n;
unsigned int iFinalChainID = gNextFinalChainID++;
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
//~ printf("chooseinit i=%d n=%d finalchainid=%d\n",(int)i,(int)n,(int)iFinalChainID);
DBScanAssignFinalChainID(iSeedID,iFinalChainID,gpu_pClusterIDLookup);
pSeedStates[SEEDSTATEDIM*iSeedID + 0] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 1] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 2] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n;
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
//~ printf("dbscan init : iSeedID=%d n=%d iCandidateID=%d\n",iSeedID,(int)n,iCandidateID);
}
// copy data from ram to vram
CUDA_SAFE_CALL( hipMemcpy(gpu_pPointCoords, p->pPoints, sizeof(p->pPoints), hipMemcpyHostToDevice )); HANDLE_ERROR("hipMemcpy pPoints")
CUDA_SAFE_CALL( hipMemcpy(gpu_pIndex, p->pIndex, sizeof(p->pIndex), hipMemcpyHostToDevice )); HANDLE_ERROR("hipMemcpy pIndex")
CUDA_SAFE_CALL( hipMemcpy(gpu_pSeedStates, p->pSeedStates, sizeof(p->pSeedStates), hipMemcpyHostToDevice )); HANDLE_ERROR("hipMemcpy pSeedStates")
printf("start copy to const vram\n");
// copy data from ram to constant vram
CUDA_SAFE_CALL( hipMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("hipMemcpy pFinalChainIDs") // const mem
printf("start size\n");
//~ DBSCAN_VERIFY_CANDIDATES("prepare1")
// kernel setup : grid_size, block_size, mem_shared
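// grid_size_many_threads_per_seed : one thread block (kThreadBlockSize threads) per seed, used for
// the main and refill kernels. grid_size_4 : 4 blocks, used for the newseed scan kernel.
// grid_size_one_thread_per_seed is computed but currently unused.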
dim3 grid_size_many_threads_per_seed;
dim3 grid_size_one_thread_per_seed;
dim3 grid_size_4;
dim3 block_size;
unsigned int mem_shared = 0; // this is for dynamic alloc of shared mem, we alloc statically
grid_size_many_threads_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT; // TODO : make sure N is a multiple of kThreadBlockSize
grid_size_many_threads_per_seed.y = GRIDHEIGHT;
grid_size_many_threads_per_seed.z = 1;
grid_size_one_thread_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT / kThreadBlockSize;
grid_size_one_thread_per_seed.y = GRIDHEIGHT;
grid_size_one_thread_per_seed.z = 1;
grid_size_4.x = 4;
grid_size_4.y = 1;
grid_size_4.z = 1;
block_size.x = kThreadBlockSize;
block_size.y = 1;
block_size.z = 1;
#define MB(a) ((int)(a)/1024/1024)
printf("alloc %d %d %d %d gridsize_x=%d\n",MB(p->pPoints),MB(p->pCandidateLists),MB(p->pClusterIDs),MB(p->pConnectionMatrix),grid_size_many_threads_per_seed.x);
// **** TEST NEWSEED
if (TEST_KERNEL_NEWSEED) {
printf("TEST_KERNEL_NEWSEED start\n");
int iPointsLeft = N;
for (int iTestI=0;iTestI<10000000;++iTestI) {
unsigned int iNewSeedsNeeded = DBSCAN_NUM_SEEDS;
// helper kernel : search a few new seeds (why kernel : candidate ids are in vram)
// new seeds : sum_time="one iteration" : save index last checked and increment until next free point is found
VRAM_WRITE_UINT(&gpu_pHelperData[DBSCAN_NUM_SEEDS+0],0); // counter
hipLaunchKernelGGL(( dbscan_kernel_newseeds), dim3(grid_size_4), dim3(block_size), mem_shared , 0,
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pHelperData,
iNewSeedsNeeded
);
// download gpu_pHelperData from vram
CUDA_SAFE_CALL( hipMemcpy(pHelperData,gpu_pHelperData,sizeof(unsigned int) * (DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy pHelperData readback")
unsigned int iNewSeedsFound = min(iNewSeedsNeeded,pHelperData[DBSCAN_NUM_SEEDS+0]);
// assign as noise
iPointsLeft -= iNewSeedsFound;
for (i=0;i<iNewSeedsFound;++i) {
n = pHelperData[i];
if (n < 0 || n >= N) printf("bad n:%d\n",n);
VRAM_WRITE_UINT(&gpu_pPointState[n],DBSCAN_WITHIDX_CLUSTER_ID_NOISE);
}
// download pointstates from vram and count states
CUDA_SAFE_CALL( hipMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pPointState final download")
int cinit = 0;
int cnoise = 0;
int crest = 0;
for (n=0;n<N;++n) {
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_INIT ) { ++cinit; continue; }
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_NOISE ) { ++cnoise; continue; }
++crest;
}
printf("iNewSeedsFound=%3d pleft=%6d cinit=%6d,cnoise=%6d,crest=%d over=%d\n",iNewSeedsFound,iPointsLeft,cinit,cnoise,crest,iPointsLeft-cinit);
if (iNewSeedsFound == 0) break;
}
printf("TEST_KERNEL_NEWSEED end\n");
return;
}
// **** TEST REFILL
if (TEST_KERNEL_REFILL) {
printf("TEST_KERNEL_REFILL start\n");
// download pointstates
CUDA_SAFE_CALL( hipMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pPointState final download")
// prepare test environment
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iSetNonList = 10;
unsigned int iSetList = 0;
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
VRAM_WRITE_UINT(&gpu_pSeedStates[iSeedID*SEEDSTATEDIM + 0],iSetNonList);
VRAM_WRITE_UINT(&gpu_pSeedStates[iSeedID*SEEDSTATEDIM + 1],iSetList);
// pick random points with "init" state as new unmarked
for (i=0;i<iSetNonList;++i) {
// pick one n randomly
do {
n = rand() % (N-1);
if (pPointState[n] != DBSCAN_WITHIDX_CLUSTER_ID_INIT) continue;
pPointState[n] = iCandidateID;
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
break;
} while (true) ;
}
}
printf("TEST_KERNEL_REFILL kernel?\n");
// launch refill kernel
hipLaunchKernelGGL(( dbscan_kernel_refill), dim3(grid_size_many_threads_per_seed), dim3(block_size), mem_shared , 0,
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * SEEDSTATEDIM (notlisted,listlen,iNeighBorCount : atomicops)
gpu_pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
);
// init counter
int iSeedDataCounterF[DBSCAN_NUM_SEEDS];
int iSeedDataCounterC[DBSCAN_NUM_SEEDS];
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) iSeedDataCounterF[iSeedID] = 0;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) iSeedDataCounterC[iSeedID] = 0;
// download pointstates from vram and count states
CUDA_SAFE_CALL( hipMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pPointState final download")
int cinit = 0;
int cnoise = 0;
int crest = 0;
for (n=0;n<N;++n) {
unsigned int iPointState = pPointState[n];
if (iPointState == DBSCAN_WITHIDX_CLUSTER_ID_INIT ) { ++cinit; continue; }
if (iPointState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE ) { ++cnoise; continue; }
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iPointState == iFinalChainID) ++iSeedDataCounterF[iSeedID];
if (iPointState == iCandidateID ) ++iSeedDataCounterC[iSeedID];
}
++crest;
}
printf("cinit=%6d,cnoise=%6d,crest=%d\n",cinit,cnoise,crest);
// download seedstate from vram
CUDA_SAFE_CALL( hipMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pSeedStates readback")
// analyse seeds
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
printf("seed:%3d nonl=%d list=%d final=%d cand=%d ",iSeedID,
*piSeedState_NotListedLen,
*piSeedState_ListLen,
iSeedDataCounterF[iSeedID],
iSeedDataCounterC[iSeedID]);
for (i=0;i<*piSeedState_ListLen;++i) {
unsigned int n = VRAM_READ_UINT(&gpu_pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN+i]);
unsigned int iState = pPointState[n];
printf("%d%s,",n,(iState != iCandidateID)?"ERROR":"");
}
printf("\n");
}
printf("TEST_KERNEL_REFILL end\n");
return;
}
// **** MAIN
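// Main loop, one iteration = one round of chain growth for all seeds:
// 1. dbscan_kernel_main : every seed expands its current point, marking neighbors as candidates
// 2. download seed states and classify each seed (still has list / needs refill / finished)
// 3. finished seeds : spread connections, assign final cluster ids, start a new chain
// (fresh seed found by the newseed kernel, or split off the largest candidate list)
// 4. dbscan_kernel_refill : move not-listed candidates back into the per-seed lists
// 5. terminate when no seed can be revived anymore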
float t_kernel_main = 0.0;
float t_download_states = 0.0;
float t_check_seedstates = 0.0;
float t_finished_seeds = 0.0;
float t_kernel_refill = 0.0;
float t_cleanup = 0.0;
float t_debug = 0.0;
printf("prepare check\n"); DBSCAN_VERIFY_CANDIDATES("prepare")
printf("start loop\n");
PROFILE_TIME_SECTION_START();
int iMainRoughPointsLeft = N;
int iOutout = 0;
do {
hipLaunchKernelGGL(( dbscan_kernel_main), dim3(grid_size_many_threads_per_seed), dim3(block_size), mem_shared , 0,
gpu_pPointCoords, // N*D
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pIndex, // INDEX_NUM_FLOATS
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ unsigned int* pFinalChainIDs, // DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
gpu_pCandidateLists, // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
gpu_pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,gpu_pClusterIDLookup // [N]
#endif
);
CUDA_SAFE_CALL( hipDeviceSynchronize());HANDLE_ERROR("hipDeviceSynchronize")
PROFILE_TIME_SECTION_SUM(t_kernel_main);
DBSCAN_VERIFY_CANDIDATES("kernel_main") PROFILE_TIME_SECTION_SUM(t_debug);
// download seedstate from vram
CUDA_SAFE_CALL( hipMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pSeedStates readback")
PROFILE_TIME_SECTION_SUM(t_download_states);
// check seedstates
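// For every seed: list not empty -> keep working; list empty but notlisted > 0 -> refill needed;
// both zero -> the chain is finished and the seed needs a new start point.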
bool bListRefillNeeded = false;
int iNewSeedsNeeded = 0;
bool bSeedFinished[DBSCAN_NUM_SEEDS];
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
bSeedFinished[iSeedID] = false;
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int* piSeedState_NeighborCount = &pSeedStates[iSeedID*SEEDSTATEDIM + 2];
if (*piSeedState_ListLen > 0) continue;
if (*piSeedState_NotListedLen > 0) {
// refill needed
bListRefillNeeded = true;
} else {
// seed finished
bSeedFinished[iSeedID] = true;
iNewSeedsNeeded++;
// if this is the first finished seed found this round : download connection matrix and spread
if (iNewSeedsNeeded == 1) {
CUDA_SAFE_CALL( hipMemcpy(gpConnectionMatrix,gpu_pConnectionMatrix,sizeof(p->pConnectionMatrix),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy pConnectionMatrix readback")
DBScan_SpreadConnection_DebugDump("seedcheck,first");
DBScan_SpreadConnection();
}
// cleanup
//~ if (*piSeedState_NeighborCount >= DBSCAN_PARAM_MINPTS) { // TODO (spotted and removed while writing the thesis text) : what was this if for ? (bad if last point is noise?)
if (GetFinalClusterIDBySeedID(iSeedID) == INF_32) { // no final id assigned yet, need to generate a new one
unsigned int iFinalClusterID = ++giLastFinalClusterID;
//~ printf("assign seed=%3d cid=%5d\n",iSeedID,iFinalClusterID);
SetFinalClusterIDBySeedID(iSeedID,iFinalClusterID); // generate new cluster id and assign
DBScan_SpreadConnection_DebugDump("seedcheck,cleanup");
DBScan_SpreadConnection_Aux(iSeedID); // spread
}
//~ }
// clear connection matrix entries for this seed
DBScan_ClearConnection(iSeedID);
// generate and assign new final chain id (upload to constant vram later)
unsigned int iFinalChainID = gNextFinalChainID++;
DBScanAssignFinalChainID(iSeedID,iFinalChainID,gpu_pClusterIDLookup);
}
//~ printf("seed %4d : %NotListedLen=%6d ListLen=%6d neighbor=%6d\n",(int)iSeedID,
//~ (int)*piSeedState_NotListedLen,(int)*piSeedState_ListLen,(int)*piSeedState_NeighborCount);
}
PROFILE_TIME_SECTION_SUM(t_check_seedstates);
DBSCAN_VERIFY_CANDIDATES("check_seedstates") PROFILE_TIME_SECTION_SUM(t_debug);
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM_CHECK
// check
if (1) {
unsigned int* temp = (unsigned int*)malloc(N*sizeof(unsigned int));
// download pointstates from vram
CUDA_SAFE_CALL( hipMemcpy(temp,gpu_pClusterIDLookup,N*sizeof(unsigned int),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pClusterIDLookup debug download")
int c = 0;
for (int i=0;i<N;++i) {
if (temp[i] < INF_32) {
++c;
}
}
if (c > DBSCAN_NUM_SEEDS) {
printf("lookup debug : too many set %d,%d\n",c,(int)DBSCAN_NUM_SEEDS);
exit(0);
}
free(temp);
PROFILE_TIME_SECTION_SUM(t_debug);
}
#endif
#endif
// process finished seeds
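// Finished seeds first try to grab a fresh (still INIT) point via the newseed kernel; if none
// are left, they steal the last candidate from the seed with the largest list (a split), which
// is recorded in the connection matrix so both parts end up with the same cluster id.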
int iNumberOfNonRevivableSeeds = 0;
if (iNewSeedsNeeded > 0) {
// upload changed final ids (new chains started)
CUDA_SAFE_CALL( hipMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("hipMemcpy gpFinalChainIDs upload2") // const mem
// search new seeds in vram by iterating over gpu_pPointState
unsigned int iNewSeedsFound = 0;
if (bFreshSeedsLeft) {
// helper kernel : search a few new seeds (why kernel : candidate ids are in vram)
// new seeds : sum_time="one iteration" : save index last checked and increment until next free point is found
VRAM_WRITE_UINT(&gpu_pHelperData[DBSCAN_NUM_SEEDS+0],0); // counter
MY_ASSERT(NUM_THREADS_NEWSEEDSCAN == grid_size_4.x * block_size.x,"newseeds check");
hipLaunchKernelGGL(( dbscan_kernel_newseeds), dim3(grid_size_4), dim3(block_size), mem_shared , 0,
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pHelperData,
iNewSeedsNeeded
);
// download gpu_pHelperData from vram
CUDA_SAFE_CALL( hipMemcpy(pHelperData,gpu_pHelperData,sizeof(unsigned int) * (DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy pHelperData readback")
iNewSeedsFound = min(iNewSeedsNeeded,pHelperData[DBSCAN_NUM_SEEDS+0]);
// remember when no fresh seeds can be found anymore
if (iNewSeedsFound < iNewSeedsNeeded) bFreshSeedsLeft = false;
}
// process seeds : assign new seeds or split existing ones
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
// skip seeds that still have work to do
if (!bSeedFinished[iSeedID]) continue;
// calc common helper vars, and reset seed state
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
pSeedStates[SEEDSTATEDIM*iSeedID + 0] = 0; //NotListedLen
pSeedStates[SEEDSTATEDIM*iSeedID + 1] = 0; //ListLen
pSeedStates[SEEDSTATEDIM*iSeedID + 2] = 0; //NeighborCount
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = INF_32;
// did we find enough free seeds or do we have to split existing chains ?
if (iNewSeedsFound > 0) {
iNewSeedsFound -= 1;
// assign new seed
n = pHelperData[iNewSeedsFound]; // iNewSeedN
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n; // mark for instant use
// pPointState : write iCandidateID, otherwise it might be marked as candidate by another seedchain
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
} else {
//~ printf("split!\n");
// split
// choose largest existing
unsigned int iFoundOtherSeedID = INF_32;
unsigned int iFoundOtherListLen = 0;
for (unsigned int iOtherSeedID=0;iOtherSeedID<DBSCAN_NUM_SEEDS;++iOtherSeedID) {
unsigned int iOtherListLen = pSeedStates[iOtherSeedID*SEEDSTATEDIM + 1];
if (iFoundOtherSeedID == INF_32 || iOtherListLen > iFoundOtherListLen) {
iFoundOtherSeedID = iOtherSeedID;
iFoundOtherListLen = iOtherListLen;
}
}
// split chosen
if (iFoundOtherListLen > 1) {
// split only the last candidate from otherseed
unsigned int* iSplitOriginLen = &pSeedStates[iFoundOtherSeedID*SEEDSTATEDIM + 1];
unsigned int iLastIndex = *iSplitOriginLen - 1;
*iSplitOriginLen -= 1;
unsigned int n = VRAM_READ_UINT(&gpu_pCandidateLists[iFoundOtherSeedID*CANDIDATE_LIST_MAXLEN+iLastIndex]);
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n;
// change candidate seed-assignment to avoid refill-confusion for otherseed
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
// mark split-connection
DBScan_SetConnection(iSeedID,iFoundOtherSeedID);
} else {
++iNumberOfNonRevivableSeeds;
// split not possible, algorithm nearing its end
//~ printf("iSeedID:%03d split not possible anymore, algorithm nearing its end\n",(int)iSeedID);
// listlen=0,nonlistlen=0,nextid=INF_32 signals end
}
}
}
// upload changed connection matrix (split)
CUDA_SAFE_CALL( hipMemcpy(gpu_pConnectionMatrix,gpConnectionMatrix,sizeof(p->pConnectionMatrix),hipMemcpyHostToDevice )); HANDLE_ERROR("hipMemcpy pConnectionMatrix upload2")
// upload updated states to vram (changed by new cluster started, not by refill)
CUDA_SAFE_CALL( hipMemcpy(gpu_pSeedStates, p->pSeedStates, sizeof(p->pSeedStates), hipMemcpyHostToDevice )); HANDLE_ERROR("hipMemcpy pSeedStates upload2")
}
PROFILE_TIME_SECTION_SUM(t_finished_seeds);
DBSCAN_VERIFY_CANDIDATES("finished_seeds") PROFILE_TIME_SECTION_SUM(t_debug);
// helper kernel : refill lists (why kernel : candidate ids are in vram)
if (bListRefillNeeded) {
hipLaunchKernelGGL(( dbscan_kernel_refill), dim3(grid_size_many_threads_per_seed), dim3(block_size), mem_shared , 0,
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * SEEDSTATEDIM (notlisted,listlen,iNeighBorCount : atomicops)
gpu_pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
);
CUDA_SAFE_CALL( hipDeviceSynchronize());HANDLE_ERROR("hipDeviceSynchronize")
}
PROFILE_TIME_SECTION_SUM(t_kernel_refill);
DBSCAN_VERIFY_CANDIDATES("kernel_refill") PROFILE_TIME_SECTION_SUM(t_debug);
// DETECT algorithm termination
if (iNumberOfNonRevivableSeeds > 0) printf("iNumberOfNonRevivableSeeds=%d\n",(int)iNumberOfNonRevivableSeeds);
if (iNumberOfNonRevivableSeeds >= DBSCAN_NUM_SEEDS) {
printf("algorithm finished\n");
break;
}
//printf("DEBUG:BREAK\n"); break;
iMainRoughPointsLeft -= DBSCAN_NUM_SEEDS - iNumberOfNonRevivableSeeds;
if ((iOutout++ % 16) == 0 || iOutout < 16) printf("iMainRoughPointsLeft=%7d iNewSeedsNeeded=%3d\n",iMainRoughPointsLeft,iNewSeedsNeeded);
} while (1) ;
// cleanup
// download pointstates from vram
CUDA_SAFE_CALL( hipMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),hipMemcpyDeviceToHost)); HANDLE_ERROR("hipMemcpy gpu_pPointState final download")
// assign final ids, and count groups
int counter_Init = 0;
int counter_Noise = 0;
int counter_Candidate = 0;
int counter_Final = 0;
for (n=0;n<N;++n) {
unsigned int iState = pPointState[n]; // iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
unsigned int iNewState = INF_32;
if ( iState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST) {
unsigned int iFinalChainID = iState;
unsigned int iMyIndex = iFinalChainID - DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
MY_ASSERT(iMyIndex < N,"FinalClusterIDIndex From pPointState too high");
iNewState = giFinalClusterIDs[iMyIndex];
++counter_Final;
} else if (iState >= DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST) {
++counter_Candidate;
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
++counter_Init;
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE) {
++counter_Noise;
}
pPointState[n] = iNewState;
}
PROFILE_TIME_SECTION_SUM(t_cleanup);
printf("giDBScanClusterDoubleAssignmentCounter = %d\n",giDBScanClusterDoubleAssignmentCounter);
printf("time profile:\n");
printf("t_kernel_main = %f\n",t_kernel_main );
printf("t_download_states = %f\n",t_download_states );
printf("t_check_seedstates = %f\n",t_check_seedstates );
printf("t_finished_seeds = %f\n",t_finished_seeds );
printf("t_kernel_refill = %f\n",t_kernel_refill );
printf("t_cleanup = %f\n",t_cleanup );
printf("t_debug = %f\n",t_debug );
printf("dbscan final count : Init=%d,Noise=%d,Candidate=%d,Final=%d\n",(int)counter_Init,(int)counter_Noise,(int)counter_Candidate,(int)counter_Final);
sprintf(gsInfoGPUaux,"|double=%d,Init=%d,Noise=%d(%0.1f%%),Candidate=%d,Final=%d",
(int)giDBScanClusterDoubleAssignmentCounter,(int)counter_Init,(int)counter_Noise,(float)(100.0f*float(counter_Noise)/float(N)),(int)counter_Candidate,(int)counter_Final);
if (counter_Init > 0) printf("warning, count(init)>0, algorithm not finished\n");
if (counter_Candidate > 0) printf("warning, count(Candidate)>0, algorithm not finished\n");
}
| e3ccff1e7593c970c4257beabb2965270369d5c1.cu | #define TEST_KERNEL_NEWSEED 0
#define TEST_KERNEL_REFILL 0
#define kThreadBlockMinPtsScale 32
#define GPU_POINT_COORDS(pData,point_id,d) (pData[ARRAY_INDEX((point_id)*D + (d),N*D)])
#define ARRAY_INDEX(a,arrlen) (a) // can be used for checking
#define DBSCAN_WITHIDX_CLUSTER_ID_INIT (0)
#define DBSCAN_WITHIDX_CLUSTER_ID_NOISE (1)
#define DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST (2)
#define DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST (DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST+N)
#define DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(x) (x + N)
#define DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(x) (x - N)
#define INF_32 (0x7fffffff) // ; 32 bit max value, signed
#define MY_ASSERT(x,msg) if (!(x)) { printf("ASSERT FAILED (%s) : %s\n",#x,msg); exit(0); }
//~ #define DBSCAN_ID_LOOKUP_IN_VRAM
//~ #define DBSCAN_ID_LOOKUP_IN_VRAM_CHECK
//~ #define EMU_CHECKBOUNDS(name,index,num)
#define EMU_CHECKBOUNDS(name,index,num) if (index < 0 || index >= num) { printf("EMU_CHECKBOUNDS(%s,%d,%d) failed\n",name,(int)index,(int)num); exit(0); }
#define PROFILE_TIME_ENABLE
#ifdef PROFILE_TIME_ENABLE
#define PROFILE_TIME_SECTION_START() ProfileTimerStartSection()
#define PROFILE_TIME_SECTION_STEP(name) float name = ProfileTimerStartSection()
#define PROFILE_TIME_SECTION_SUM(name) name += ProfileTimerStartSection()
#else
#define PROFILE_TIME_SECTION_START()
#define PROFILE_TIME_SECTION_STEP(name)
#define PROFILE_TIME_SECTION_SUM(name)
#endif
/*
int atomicMin(int* address, int val);
int atomicMax(int* address, int val);
int atomicExch(int* address, int val);
uint atomicExch(uint* address, uint val);
float atomicExch(float* address, float val);
unsigned int atomicInc(unsigned int* address,
unsigned int val);
reads the 32-bit word old located at the address address in global or shared
memory, computes ((old >= val) ? 0 : (old+1)), and stores the result
back to memory at the same address. These three operations are performed in one
atomic transaction. The function returns old.
unsigned int atomicDec(unsigned int* address,
unsigned int val);
int atomicCAS(int* address, int compare, int val);
unsigned int atomicCAS(unsigned int* address,
unsigned int compare,
unsigned int val);
unsigned long long int atomicCAS(unsigned long long int* address,
unsigned long long int compare,
unsigned long long int val);
writes (old == compare ? val : old) , and returns old
Atomic functions operating on shared memory and atomic functions operating on
64-bit words are only available for devices of compute capability 1.2 and above.
*/
__constant__ unsigned int gConst_pFinalChainIDs[DBSCAN_NUM_SEEDS];
char gsInfoGPUaux[512] = "";
// ***** ***** ***** ***** ***** main kernel
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
#define DBSCAN_LOOKUP_PARAM ,pClusterIDLookup
#else
#define DBSCAN_LOOKUP_PARAM
#endif
// function used by kernel, inlined automatically
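// Tries to claim point iCurPointID for chain iCandidateID via atomicCAS on its state:
// if the point was still INIT it is appended to the seed's candidate list (or only counted in
// NotListedLen once the list is full); if it already belongs to another chain, the corresponding
// entry in the connection matrix is set so the two chains get merged later.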
__device__ void DBScanGPU_TryMarkAsCandidate ( const unsigned int iCurPointID,
unsigned int* pPointState,
const unsigned int iCandidateID,
const unsigned int iSeedID,
unsigned int* piSeedState_NotListedLen,
unsigned int* piSeedState_ListLen,
unsigned int* pCandidateLists,
unsigned int* pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,unsigned int* pClusterIDLookup // [N]
#endif
)
{
unsigned int iOldState = atomicCAS(&pPointState[iCurPointID],DBSCAN_WITHIDX_CLUSTER_ID_INIT,iCandidateID); // this also marks a few candidates if ownpoint is an outlier
//~ if (iCurPointID == 126530) printf("DEBUG002:set:old=%d,cid=%d,seed=%d\n",iOldState,iCandidateID,iSeedID);
if (iOldState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) { // otherwise just count and don't do anything
// claimed as candidate, add to list
// pointstate already set, need to add to list now
unsigned int iMyListIndex = atomicInc(piSeedState_ListLen,0xffffffff);
if (iMyListIndex < CANDIDATE_LIST_MAXLEN)
pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN+iMyListIndex] = iCurPointID;
else atomicInc(piSeedState_NotListedLen,0xffffffff);
} else if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_NOISE) { // connection with other cluster detected
// iOldState can be candidate or final, transform to one of them for lookup table
if (iOldState < DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
iOldState = DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iOldState);
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID) &&
iOldState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST &&
iOldState < DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST+N) {
// lookup in global mem.. faster for big seednum
unsigned int iOtherSeedID = pClusterIDLookup[iOldState-DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST];
#else
if (iOldState != DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID)) { // this check can save us the lookup, and is also the more probable case
// use lookup table to get seedid for connection matrix
unsigned int iOtherSeedID = 0xffffffff;
#pragma unroll // unroll next loop
for (int d=0;d<DBSCAN_NUM_SEEDS;++d) // warning, slow for big seednum
if (gConst_pFinalChainIDs[d] == iOldState) iOtherSeedID = d;
#endif
// set bit in connection matrix.. atomic not needed as at least one of concurrent writes is guaranteed to succeed
if (iOtherSeedID < DBSCAN_NUM_SEEDS) {
//~ atomicCAS(&pConnectionMatrix[iSeedID*DBSCAN_NUM_SEEDS + iOtherSeedID],0,1);
pConnectionMatrix[iSeedID*DBSCAN_NUM_SEEDS + iOtherSeedID] = 1;
}
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
}
#else
}
#endif
}
}
/// kernel code : this gets executed on the GPU
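/// One thread block per seed: the block loads the seed's current point, walks the spatial index
/// cells overlapping the epsilon box around it, and every thread tests a subset of the points in
/// those cells. Neighbors are buffered until MINPTS is reached, then spread as candidates;
/// finally the own point is marked as core point (final chain id) or as noise.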
__global__ static void dbscan_kernel_main (
float* pPointCoords, // N*D
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
float* pIndex, // INDEX_NUM_FLOATS
unsigned int* pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ unsigned int* pFinalChainIDs, // DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
unsigned int* pCandidateLists, // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
unsigned int* pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,unsigned int* pClusterIDLookup // [N]
#endif
)
{
#define NEIGHBOR_BUFFER_SIZE (DBSCAN_PARAM_MINPTS-1)
__shared__ unsigned int iOwnPointID;
__shared__ unsigned int iNeighborCount;
__shared__ unsigned int iNeighbors[NEIGHBOR_BUFFER_SIZE];
__shared__ float fOwnPoint[D];
__shared__ float3 vMin;
__shared__ float3 vMax;
int d,n;
// prepare variables
const unsigned int iSeedID = BLOCKIDX; // in [0;DBSCAN_NUM_SEEDS[
const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ if (threadIdx.x == 0) printf("iSeedID=%d, iCandidateID=%d\n",(int)iSeedID,(int)iCandidateID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int* piSeedState_NeighborCount = &pSeedStates[iSeedID*SEEDSTATEDIM + 2]; // ("atomics not in shared mem for compute 1.1");
// get iOwnPointID from seedstate(cpu) or from seedlist
if (threadIdx.x == 0) {
if (*piSeedState_ListLen > CANDIDATE_LIST_MAXLEN) // cut/limit overincrementation before polling
*piSeedState_ListLen = CANDIDATE_LIST_MAXLEN;
iOwnPointID = pSeedStates[iSeedID*SEEDSTATEDIM + 3];
if (iOwnPointID == INF_32) {
// no seed from cpu, take one from list
unsigned int ll = *piSeedState_ListLen;
if (ll > 0) {
unsigned int llm1 = ll - 1;
*piSeedState_ListLen = llm1;
iOwnPointID = pCandidateLists[ARRAY_INDEX(
iSeedID*CANDIDATE_LIST_MAXLEN + llm1,
DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN)];
} else {
//return; // seed terminated and not revivable, algorithm nearing its finish
// but exit function only after syncthreads
}
} else {
pSeedStates[iSeedID*SEEDSTATEDIM + 3] = INF_32;
}
}
__syncthreads();
if (iOwnPointID == INF_32) return;
// read own point coordinates
if (threadIdx.x < D) fOwnPoint[threadIdx.x] = GPU_POINT_COORDS(pPointCoords,iOwnPointID,threadIdx.x);
__syncthreads();
// calculate epsilon area used for index
if (threadIdx.x == 0) {
vMin.x = fOwnPoint[0] - DBSCAN_PARAM_EPSILON;
vMin.y = fOwnPoint[1] - DBSCAN_PARAM_EPSILON;
vMin.z = fOwnPoint[2] - DBSCAN_PARAM_EPSILON;
vMax.x = fOwnPoint[0] + DBSCAN_PARAM_EPSILON;
vMax.y = fOwnPoint[1] + DBSCAN_PARAM_EPSILON;
vMax.z = fOwnPoint[2] + DBSCAN_PARAM_EPSILON;
iNeighborCount = 0;
*piSeedState_NeighborCount = 0;
}
#pragma unroll // unroll next loop
for (d=0;d<kThreadBlockMinPtsScale;++d) {
if (threadIdx.x*kThreadBlockMinPtsScale + d < NEIGHBOR_BUFFER_SIZE)
iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d] = 0xffffffff; // mark as invalid
}
__syncthreads();
#define K_I_0(a) (pIndex[INDEXPOS_0( ((int)x)+(a))])
#define K_I_1(a) (pIndex[INDEXPOS_1((int)x, ((int)y)+(a))])
#define K_I_2(a) (pIndex[INDEXPOS_2((int)x, (int)y, ((int)z)+(a))])
#define K_INIT_INDEX n = (x)*SX + (y)*SY + (z)*SZ; // oldname : iMyLocalDataIndex
#define GPU_IDX1 (K_I_0(1) >= vMin.x && K_I_0(0) <= vMax.x)
#define GPU_IDX2 (K_I_1(1) >= vMin.y && K_I_1(0) <= vMax.y)
#define GPU_IDX3 (K_I_2(1) >= vMin.z && K_I_2(0) <= vMax.z)
#ifndef ENABLE_GPU_IDX3
#undef GPU_IDX3
#define GPU_IDX3 (1)
#endif
// iteration over index cells : 299 (timing; cf. the slower alternative below: 305 for 500k)
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
float* iy;
float* iz;
float* iw;
#ifdef MYTEST_DISABLE_INDEX
for (x=0;x<I0;++x) {
for (y=0;y<I0;++y) {
for (z=0;z<I0;++z) { K_INIT_INDEX // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
#else
for (iy = &pIndex[INDEXPOS_0(0 )], x=0;x<I0;++x,++iy) if (iy[1] >= vMin.x && *iy <= vMax.x) {
for (iz = &pIndex[INDEXPOS_1(x,0 )], y=0;y<I0;++y,++iz) if (iz[1] >= vMin.y && *iz <= vMax.y) {
for (iw = &pIndex[INDEXPOS_2(x,y,0)], z=0;z<I0;++z,++iw) if (iw[1] >= vMin.z && *iw <= vMax.z) { K_INIT_INDEX // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
#endif
/*
// iteration : other try, but was slower .. 305 for 500k
unsigned int n1;
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
float* iy;
float* iz;
float* iw;
for (iy = &pIndex[INDEXSTART_0], x=0;x<I0*(I0+1) ;x+=(I0+1),++iy) if (iy[1] >= vMin.x && *iy <= vMax.x) {
for (iz = &pIndex[INDEXSTART_1+x], y=0;y<I0*(I0+1) ;y+=(I0+1),++iz) if (iz[1] >= vMin.y && *iz <= vMax.y) { n1 = (x/(I0+1))*SX + (y/(I0+1))*SY;
for (iw = &pIndex[INDEXSTART_2+y+x*I0], z=0;z<I0*SZ ;z+=SZ,++iw) if (iw[1] >= vMin.z && *iw <= vMax.z) { n = n1 + z; // n init here
for (w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
*/
//~ for (unsigned int x=0;x<I0;++x) if (GPU_IDX1) {
//~ for (unsigned int y=0;y<I0;++y) if (GPU_IDX2) {
//~ for (unsigned int z=0;z<I0;++z) if (GPU_IDX3) { K_INIT_INDEX // n init here
//~ for (unsigned int w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
//~ for (unsigned int nend = n + SZ;n<nend;n+=kThreadBlockSize) { // slower than normal, no an improvement
// read current point
unsigned int iCurPointID = n + threadIdx.x;
// calc distance
float fSqDist = 0; // 1110.0 + fOwnPoint[0];
#pragma unroll // unroll next loop
for (d=0;d<D;++d) { float a = GPU_POINT_COORDS(pPointCoords,iCurPointID,d) - fOwnPoint[d]; fSqDist += a*a; }
// check distance
if (fSqDist <= DBSCAN_PARAM_SQEPSILON) {
// self is also counted (iCurPointID == iOwnPointID) here for simplicity
// until we know that self is a core-point, only remember neighbors, don't spread yet (atomic op on shared memory)
unsigned int myNeighborIndex = 0x7fffffff;
if (iNeighborCount < NEIGHBOR_BUFFER_SIZE) { // otherwise there are already minpts-1 OTHER neighbors besides me
myNeighborIndex = atomicInc(piSeedState_NeighborCount,0xffffffff); // inc by 1 and return old
if (myNeighborIndex == DBSCAN_PARAM_MINPTS)
iNeighborCount = DBSCAN_PARAM_MINPTS;
}
if (myNeighborIndex < NEIGHBOR_BUFFER_SIZE) {
// not enough points yet, save in buffer for later
iNeighbors[myNeighborIndex] = iCurPointID;
} else {
// index>=NEIGHBOR_BUFFER_SIZE(=(DBSCAN_PARAM_MINPTS-1)) so iNeighborCount>=DBSCAN_PARAM_MINPTS
DBScanGPU_TryMarkAsCandidate(iCurPointID,pPointState,iCandidateID,iSeedID,
piSeedState_NotListedLen,piSeedState_ListLen,pCandidateLists,pConnectionMatrix DBSCAN_LOOKUP_PARAM);
}
}
#ifdef MYTEST_DISABLE_INDEX
}}}}
#else
}}}}
#endif
#undef K_I_0
#undef K_I_1
#undef K_I_2
#undef K_INIT_INDEX
#undef GPU_IDX1
#undef GPU_IDX2
#undef GPU_IDX3
// wait until all are finished, so we know if it's a corepoint
__syncthreads();
// process stored neighbors
#pragma unroll // unroll next loop:
for (d=0;d<kThreadBlockMinPtsScale;++d) {
if (iNeighborCount >= DBSCAN_PARAM_MINPTS &&
threadIdx.x*kThreadBlockMinPtsScale + d < NEIGHBOR_BUFFER_SIZE &&
iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d] < N) {
DBScanGPU_TryMarkAsCandidate(iNeighbors[threadIdx.x*kThreadBlockMinPtsScale + d],pPointState,iCandidateID,iSeedID,
piSeedState_NotListedLen,piSeedState_ListLen,pCandidateLists,pConnectionMatrix DBSCAN_LOOKUP_PARAM);
}
}
__syncthreads();
// mark self as either confirmed-core-point or noise
if (threadIdx.x == 0) {
//~ if (iSeedID == 3) printf("DEBUG002:finalseed:%d>=%d\n",*piSeedState_NeighborCount,(int)DBSCAN_PARAM_MINPTS);
//~ if (iOwnPointID == 126530) printf("DEBUG002:final:%d>=%d\n",*piSeedState_NeighborCount,(int)DBSCAN_PARAM_MINPTS);
if (iNeighborCount >= DBSCAN_PARAM_MINPTS)
pPointState[iOwnPointID] = DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_2_FINAL(iCandidateID);
else pPointState[iOwnPointID] = DBSCAN_WITHIDX_CLUSTER_ID_NOISE;
if (*piSeedState_ListLen > CANDIDATE_LIST_MAXLEN)
*piSeedState_ListLen = CANDIDATE_LIST_MAXLEN; // clip if over-incremented and didn't fit into list
}
}
// ***** ***** ***** ***** ***** newseeds
/// helper kernel, searches new seeds
/// kernel code : this gets executed on the GPU
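/// N is divided into NUM_THREADS_NEWSEEDSCAN equal sections, one per thread; each thread remembers
/// (in pHelperData) how far it has scanned its section and resumes there on the next call,
/// collecting up to iNewSeedsNeeded points that are still in the INIT state.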
__global__ static void dbscan_kernel_newseeds (
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
unsigned int* pHelperData, // DBSCAN_NUM_SEEDS + x
unsigned int iNewSeedsNeeded
)
{
#define NEWSEEDSCAN_SECTIONSIZE (N/NUM_THREADS_NEWSEEDSCAN)
unsigned int iMyID = MYID; // #define MYID (BLOCKIDX * blockDim.x + threadIdx.x)
unsigned int* piNextScannedOffset = &pHelperData[DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS+iMyID];
unsigned int* piNumFound = &pHelperData[DBSCAN_NUM_SEEDS+0];
unsigned int i = *piNextScannedOffset;
// used sharedmem to allow quick abort
__shared__ unsigned int iNumFoundCache;
if (threadIdx.x == 0) iNumFoundCache = 0;
__syncthreads();
// check for earlyout
if (i >= NEWSEEDSCAN_SECTIONSIZE) return; // this block is finished
const unsigned int base = iMyID * NEWSEEDSCAN_SECTIONSIZE;
// scanloop
for (;i<NEWSEEDSCAN_SECTIONSIZE;++i) {
if (iNumFoundCache >= iNewSeedsNeeded) break; // stop scanning, this point will be scanned again next time
unsigned int n = i + base;
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
unsigned int iMyIndex = atomicInc(piNumFound,0xffffffff);
if (iMyIndex < iNewSeedsNeeded) {
pHelperData[iMyIndex] = n;
} else {
iNumFoundCache = iNewSeedsNeeded; // abort, we've got enough
break; // couldn't register, this point has to be scanned again next time
}
}
// else point cannot be fresh seed, so index can safely be incremented
}
// increment if point is not init -> cannot be fresh seed ever again
// increment if point is init and could be registered as new seed
// DON'T increment if point is init and could NOT be registered as new seed -> has to be scanned again next time
// abort if we found enough seeds
*piNextScannedOffset = i;
// piNextScannedOffset is unique for every thread, not just for every threadblock, so no synching is necessary.
// the number of threads running in parallel here is rather limited, only 4 * threadblocksize
}
// ***** ***** ***** ***** ***** refill
/// helper kernel, refills candidate lists
/// kernel code : this gets executed on the GPU
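/// For a seed whose list ran empty but which still has not-listed candidates, one block rescans
/// pPointState and moves up to min(notlisted, CANDIDATE_LIST_REFILL) of its candidates back into
/// the list, fixing up the two counters with atomics.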
__global__ static void dbscan_kernel_refill (
unsigned int* pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
unsigned int* pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
unsigned int* pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
)
{
const unsigned int iSeedID = BLOCKIDX; // in [0;DBSCAN_NUM_SEEDS[
const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ if (threadIdx.x == 0) printf("iSeedID=%d, iCandidateID=%d\n",(int)iSeedID,(int)iCandidateID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int iMaxTake = min(*piSeedState_NotListedLen,CANDIDATE_LIST_REFILL);
if (*piSeedState_ListLen > 0) return; // still seeds in list, nothing to do
if (*piSeedState_NotListedLen == 0) return; // no candidates left / fresh seed-chain
__shared__ unsigned int iNumFound;
if (threadIdx.x == 0) iNumFound = 0; // piSeedState_ListLen
__syncthreads();
const unsigned int iStep = kThreadBlockSize; // total number of threads for this seed
// iterate over points
for (int n=threadIdx.x; n<N && iNumFound < iMaxTake ; n+=iStep) {
if (pPointState[n] == iCandidateID) {
unsigned int iMyIndex = atomicInc(piSeedState_ListLen,0xffffffff); // has to be cut down in the end
if (iMyIndex < iMaxTake) {
atomicDec(piSeedState_NotListedLen,INF_32);
pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN + iMyIndex] = n;
if (iMyIndex + 1 >= iMaxTake) iNumFound = iMaxTake; // abort, we've got enough
}
}
}
__syncthreads();
// cut down over-incrementation
if (threadIdx.x == 0) {
if (*piSeedState_ListLen > iMaxTake)
*piSeedState_ListLen = iMaxTake;
}
}
// ***** ***** ***** ***** ***** utils : final id
unsigned int giFinalClusterIDs[N];
unsigned int* gpFinalChainIDs = 0;
unsigned int* gpConnectionMatrix = 0;
unsigned int giLastFinalClusterID = 0;
unsigned int GetFinalClusterIDIndexFromSeedID (int iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
MY_ASSERT(iFinalChainID >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST,"GetFinalClusterIDIndexFromSeedID too low");
unsigned int iMyIndex = iFinalChainID - DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
MY_ASSERT(iMyIndex < N,"GetFinalClusterIDIndexFromSeedID too high");
return iMyIndex;
}
unsigned int GetFinalClusterIDBySeedID (int iSeedID) {
return giFinalClusterIDs[GetFinalClusterIDIndexFromSeedID(iSeedID)];
}
void SetFinalClusterIDBySeedID (int iSeedID,unsigned int iFinalClusterID) {
giFinalClusterIDs[GetFinalClusterIDIndexFromSeedID(iSeedID)] = iFinalClusterID;
}
// ***** ***** ***** ***** ***** utils : connection
bool DBScan_CheckConnection (int i,int j) {
EMU_CHECKBOUNDS("DBScan_CheckConnection",i,DBSCAN_NUM_SEEDS)
EMU_CHECKBOUNDS("DBScan_CheckConnection",j,DBSCAN_NUM_SEEDS)
return gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] ||
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i];
}
void DBScan_ClearConnection (int i) {
for (int j=0;j<DBSCAN_NUM_SEEDS;++j) {
gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] = 0;
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i] = 0;
}
}
void DBScan_SetConnection (int i,int j) {
EMU_CHECKBOUNDS("DBScan_SetConnection i",i,DBSCAN_NUM_SEEDS)
EMU_CHECKBOUNDS("DBScan_SetConnection j",j,DBSCAN_NUM_SEEDS)
gpConnectionMatrix[i*DBSCAN_NUM_SEEDS + j] = 1;
gpConnectionMatrix[j*DBSCAN_NUM_SEEDS + i] = 1;
}
void DBScan_SpreadConnection_DebugDump (const char* szMsg) {
return;
printf("########## %s\n",szMsg);
for (int c=0;c<=giLastFinalClusterID;++c) {
bool bFound = false;
for (int x=0;x<DBSCAN_NUM_SEEDS;++x) {
unsigned int cid = GetFinalClusterIDBySeedID(x);
if (cid == c) {
if (!bFound) {bFound = true; printf("c:%5d:",c);}
printf("%d,",x);
}
}
if (bFound) printf("\n");
}
}
int giDBScanClusterDoubleAssignmentCounter = 0;
void DBScan_SpreadConnection_Aux (int i) {
unsigned int iFinalClusterIDA = GetFinalClusterIDBySeedID(i);
MY_ASSERT(iFinalClusterIDA != INF_32,"DBScan_SpreadConnection_Aux on seed without clusterid ?");
EMU_CHECKBOUNDS("cpuspread",i,DBSCAN_NUM_SEEDS)
for (int j=0;j<DBSCAN_NUM_SEEDS;++j) {
if (j == i) continue;
if (DBScan_CheckConnection(i,j)) { // j and i are connected
unsigned int iFinalClusterIDB = GetFinalClusterIDBySeedID(j);
if (iFinalClusterIDB != iFinalClusterIDA) {
if (iFinalClusterIDB != INF_32) {
++giDBScanClusterDoubleAssignmentCounter;
printf("warning : DBScan_SpreadConnection_Aux unexpected double assignment : i=%d,j=%d,a=%d,b=%d\n",(int)i,(int)j,(int)iFinalClusterIDA,(int)iFinalClusterIDB);
//MY_ASSERT(0,"DBScan_SpreadConnection_Aux unexpected double assignment"); // fatal ? only during debug probably
}
SetFinalClusterIDBySeedID(j,iFinalClusterIDA);
DBScan_SpreadConnection_Aux(j); // spread
}
}
}
}
// spreads ClusterID Assignments over direct and indirect connections (a<->b<->c)
void DBScan_SpreadConnection () {
//~ printf("DBScan_SpreadConnection start\n");
for (int i=0;i<DBSCAN_NUM_SEEDS;++i) {
if (GetFinalClusterIDBySeedID(i) != INF_32)
DBScan_SpreadConnection_Aux(i);
}
//~ printf("DBScan_SpreadConnection end\n");
}
// ***** ***** ***** ***** ***** DBScanVerifyCandidates
//~ #define DBSCAN_VERIFY_CANDIDATES(sectionname) DBScanVerifyCandidates(p,sectionname,gpu_pPointState,gpu_pSeedStates,gpu_pCandidateLists);
#ifndef DBSCAN_VERIFY_CANDIDATES
#define DBSCAN_VERIFY_CANDIDATES(sectionname)
#endif
void DBScanVerifyCandidates (DBScanData* p,const char* szSectionName,unsigned int* gpu_pPointState,unsigned int* gpu_pSeedStates,unsigned int* gpu_pCandidateLists) {
cudaError_t myLastErr;
unsigned int* pPointState = (unsigned int*)malloc(sizeof(p->pClusterIDs));
unsigned int* pSeedStates = (unsigned int*)malloc(sizeof(p->pSeedStates));
unsigned int* pCandidateLists = (unsigned int*)malloc(sizeof(p->pCandidateLists));
// download data from vram
CUDA_SAFE_CALL( cudaMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pPointState candidate-verify")
CUDA_SAFE_CALL( cudaMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pSeedStates candidate-verify")
CUDA_SAFE_CALL( cudaMemcpy(pCandidateLists,gpu_pCandidateLists,sizeof(p->pCandidateLists),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pCandidateLists candidate-verify")
// count candidates
int c_candidates[DBSCAN_NUM_SEEDS];
int c_candidates_last[DBSCAN_NUM_SEEDS];
int n,iSeedID;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) c_candidates[iSeedID] = 0;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) c_candidates_last[iSeedID] = -1;
//const unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(gConst_pFinalChainIDs[iSeedID]);
//~ CUDA_SAFE_CALL( cudaMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("cudaMemcpy pFinalChainIDs") // const mem
for (n=0;n<N;++n) {
unsigned int iState = pPointState[n]; // iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if ( iState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST) {
} else if (iState >= DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST) {
int iFoundSeedID = -1;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iCandidateID == iState) {
++c_candidates[iSeedID];
c_candidates_last[iSeedID] = n;
iFoundSeedID = iSeedID;
}
}
if (iFoundSeedID == -1) {
printf("DBScanVerifyCandidates(%s) failed to find seed state=%d\n",szSectionName,iState);
exit(0);
}
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE) {
}
}
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iSeedState_NotListedLen = pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int iSeedState_ListLen = pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int iOwnPointID = pSeedStates[iSeedID*SEEDSTATEDIM + 3];
unsigned int iRecordedCount = iSeedState_NotListedLen + iSeedState_ListLen;
unsigned int iState = 0xffffffff;
if (iOwnPointID != INF_32) {
iRecordedCount += 1;
iState = pPointState[iOwnPointID];
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iState != iCandidateID) {
printf("DBScanVerifyCandidates(%s) failed prime candidate bad, iSeedID=%d n=%d state=%d\n",szSectionName,iSeedID,iOwnPointID,iState);
exit(0);
}
}
int iRealCount = c_candidates[iSeedID];
if (iRealCount != iRecordedCount) {
printf("DBScanVerifyCandidates(%s) failed, iSeedID=%d iOwnPointID=%d:%d lastreal=%d realcount=%d : %d=iRecordedCount=NL:%d+L:%d mismatch\n",szSectionName,iSeedID,iOwnPointID,iState,c_candidates_last[iSeedID],iRealCount,iRecordedCount,iSeedState_NotListedLen,iSeedState_ListLen);
exit(0);
}
}
free(pPointState);
free(pSeedStates);
free(pCandidateLists);
}
// ***** ***** ***** ***** ***** cpu main
//~ unsigned int iVRamWriterUINT;
//~ #define VRAM_WRITE_UINT(p,v) { iVRamWriterUINT = v; CUDA_SAFE_CALL(cudaMemcpy(p,&iVRamWriterUINT,sizeof(iVRamWriterUINT),cudaMemcpyHostToDevice)); HANDLE_ERROR("VRAM_WRITE_UINT" #p) } // ","##v
void VRAM_WRITE_UINT(unsigned int* p,unsigned int v) {
cudaError_t myLastErr;
CUDA_SAFE_CALL(cudaMemcpy(p,&v,sizeof(v),cudaMemcpyHostToDevice)); HANDLE_ERROR("cudaMemcpy VRAM_WRITE_UINT");
}
unsigned int VRAM_READ_UINT(unsigned int* p) {
cudaError_t myLastErr;
unsigned int v = 0;
CUDA_SAFE_CALL(cudaMemcpy(&v,p,sizeof(v),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy VRAM_READ_UINT");
return v;
}
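// assign a new final chain ID to a seed slot; with DBSCAN_ID_LOOKUP_IN_VRAM the device-side
// chainID->seedID lookup is kept consistent by invalidating the old entry and setting the new one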
void DBScanAssignFinalChainID (unsigned int iSeedID,unsigned int iFinalChainID,unsigned int *gpu_pClusterIDLookup) {
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
unsigned int iOldFinalChainID = gpFinalChainIDs[iSeedID];
int oldn = iOldFinalChainID -DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
int newn = iFinalChainID -DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
if (oldn >= 0 && oldn < N) VRAM_WRITE_UINT(&gpu_pClusterIDLookup[oldn],INF_32);
if (newn >= 0 && newn < N) VRAM_WRITE_UINT(&gpu_pClusterIDLookup[newn],iSeedID);
#endif
gpFinalChainIDs[iSeedID] = iFinalChainID;
}
#define VRAM_READWRITE_UNIT_TEST
void dbscan_gpu (DBScanData* p) {
// a few sanity checks for parameters
printf("dbscan_gpu SZ=%d kThreadBlockSize=%d\n",(int)SZ,(int)kThreadBlockSize);
if ((SZ % kThreadBlockSize) != 0) {
printf("##### ERROR, SZ(%d) must be a multiple of kThreadBlockSize(%d)\n",(int)SZ,(int)kThreadBlockSize);
printf("##### try increasing the number of datapoints, or decreasing IO(%d)\n",(int)I0);
// because of kernel : for (unsigned int w=0;w<SZ;w+=kThreadBlockSize,n+=kThreadBlockSize) {
exit(0);
}
if ((kThreadBlockSize % 64) != 0) {
printf("threadblocksize should be multiple of 64.. 64 is ok if there are enough blocks running in parallel\n");
// cuda manual, number of parallel running threads etc, recommended for register access or so
exit(0);
}
if ((N % kThreadBlockSize) != 0) {
printf("N=%d should be a multiple of kThreadBlockSize=%d\n",(int)N,(int)kThreadBlockSize);
exit(0);
}
//~ if (kThreadBlockSize < DBSCAN_PARAM_MINPTS) {
if (kThreadBlockSize * kThreadBlockMinPtsScale < DBSCAN_PARAM_MINPTS) {
printf("(kThreadBlockSize * kThreadBlockMinPtsScale) must be >= DBSCAN_PARAM_MINPTS, other case not yet implemented (processing stored neightbors)\n");
// kernel : neightbors
exit(0);
}
if (kThreadBlockSize < D) {
printf("kThreadBlockSize must be >= D, other case not yet implemented (reading in mainpoint)\n");
// kernel : reading in mainpoint
exit(0);
}
if (GRIDHEIGHT != 1) {
printf("error, GRIDHEIGHT=1 assumed for MYID and BLOCKIDX implementation\n");
// MYID and BLOCKIDX
exit(0);
}
if ((DBSCAN_NUM_SEEDS % kThreadBlockSize) != 0) {
printf("DBSCAN_NUM_SEEDS(%d) must be a multiple of kThreadBlockSize(%d)\n",(int)DBSCAN_NUM_SEEDS,(int)kThreadBlockSize);
exit(0);
}
//~ if ((DBSCAN_NUM_SEEDS % (GRIDHEIGHT * kThreadBlockSize)) != 0) {
//~ printf("DBSCAN_NUM_SEEDS(%d) must be a multiple of (GRIDHEIGHT(%d) * kThreadBlockSize(%d))\n",(int)GRIDHEIGHT,(int)kThreadBlockSize,(int)kThreadBlockSize);
//~ // grid_size_one_thread_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT / kThreadBlockSize; UNUSED
//~ exit(0);
//~ }
// vars
int i,j,n;
cudaError_t myLastErr;
#ifndef __DEVICE_EMULATION__
//~ CUDA_SAFE_CALL(cudaSetDevice(0)); /// GT 8500
CUDA_SAFE_CALL(cudaSetDevice(1)); /// GTX 280
#endif
// final cluster ids
for (i=0;i<N;++i) giFinalClusterIDs[i] = INF_32; // giFinalClusterIDs[pFinalChainIDs[iSeedID] = iFinalChainID] = iFinalClusterID;
// shortcuts
bool bFreshSeedsLeft = true;
unsigned int* pSeedStates = p->pSeedStates; // (atomicops)
unsigned int* pHelperData = p->pHelperData; // newseed
unsigned int* pPointState = (unsigned int*)p->pClusterIDs; // for final evaluation
gpFinalChainIDs = p->pFinalChainIDs; // old : pFinalChainIDs
gpConnectionMatrix = p->pConnectionMatrix;
// allocate and init gpu buffers
#define ALLOCATE_GPU_BUFFER(type,name,datasize) type name = 0; CUDA_SAFE_CALL(cudaMalloc((void**)&name,datasize));
ALLOCATE_GPU_BUFFER(float* ,gpu_pPointCoords, sizeof(p->pPoints)); // N*D
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pPointState, sizeof(p->pClusterIDs));// N (outlier,candidate:chain-id,finished:chain-id)
ALLOCATE_GPU_BUFFER(float* ,gpu_pIndex, sizeof(p->pIndex)); // INDEX_NUM_FLOATS
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pSeedStates, sizeof(p->pSeedStates));// DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pFinalChainIDs, sizeof(p->pFinalChainIDs));// DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pCandidateLists, sizeof(p->pCandidateLists));// DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pConnectionMatrix, sizeof(p->pConnectionMatrix));// DBSCAN_NUM_SEEDS^2
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pHelperData, sizeof(p->pHelperData));// DBSCAN_NUM_SEEDS + x
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
ALLOCATE_GPU_BUFFER(unsigned int* ,gpu_pClusterIDLookup, sizeof(unsigned int)*N);
#else
unsigned int* gpu_pClusterIDLookup = 0;
#endif
// init vram data to zero
CUDA_SAFE_CALL( cudaMemset(gpu_pPointState, 0, sizeof(p->pClusterIDs)));
CUDA_SAFE_CALL( cudaMemset(gpu_pCandidateLists, 0, sizeof(p->pCandidateLists)));
CUDA_SAFE_CALL( cudaMemset(gpu_pConnectionMatrix, 0, sizeof(p->pConnectionMatrix)));
CUDA_SAFE_CALL( cudaMemset(gpu_pHelperData, 0, sizeof(p->pHelperData)));
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
CUDA_SAFE_CALL( cudaMemset(gpu_pClusterIDLookup, 0xFF, sizeof(unsigned int)*N));
printf("gpu_pClusterIDLookup[0]=0x%08x\n",(int)VRAM_READ_UINT(&gpu_pClusterIDLookup[0]));
#endif
#ifdef VRAM_READWRITE_UNIT_TEST
printf("N=%d\n",(int)N);
#define VRAM_READWRITE_UNIT_TEST_ONE(addr,v) VRAM_WRITE_UINT(addr,v); if (VRAM_READ_UINT(addr) != v) { printf("writefail v=%d\n",(int)v); exit(0); } else { printf("vramwriteunit ok v=%d\n",(int)v);}
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],1);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],2);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[0],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],0);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],1);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],2);
VRAM_READWRITE_UNIT_TEST_ONE(&gpu_pPointState[5],0);
#endif
// choose initial seeds
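// every seed slot gets a distinct random point: the point is marked in VRAM as the candidate of a
// freshly generated final chain ID, and the host-side seed state is reset to "use this point next"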
printf("start choose initial\n");
unsigned int gNextFinalChainID = DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
int iSeedPoints[DBSCAN_NUM_SEEDS];
int iSeedID;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
// pick one n randomly
bool bAlreadyUsed;
do {
n = rand() % (N-1);
bAlreadyUsed = false;
for (j=0;j<iSeedID;++j) if (iSeedPoints[j] == n) bAlreadyUsed = true;
} while (bAlreadyUsed) ;
iSeedPoints[iSeedID] = n;
unsigned int iFinalChainID = gNextFinalChainID++;
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
//~ printf("chooseinit i=%d n=%d finalchainid=%d\n",(int)i,(int)n,(int)iFinalChainID);
DBScanAssignFinalChainID(iSeedID,iFinalChainID,gpu_pClusterIDLookup);
pSeedStates[SEEDSTATEDIM*iSeedID + 0] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 1] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 2] = 0;
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n;
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
//~ printf("dbscan init : iSeedID=%d n=%d iCandidateID=%d\n",iSeedID,(int)n,iCandidateID);
}
// copy data from ram to vram
CUDA_SAFE_CALL( cudaMemcpy(gpu_pPointCoords, p->pPoints, sizeof(p->pPoints), cudaMemcpyHostToDevice )); HANDLE_ERROR("cudaMemcpy pPoints")
CUDA_SAFE_CALL( cudaMemcpy(gpu_pIndex, p->pIndex, sizeof(p->pIndex), cudaMemcpyHostToDevice )); HANDLE_ERROR("cudaMemcpy pIndex")
CUDA_SAFE_CALL( cudaMemcpy(gpu_pSeedStates, p->pSeedStates, sizeof(p->pSeedStates), cudaMemcpyHostToDevice )); HANDLE_ERROR("cudaMemcpy pSeedStates")
printf("start copy to const vram\n");
// copy data from ram to constant vram
CUDA_SAFE_CALL( cudaMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("cudaMemcpy pFinalChainIDs") // const mem
printf("start size\n");
//~ DBSCAN_VERIFY_CANDIDATES("prepare1")
// kernel setup : grid_size, block_size, mem_shared
dim3 grid_size_many_threads_per_seed;
dim3 grid_size_one_thread_per_seed;
dim3 grid_size_4;
dim3 block_size;
unsigned int mem_shared = 0; // this is for dynamic alloc of shared mem, we alloc statically
grid_size_many_threads_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT; // TODO : make sure N is a multiple of kThreadBlockSize
grid_size_many_threads_per_seed.y = GRIDHEIGHT;
grid_size_many_threads_per_seed.z = 1;
grid_size_one_thread_per_seed.x = DBSCAN_NUM_SEEDS / GRIDHEIGHT / kThreadBlockSize;
grid_size_one_thread_per_seed.y = GRIDHEIGHT;
grid_size_one_thread_per_seed.z = 1;
grid_size_4.x = 4;
grid_size_4.y = 1;
grid_size_4.z = 1;
block_size.x = kThreadBlockSize;
block_size.y = 1;
block_size.z = 1;
#define MB(a) ((int)(a)/1024/1024)
printf("alloc %d %d %d %d gridsize_x=%d\n",MB(p->pPoints),MB(p->pCandidateLists),MB(p->pClusterIDs),MB(p->pConnectionMatrix),grid_size_many_threads_per_seed.x);
// **** TEST NEWSEED
if (TEST_KERNEL_NEWSEED) {
printf("TEST_KERNEL_NEWSEED start\n");
int iPointsLeft = N;
for (int iTestI=0;iTestI<10000000;++iTestI) {
unsigned int iNewSeedsNeeded = DBSCAN_NUM_SEEDS;
// helper kernel : search a few new seeds (why kernel : candidate ids are in vram)
// new seeds : sum_time="one iteration" : save index last checked and increment until next free point is found
VRAM_WRITE_UINT(&gpu_pHelperData[DBSCAN_NUM_SEEDS+0],0); // counter
dbscan_kernel_newseeds<<< grid_size_4, block_size, mem_shared >>>(
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pHelperData,
iNewSeedsNeeded
);
// download gpu_pHelperData from vram
CUDA_SAFE_CALL( cudaMemcpy(pHelperData,gpu_pHelperData,sizeof(unsigned int) * (DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy pHelperData readback")
unsigned int iNewSeedsFound = min(iNewSeedsNeeded,pHelperData[DBSCAN_NUM_SEEDS+0]);
// assign as noise
iPointsLeft -= iNewSeedsFound;
for (i=0;i<iNewSeedsFound;++i) {
n = pHelperData[i];
if (n < 0 || n >= N) printf("bad n:%d\n",n);
VRAM_WRITE_UINT(&gpu_pPointState[n],DBSCAN_WITHIDX_CLUSTER_ID_NOISE);
}
// download pointstates from vram and count states
CUDA_SAFE_CALL( cudaMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pPointState final download")
int cinit = 0;
int cnoise = 0;
int crest = 0;
for (n=0;n<N;++n) {
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_INIT ) { ++cinit; continue; }
if (pPointState[n] == DBSCAN_WITHIDX_CLUSTER_ID_NOISE ) { ++cnoise; continue; }
++crest;
}
printf("iNewSeedsFound=%3d pleft=%6d cinit=%6d,cnoise=%6d,crest=%d over=%d\n",iNewSeedsFound,iPointsLeft,cinit,cnoise,crest,iPointsLeft-cinit);
if (iNewSeedsFound == 0) break;
}
printf("TEST_KERNEL_NEWSEED end\n");
return;
}
// **** TEST REFILL
if (TEST_KERNEL_REFILL) {
printf("TEST_KERNEL_REFILL start\n");
// download pointstates
CUDA_SAFE_CALL( cudaMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pPointState final download")
// prepare test environment
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iSetNonList = 10;
unsigned int iSetList = 0;
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
VRAM_WRITE_UINT(&gpu_pSeedStates[iSeedID*SEEDSTATEDIM + 0],iSetNonList);
VRAM_WRITE_UINT(&gpu_pSeedStates[iSeedID*SEEDSTATEDIM + 1],iSetList);
// pick random points with "init" state as new unmarked
for (i=0;i<iSetNonList;++i) {
// pick one n randomly
do {
n = rand() % (N-1);
if (pPointState[n] != DBSCAN_WITHIDX_CLUSTER_ID_INIT) continue;
pPointState[n] = iCandidateID;
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
break;
} while (true) ;
}
}
printf("TEST_KERNEL_REFILL kernel?\n");
// launch refill kernel
dbscan_kernel_refill<<< grid_size_many_threads_per_seed, block_size, mem_shared >>>(
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * 3 (real,listlen,iNeighBorCount : atomicops)
gpu_pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
);
// init counter
int iSeedDataCounterF[DBSCAN_NUM_SEEDS];
int iSeedDataCounterC[DBSCAN_NUM_SEEDS];
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) iSeedDataCounterF[iSeedID] = 0;
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) iSeedDataCounterC[iSeedID] = 0;
// download pointstates from vram and count states
CUDA_SAFE_CALL( cudaMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pPointState final download")
int cinit = 0;
int cnoise = 0;
int crest = 0;
for (n=0;n<N;++n) {
unsigned int iPointState = pPointState[n];
if (iPointState == DBSCAN_WITHIDX_CLUSTER_ID_INIT ) { ++cinit; continue; }
if (iPointState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE ) { ++cnoise; continue; }
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
if (iPointState == iFinalChainID) ++iSeedDataCounterF[iSeedID];
if (iPointState == iCandidateID ) ++iSeedDataCounterC[iSeedID];
}
++crest;
}
printf("cinit=%6d,cnoise=%6d,crest=%d\n",cinit,cnoise,crest);
// download seedstate from vram
CUDA_SAFE_CALL( cudaMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pSeedStates readback")
// analyse seeds
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
printf("seed:%3d nonl=%d list=%d final=%d cand=%d ",iSeedID,
*piSeedState_NotListedLen,
*piSeedState_ListLen,
iSeedDataCounterF[iSeedID],
iSeedDataCounterC[iSeedID]);
for (i=0;i<*piSeedState_ListLen;++i) {
unsigned int n = VRAM_READ_UINT(&gpu_pCandidateLists[iSeedID*CANDIDATE_LIST_MAXLEN+i]);
unsigned int iState = pPointState[n];
printf("%d%s,",n,(iState != iCandidateID)?"ERROR":"");
}
printf("\n");
}
printf("TEST_KERNEL_REFILL end\n");
return;
}
// **** MAIN
float t_kernel_main = 0.0;
float t_download_states = 0.0;
float t_check_seedstates = 0.0;
float t_finished_seeds = 0.0;
float t_kernel_refill = 0.0;
float t_cleanup = 0.0;
float t_debug = 0.0;
printf("prepare check\n"); DBSCAN_VERIFY_CANDIDATES("prepare")
printf("start loop\n");
PROFILE_TIME_SECTION_START();
int iMainRoughPointsLeft = N;
int iOutout = 0;
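// main loop: run the expansion kernel for all seeds, download the per-seed states, then decide per
// seed whether its candidate list must be refilled or whether the chain finished (spread connections,
// assign final cluster IDs, and restart the seed from a fresh point or by splitting another chain);
// the loop ends once no seed can be revived anymore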
do {
dbscan_kernel_main<<< grid_size_many_threads_per_seed, block_size, mem_shared >>>(
gpu_pPointCoords, // N*D
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pIndex, // INDEX_NUM_FLOATS
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * x (notlisted,listlen,iNeighBorCount : atomicops)
//~ unsigned int* pFinalChainIDs, // DBSCAN_NUM_SEEDS (constant memory, values >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST)
gpu_pCandidateLists, // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
gpu_pConnectionMatrix // DBSCAN_NUM_SEEDS^2
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
,gpu_pClusterIDLookup // [N]
#endif
);
CUDA_SAFE_CALL( cudaThreadSynchronize());HANDLE_ERROR("cudaThreadSynchronize")
PROFILE_TIME_SECTION_SUM(t_kernel_main);
DBSCAN_VERIFY_CANDIDATES("kernel_main") PROFILE_TIME_SECTION_SUM(t_debug);
// download seedstate from vram
CUDA_SAFE_CALL( cudaMemcpy(pSeedStates,gpu_pSeedStates,sizeof(p->pSeedStates),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pSeedStates readback")
PROFILE_TIME_SECTION_SUM(t_download_states);
// check seedstates
bool bListRefillNeeded = false;
int iNewSeedsNeeded = 0;
bool bSeedFinished[DBSCAN_NUM_SEEDS];
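// triage each seed: a non-empty candidate list keeps expanding, an empty list with not-yet-listed
// members needs the refill kernel, and a seed with neither is finished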
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
bSeedFinished[iSeedID] = false;
unsigned int* piSeedState_NotListedLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 0];
unsigned int* piSeedState_ListLen = &pSeedStates[iSeedID*SEEDSTATEDIM + 1];
unsigned int* piSeedState_NeighborCount = &pSeedStates[iSeedID*SEEDSTATEDIM + 2];
if (*piSeedState_ListLen > 0) continue;
if (*piSeedState_NotListedLen > 0) {
// refill needed
bListRefillNeeded = true;
} else {
// seed finished
bSeedFinished[iSeedID] = true;
iNewSeedsNeeded++;
// if this is the first finished seed found this round : download connection matrix and spread
if (iNewSeedsNeeded == 1) {
CUDA_SAFE_CALL( cudaMemcpy(gpConnectionMatrix,gpu_pConnectionMatrix,sizeof(p->pConnectionMatrix),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy pConnectionMatrix readback")
DBScan_SpreadConnection_DebugDump("seedcheck,first");
DBScan_SpreadConnection();
}
// cleanup
//~ if (*piSeedState_NeighborCount >= DBSCAN_PARAM_MINPTS) { // TODO (noticed while writing the thesis text and removed) : what was this if for ? (bad if last point is noise?)
if (GetFinalClusterIDBySeedID(iSeedID) == INF_32) { // no final id assigned yet, need to generate a new one
unsigned int iFinalClusterID = ++giLastFinalClusterID;
//~ printf("assign seed=%3d cid=%5d\n",iSeedID,iFinalClusterID);
SetFinalClusterIDBySeedID(iSeedID,iFinalClusterID); // generate new cluster id and assign
DBScan_SpreadConnection_DebugDump("seedcheck,cleanup");
DBScan_SpreadConnection_Aux(iSeedID); // spread
}
//~ }
// clear connection matrix entries for this seed
DBScan_ClearConnection(iSeedID);
// generate and assign new final chain id (upload to constant vram later)
unsigned int iFinalChainID = gNextFinalChainID++;
DBScanAssignFinalChainID(iSeedID,iFinalChainID,gpu_pClusterIDLookup);
}
//~ printf("seed %4d : %NotListedLen=%6d ListLen=%6d neighbor=%6d\n",(int)iSeedID,
//~ (int)*piSeedState_NotListedLen,(int)*piSeedState_ListLen,(int)*piSeedState_NeighborCount);
}
PROFILE_TIME_SECTION_SUM(t_check_seedstates);
DBSCAN_VERIFY_CANDIDATES("check_seedstates") PROFILE_TIME_SECTION_SUM(t_debug);
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM
#ifdef DBSCAN_ID_LOOKUP_IN_VRAM_CHECK
// check
if (1) {
unsigned int* temp = (unsigned int*)malloc(N*sizeof(unsigned int));
// download pointstates from vram
CUDA_SAFE_CALL( cudaMemcpy(temp,gpu_pClusterIDLookup,N*sizeof(unsigned int),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pClusterIDLookup debug download")
int c = 0;
for (int i=0;i<N;++i) {
if (temp[i] < INF_32) {
++c;
}
}
if (c > DBSCAN_NUM_SEEDS) {
printf("lookup debug : too many set %d,%d\n",c,(int)DBSCAN_NUM_SEEDS);
exit(0);
}
free(temp);
PROFILE_TIME_SECTION_SUM(t_debug);
}
#endif
#endif
// process finished seeds
int iNumberOfNonRevivableSeeds = 0;
if (iNewSeedsNeeded > 0) {
// upload changed final ids (new chains started)
CUDA_SAFE_CALL( cudaMemcpyToSymbol(gConst_pFinalChainIDs, gpFinalChainIDs, sizeof(p->pFinalChainIDs))); HANDLE_ERROR("cudaMemcpy gpFinalChainIDs upload2") // const mem
// search new seeds in vram by iterating over gpu_pPointState
unsigned int iNewSeedsFound = 0;
if (bFreshSeedsLeft) {
// helper kernel : search a few new seeds (why kernel : candidate ids are in vram)
// new seeds : sum_time="one iteration" : save index last checked and increment until next free point is found
VRAM_WRITE_UINT(&gpu_pHelperData[DBSCAN_NUM_SEEDS+0],0); // counter
MY_ASSERT(NUM_THREADS_NEWSEEDSCAN == grid_size_4.x * block_size.x,"newseeds check");
dbscan_kernel_newseeds<<< grid_size_4, block_size, mem_shared >>>(
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pHelperData,
iNewSeedsNeeded
);
// download gpu_pHelperData from vram
CUDA_SAFE_CALL( cudaMemcpy(pHelperData,gpu_pHelperData,sizeof(unsigned int) * (DBSCAN_NUM_SEEDS+NEWSEEDSCAN_NUM_PARAMS),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy pHelperData readback")
iNewSeedsFound = min(iNewSeedsNeeded,pHelperData[DBSCAN_NUM_SEEDS+0]);
// remember when no fresh seeds can be found anymore
if (iNewSeedsFound < iNewSeedsNeeded) bFreshSeedsLeft = false;
}
// process seeds : assign new seeds or split existing ones
for (iSeedID=0;iSeedID<DBSCAN_NUM_SEEDS;++iSeedID) {
// skip seeds that still have work to do
if (!bSeedFinished[iSeedID]) continue;
// calc common helper vars, and reset seed state
unsigned int iFinalChainID = gpFinalChainIDs[iSeedID];
unsigned int iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
pSeedStates[SEEDSTATEDIM*iSeedID + 0] = 0; //NotListedLen
pSeedStates[SEEDSTATEDIM*iSeedID + 1] = 0; //ListLen
pSeedStates[SEEDSTATEDIM*iSeedID + 2] = 0; //NeighborCount
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = INF_32;
// did we find enough free seeds or do we have to split existing chains ?
if (iNewSeedsFound > 0) {
iNewSeedsFound -= 1;
// assign new seed
n = pHelperData[iNewSeedsFound]; // iNewSeedN
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n; // mark for instant use
// pPointState : write iCandidateID, otherwise it might be marked as candidate by another seedchain
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
} else {
//~ printf("split!\n");
// split
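// no fresh points are available, so revive this seed by stealing the last candidate of the seed
// with the longest list; the stolen point is re-labelled with this seed's candidate ID and the
// connection between both chains is recorded so they end up in the same final cluster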
// choose largest existing
unsigned int iFoundOtherSeedID = INF_32;
unsigned int iFoundOtherListLen = 0;
for (unsigned int iOtherSeedID=0;iOtherSeedID<DBSCAN_NUM_SEEDS;++iOtherSeedID) {
unsigned int iOtherListLen = pSeedStates[iOtherSeedID*SEEDSTATEDIM + 1];
if (iFoundOtherSeedID == INF_32 || iOtherListLen > iFoundOtherListLen) {
iFoundOtherSeedID = iOtherSeedID;
iFoundOtherListLen = iOtherListLen;
}
}
// split chosen
if (iFoundOtherListLen > 1) {
// split only the last candidate from otherseed
unsigned int* iSplitOriginLen = &pSeedStates[iFoundOtherSeedID*SEEDSTATEDIM + 1];
unsigned int iLastIndex = *iSplitOriginLen - 1;
*iSplitOriginLen -= 1;
unsigned int n = VRAM_READ_UINT(&gpu_pCandidateLists[iFoundOtherSeedID*CANDIDATE_LIST_MAXLEN+iLastIndex]);
pSeedStates[SEEDSTATEDIM*iSeedID + 3] = n;
// change candidate seed-assignment to avoid refill-confusion for otherseed
VRAM_WRITE_UINT(&gpu_pPointState[n],iCandidateID);
// mark split-connection
DBScan_SetConnection(iSeedID,iFoundOtherSeedID);
} else {
++iNumberOfNonRevivableSeeds;
// split not possible, algorithm nearing its end
//~ printf("iSeedID:%03d split not possible anymore, algorithm nearing its end\n",(int)iSeedID);
// listlen=0,nonlistlen=0,nextid=INF_32 signals end
}
}
}
// upload changed connection matrix (split)
CUDA_SAFE_CALL( cudaMemcpy(gpu_pConnectionMatrix,gpConnectionMatrix,sizeof(p->pConnectionMatrix),cudaMemcpyHostToDevice )); HANDLE_ERROR("cudaMemcpy pConnectionMatrix upload2")
// upload updated states to vram (changed by new cluster started, not by refill)
CUDA_SAFE_CALL( cudaMemcpy(gpu_pSeedStates, p->pSeedStates, sizeof(p->pSeedStates), cudaMemcpyHostToDevice )); HANDLE_ERROR("cudaMemcpy pSeedStates upload2")
}
PROFILE_TIME_SECTION_SUM(t_finished_seeds);
DBSCAN_VERIFY_CANDIDATES("finished_seeds") PROFILE_TIME_SECTION_SUM(t_debug);
// helper kernel : refill lists (why kernel : candidate ids are in vram)
if (bListRefillNeeded) {
dbscan_kernel_refill<<< grid_size_many_threads_per_seed, block_size, mem_shared >>>(
gpu_pPointState, // N (outlier,candidate:chain-id,finished:chain-id)
gpu_pSeedStates, // DBSCAN_NUM_SEEDS * 3 (real,listlen,iNeighBorCount : atomicops)
gpu_pCandidateLists // DBSCAN_NUM_SEEDS * CANDIDATE_LIST_MAXLEN (fresh-seeds)
);
CUDA_SAFE_CALL( cudaThreadSynchronize());HANDLE_ERROR("cudaThreadSynchronize")
}
PROFILE_TIME_SECTION_SUM(t_kernel_refill);
DBSCAN_VERIFY_CANDIDATES("kernel_refill") PROFILE_TIME_SECTION_SUM(t_debug);
// DETECT algorithm termination
if (iNumberOfNonRevivableSeeds > 0) printf("iNumberOfNonRevivableSeeds=%d\n",(int)iNumberOfNonRevivableSeeds);
if (iNumberOfNonRevivableSeeds >= DBSCAN_NUM_SEEDS) {
printf("algorithm finished\n");
break;
}
//printf("DEBUG:BREAK\n"); break;
iMainRoughPointsLeft -= DBSCAN_NUM_SEEDS - iNumberOfNonRevivableSeeds;
if ((iOutout++ % 16) == 0 || iOutout < 16) printf("iMainRoughPointsLeft=%7d iNewSeedsNeeded=%3d\n",iMainRoughPointsLeft,iNewSeedsNeeded);
} while (1) ;
// cleanup
// download pointstates from vram
CUDA_SAFE_CALL( cudaMemcpy(pPointState,gpu_pPointState,sizeof(p->pClusterIDs),cudaMemcpyDeviceToHost)); HANDLE_ERROR("cudaMemcpy gpu_pPointState final download")
// assign final ids, and count groups
int counter_Init = 0;
int counter_Noise = 0;
int counter_Candidate = 0;
int counter_Final = 0;
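// translate per-point states into final cluster IDs: finished points carry a final chain ID which is
// mapped through giFinalClusterIDs, everything else (init/noise/leftover candidates) becomes INF_32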
for (n=0;n<N;++n) {
unsigned int iState = pPointState[n]; // iCandidateID = DBSCAN_WITHIDX_CLUSTER_ID_FINAL_2_CANDIDATE(iFinalChainID);
unsigned int iNewState = INF_32;
if ( iState >= DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST) {
unsigned int iFinalChainID = iState;
unsigned int iMyIndex = iFinalChainID - DBSCAN_WITHIDX_CLUSTER_ID_FINALCHAINID_FIRST;
MY_ASSERT(iMyIndex < N,"FinalClusterIDIndex From pPointState too high");
iNewState = giFinalClusterIDs[iMyIndex];
++counter_Final;
} else if (iState >= DBSCAN_WITHIDX_CLUSTER_ID_CANDIDATE_FIRST) {
++counter_Candidate;
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_INIT) {
++counter_Init;
} else if (iState == DBSCAN_WITHIDX_CLUSTER_ID_NOISE) {
++counter_Noise;
}
pPointState[n] = iNewState;
}
PROFILE_TIME_SECTION_SUM(t_cleanup);
printf("giDBScanClusterDoubleAssignmentCounter = %d\n",giDBScanClusterDoubleAssignmentCounter);
printf("time profile:\n");
printf("t_kernel_main = %f\n",t_kernel_main );
printf("t_download_states = %f\n",t_download_states );
printf("t_check_seedstates = %f\n",t_check_seedstates );
printf("t_finished_seeds = %f\n",t_finished_seeds );
printf("t_kernel_refill = %f\n",t_kernel_refill );
printf("t_cleanup = %f\n",t_cleanup );
printf("t_debug = %f\n",t_debug );
printf("dbscan final count : Init=%d,Noise=%d,Candidate=%d,Final=%d\n",(int)counter_Init,(int)counter_Noise,(int)counter_Candidate,(int)counter_Final);
sprintf(gsInfoGPUaux,"|double=%d,Init=%d,Noise=%d(%0.1f%%),Candidate=%d,Final=%d",
(int)giDBScanClusterDoubleAssignmentCounter,(int)counter_Init,(int)counter_Noise,(float)(100.0f*float(counter_Noise)/float(N)),(int)counter_Candidate,(int)counter_Final);
if (counter_Init > 0) printf("warning, count(init)>0, algorithm not finished\n");
if (counter_Candidate > 0) printf("warning, count(Candidate)>0, algorithm not finished\n");
}
|
98d834b3c95f75a5c7bd7d96fca48c9f11aa19f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "relu_layer.h"
#include "sigmoid_layer.h"
#include "softmax_layer.h"
#include "dense_matrix.h"
#include "cuda_helper.h"
#include "cuda_unary_kernel.cuh"
#include "sparse_matrix.h"
#include <hip/hip_runtime.h>
#define min(x, y) (x < y ? x : y)
// =========================================== relu layer ================================================
template<typename Dtype>
void ReLULayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
UnaryOp(cur_out.data, prev_out.data, prev_out.count, UnaryReLU<Dtype>(), cur_out.streamid);
}
template<typename Dtype>
__global__ void ReLUDerivKernel(Dtype *dst, Dtype *out, Dtype* cur_grad, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements && out[i] > 0)
{
dst[i] += cur_grad[i];
}
}
template<typename Dtype>
void ReLULayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
dst.Scale(beta);
int thread_num = min(c_uCudaThreadNum, dst.count);
int blocksPerGrid = (dst.count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( ReLUDerivKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[dst.streamid] , dst.data, cur_output.data, cur_grad.data, dst.count);
}
template class ReLULayer<GPU, float>;
template class ReLULayer<GPU, double>;
// =========================================== sigmoid layer ================================================
template<typename Dtype>
void SigmoidLayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
UnaryOp(cur_out.data, prev_out.data, prev_out.count, UnarySigmoid<Dtype>(), cur_out.streamid);
}
template<typename Dtype>
__global__ void SigmoidDerivKernel(Dtype *dst, Dtype* cur_grad, Dtype* cur_output, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
dst[i] += cur_grad[i] * cur_output[i] * (1 - cur_output[i]);
}
}
template<typename Dtype>
void SigmoidLayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
dst.Scale(beta);
int thread_num = min(c_uCudaThreadNum, dst.count);
int blocksPerGrid = (dst.count + thread_num - 1) / thread_num;
hipLaunchKernelGGL(( SigmoidDerivKernel) , dim3(blocksPerGrid), dim3(thread_num), 0, GPUHandle::streams[dst.streamid] , dst.data, cur_grad.data, cur_output.data, dst.count);
}
template class SigmoidLayer<GPU, float>;
template class SigmoidLayer<GPU, double>;
// =========================================== softmax layer ================================================
template<typename Dtype>
void SoftmaxLayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
if (&cur_out != &prev_out)
cur_out.CopyFrom(prev_out);
cur_out.Softmax();
}
// Copied from https://github.com/torch/cunn/blob/master/SoftMax.cu
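// backward pass of softmax: one thread block per row k computes the dot product
// sum_j gradOutput_k[j]*output_k[j] via a shared-memory reduction, then sets
// gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum)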
template<typename Dtype>
__global__ void cunn_SoftMax_updateGradInput_kernel(Dtype *gradInput, Dtype *output, Dtype *gradOutput,
int nframe, int dim)
{
__shared__ Dtype buffer[SOFTMAX_THREADS];
Dtype *gradInput_k = gradInput + blockIdx.x*dim + blockIdx.y;
Dtype *output_k = output + blockIdx.x*dim + blockIdx.y;
Dtype *gradOutput_k = gradOutput + blockIdx.x*dim + blockIdx.y;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
// sum?
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += gradOutput_k[i] * output_k[i];
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Dtype sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
Dtype sum_k = buffer[0];
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
}
template<typename Dtype>
void SoftmaxLayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
buf.Zeros(dst.rows, dst.cols);
dim3 blocks(cur_output.rows, 1);
dim3 threads(SOFTMAX_THREADS);
hipLaunchKernelGGL(( cunn_SoftMax_updateGradInput_kernel), dim3(blocks),
dim3(threads),
0,
GPUHandle::streams[buf.streamid],
buf.data, cur_output.data, cur_grad.data, cur_output.rows, cur_output.cols);
dst.Axpby(1.0, buf, beta);
}
template class SoftmaxLayer<GPU, float>;
template class SoftmaxLayer<GPU, double>; | 98d834b3c95f75a5c7bd7d96fca48c9f11aa19f8.cu | #include "relu_layer.h"
#include "sigmoid_layer.h"
#include "softmax_layer.h"
#include "dense_matrix.h"
#include "cuda_helper.h"
#include "cuda_unary_kernel.cuh"
#include "sparse_matrix.h"
#include <cuda_runtime.h>
#define min(x, y) (x < y ? x : y)
// =========================================== relu layer ================================================
template<typename Dtype>
void ReLULayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
UnaryOp(cur_out.data, prev_out.data, prev_out.count, UnaryReLU<Dtype>(), cur_out.streamid);
}
template<typename Dtype>
__global__ void ReLUDerivKernel(Dtype *dst, Dtype *out, Dtype* cur_grad, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements && out[i] > 0)
{
dst[i] += cur_grad[i];
}
}
template<typename Dtype>
void ReLULayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
dst.Scale(beta);
int thread_num = min(c_uCudaThreadNum, dst.count);
int blocksPerGrid = (dst.count + thread_num - 1) / thread_num;
ReLUDerivKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[dst.streamid] >>>(dst.data, cur_output.data, cur_grad.data, dst.count);
}
template class ReLULayer<GPU, float>;
template class ReLULayer<GPU, double>;
// =========================================== sigmoid layer ================================================
template<typename Dtype>
void SigmoidLayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
UnaryOp(cur_out.data, prev_out.data, prev_out.count, UnarySigmoid<Dtype>(), cur_out.streamid);
}
template<typename Dtype>
__global__ void SigmoidDerivKernel(Dtype *dst, Dtype* cur_grad, Dtype* cur_output, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
dst[i] += cur_grad[i] * cur_output[i] * (1 - cur_output[i]);
}
}
template<typename Dtype>
void SigmoidLayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
dst.Scale(beta);
int thread_num = min(c_uCudaThreadNum, dst.count);
int blocksPerGrid = (dst.count + thread_num - 1) / thread_num;
SigmoidDerivKernel <<< blocksPerGrid, thread_num, 0, GPUHandle::streams[dst.streamid] >>>(dst.data, cur_grad.data, cur_output.data, dst.count);
}
template class SigmoidLayer<GPU, float>;
template class SigmoidLayer<GPU, double>;
// =========================================== softmax layer ================================================
template<typename Dtype>
void SoftmaxLayer<GPU, Dtype>::Act(DenseMat<GPU, Dtype>& prev_out, DenseMat<GPU, Dtype>& cur_out)
{
if (&cur_out != &prev_out)
cur_out.CopyFrom(prev_out);
cur_out.Softmax();
}
// Copied from https://github.com/torch/cunn/blob/master/SoftMax.cu
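// backward pass of softmax: one thread block per row k computes the dot product
// sum_j gradOutput_k[j]*output_k[j] via a shared-memory reduction, then sets
// gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum)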
template<typename Dtype>
__global__ void cunn_SoftMax_updateGradInput_kernel(Dtype *gradInput, Dtype *output, Dtype *gradOutput,
int nframe, int dim)
{
__shared__ Dtype buffer[SOFTMAX_THREADS];
Dtype *gradInput_k = gradInput + blockIdx.x*dim + blockIdx.y;
Dtype *output_k = output + blockIdx.x*dim + blockIdx.y;
Dtype *gradOutput_k = gradOutput + blockIdx.x*dim + blockIdx.y;
int i_start = threadIdx.x;
int i_end = dim;
int i_step = blockDim.x;
// sum?
buffer[threadIdx.x] = 0;
for (int i=i_start; i<i_end; i+=i_step)
buffer[threadIdx.x] += gradOutput_k[i] * output_k[i];
__syncthreads();
// reduce
if (threadIdx.x == 0)
{
Dtype sum_k = 0;
for (int i=0; i<blockDim.x; i++)
sum_k += buffer[i];
buffer[0] = sum_k;
}
__syncthreads();
Dtype sum_k = buffer[0];
for (int i=i_start; i<i_end; i+=i_step)
gradInput_k[i] = output_k[i] * (gradOutput_k[i] - sum_k);
}
template<typename Dtype>
void SoftmaxLayer<GPU, Dtype>::Derivative(DenseMat<GPU, Dtype>& dst, DenseMat<GPU, Dtype>& prev_output,
DenseMat<GPU, Dtype>& cur_output, DenseMat<GPU, Dtype>& cur_grad, Dtype beta)
{
buf.Zeros(dst.rows, dst.cols);
dim3 blocks(cur_output.rows, 1);
dim3 threads(SOFTMAX_THREADS);
cunn_SoftMax_updateGradInput_kernel<<<blocks,
threads,
0,
GPUHandle::streams[buf.streamid]>>>
(buf.data, cur_output.data, cur_grad.data, cur_output.rows, cur_output.cols);
dst.Axpby(1.0, buf, beta);
}
template class SoftmaxLayer<GPU, float>;
template class SoftmaxLayer<GPU, double>; |
941b06ca1a450f0be3f5b16be4547135a53bcb6a.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <benchmark/benchmark.h>
#include <vector>
#include "dali/benchmark/dali_bench.h"
#include "dali/kernels/slice/slice_gpu.cuh"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
#include "dali/kernels/scratch.h"
namespace dali {
constexpr int Dims = 3;
using InputType = float;
using OutputType = float;
class SliceBenchGPU : public DALIBenchmark {
public:
kernels::TestTensorList<InputType, Dims> test_data;
kernels::TestTensorList<OutputType, Dims> out_data;
void Setup(const TensorShape<Dims> &in_shape,
const TensorShape<Dims> &out_shape,
int batch_size = 1) {
test_data.reshape(uniform_list_shape<Dims>(batch_size, in_shape));
InputType num = 0;
auto seq_gen = [&num]() { return num++; };
Fill(test_data.cpu(), seq_gen);
out_data.reshape(uniform_list_shape<Dims>(batch_size, out_shape));
}
void RunGPU(benchmark::State& st) {
int H = st.range(0);
int W = st.range(1);
int C = st.range(2);
int anchor_h = st.range(3);
int anchor_w = st.range(4);
int anchor_c = st.range(5);
int crop_h = st.range(6);
int crop_w = st.range(7);
int crop_c = st.range(8);
int batch_size = st.range(9);
TensorShape<Dims> in_shape{H, W, C};
TensorShape<Dims> anchor{anchor_h, anchor_w, anchor_c};
TensorShape<Dims> out_shape{crop_h, crop_w, crop_c};
Setup(in_shape, out_shape, batch_size);
using Kernel = kernels::SliceGPU<OutputType, InputType, Dims>;
Kernel kernel;
std::vector<kernels::SliceArgs<OutputType, Dims>> args_vec(batch_size);
for (auto &args : args_vec) {
args.anchor = anchor;
args.shape = out_shape;
}
auto out_tv = out_data.gpu();
auto in_tv = test_data.gpu();
for (auto _ : st) {
kernels::KernelContext ctx;
ctx.gpu.stream = 0;
auto req = kernel.Setup(ctx, in_tv, args_vec);
kernels::ScratchpadAllocator scratch_alloc;
scratch_alloc.Reserve(req.scratch_sizes);
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
kernel.Run(ctx, out_tv, in_tv, args_vec);
hipStreamSynchronize(ctx.gpu.stream);
st.counters["FPS"] = benchmark::Counter(st.iterations() + 1,
benchmark::Counter::kIsRate);
}
}
};
static void SliceKernelArgs_GPU_OnlySlice(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlySlice)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlySlice)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_OnlySlice);
static void SliceKernelArgs_GPU_OnlyPad(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
int anchor_h = H;
int anchor_w = W;
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlyPad)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlyPad)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_OnlyPad);
static void SliceKernelArgs_GPU_SliceAndPad(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
int anchor_h = H / 2;
int anchor_w = W / 2;
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_SliceAndPad)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_SliceAndPad)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_SliceAndPad);
} // namespace dali
| 941b06ca1a450f0be3f5b16be4547135a53bcb6a.cu | // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <benchmark/benchmark.h>
#include <vector>
#include "dali/benchmark/dali_bench.h"
#include "dali/kernels/slice/slice_gpu.cuh"
#include "dali/test/tensor_test_utils.h"
#include "dali/test/test_tensors.h"
#include "dali/kernels/scratch.h"
namespace dali {
constexpr int Dims = 3;
using InputType = float;
using OutputType = float;
class SliceBenchGPU : public DALIBenchmark {
public:
kernels::TestTensorList<InputType, Dims> test_data;
kernels::TestTensorList<OutputType, Dims> out_data;
void Setup(const TensorShape<Dims> &in_shape,
const TensorShape<Dims> &out_shape,
int batch_size = 1) {
test_data.reshape(uniform_list_shape<Dims>(batch_size, in_shape));
InputType num = 0;
auto seq_gen = [&num]() { return num++; };
Fill(test_data.cpu(), seq_gen);
out_data.reshape(uniform_list_shape<Dims>(batch_size, out_shape));
}
void RunGPU(benchmark::State& st) {
int H = st.range(0);
int W = st.range(1);
int C = st.range(2);
int anchor_h = st.range(3);
int anchor_w = st.range(4);
int anchor_c = st.range(5);
int crop_h = st.range(6);
int crop_w = st.range(7);
int crop_c = st.range(8);
int batch_size = st.range(9);
TensorShape<Dims> in_shape{H, W, C};
TensorShape<Dims> anchor{anchor_h, anchor_w, anchor_c};
TensorShape<Dims> out_shape{crop_h, crop_w, crop_c};
Setup(in_shape, out_shape, batch_size);
using Kernel = kernels::SliceGPU<OutputType, InputType, Dims>;
Kernel kernel;
std::vector<kernels::SliceArgs<OutputType, Dims>> args_vec(batch_size);
for (auto &args : args_vec) {
args.anchor = anchor;
args.shape = out_shape;
}
auto out_tv = out_data.gpu();
auto in_tv = test_data.gpu();
for (auto _ : st) {
kernels::KernelContext ctx;
ctx.gpu.stream = 0;
auto req = kernel.Setup(ctx, in_tv, args_vec);
kernels::ScratchpadAllocator scratch_alloc;
scratch_alloc.Reserve(req.scratch_sizes);
auto scratchpad = scratch_alloc.GetScratchpad();
ctx.scratchpad = &scratchpad;
kernel.Run(ctx, out_tv, in_tv, args_vec);
cudaStreamSynchronize(ctx.gpu.stream);
st.counters["FPS"] = benchmark::Counter(st.iterations() + 1,
benchmark::Counter::kIsRate);
}
}
};
static void SliceKernelArgs_GPU_OnlySlice(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, 0, 0, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlySlice)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlySlice)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_OnlySlice);
static void SliceKernelArgs_GPU_OnlyPad(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
int anchor_h = H;
int anchor_w = W;
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_OnlyPad)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_OnlyPad)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_OnlyPad);
static void SliceKernelArgs_GPU_SliceAndPad(benchmark::internal::Benchmark *b) {
for (int H = 1000; H >= 500; H /= 2) {
int W = H, C = 3;
int crop_h = 9 * H / 10;
int crop_w = 9 * W / 10;
int anchor_h = H / 2;
int anchor_w = W / 2;
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 1});
b->Args({H, W, C, anchor_h, anchor_w, 0, crop_h, crop_w, C, 10});
}
}
BENCHMARK_DEFINE_F(SliceBenchGPU, Slice_GPU_SliceAndPad)(benchmark::State& st) {
this->RunGPU(st);
}
BENCHMARK_REGISTER_F(SliceBenchGPU, Slice_GPU_SliceAndPad)->Iterations(1000)
->Unit(benchmark::kMicrosecond)
->UseRealTime()
->Apply(SliceKernelArgs_GPU_SliceAndPad);
} // namespace dali
|
c6b33d0ca0a79c7aed5ee3214c3cdc8644c53733.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Mask_Intersect_Kernel( int* A, int* B, int* devOut)
{
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
devOut[idx] = A[idx] * B[idx];
} | c6b33d0ca0a79c7aed5ee3214c3cdc8644c53733.cu | #include "includes.h"
__global__ void Mask_Intersect_Kernel( int* A, int* B, int* devOut)
{
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
devOut[idx] = A[idx] * B[idx];
} |
13bddae2773526af68f871bbbf238a4ae28dbfca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void pnpoly_cnGPU2(const float *px, const float *py, const float *vx, const float *vy, char* cs, int npoint, int nvert)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < npoint) {
// each thread tests its own point against the polygon (crossing-number / even-odd rule)
float tpx = px[i];
float tpy = py[i];
int j, k, c = 0;
for (j = 0, k = nvert-1; j < nvert; k = j++) {
if ( ((vy[j]>tpy) != (vy[k]>tpy)) &&
(tpx < (vx[k]-vx[j]) * (tpy-vy[j]) / (vy[k]-vy[j]) + vx[j]) )
c = !c;
}
cs[i] = c & 1;
}
} | 13bddae2773526af68f871bbbf238a4ae28dbfca.cu | #include "includes.h"
__global__ void pnpoly_cnGPU2(const float *px, const float *py, const float *vx, const float *vy, char* cs, int npoint, int nvert)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < npoint) {
// each thread tests its own point against the polygon (crossing-number / even-odd rule)
float tpx = px[i];
float tpy = py[i];
int j, k, c = 0;
for (j = 0, k = nvert-1; j < nvert; k = j++) {
if ( ((vy[j]>tpy) != (vy[k]>tpy)) &&
(tpx < (vx[k]-vx[j]) * (tpy-vy[j]) / (vy[k]-vy[j]) + vx[j]) )
c = !c;
}
cs[i] = c & 1;
}
} |
a9b3b9962197d5986ef523b29771a2e71dfd5358.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <torch/torch.h>
#include <vector>
#include <iostream>
namespace rcnn{
namespace layers{
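// for each prediction, pick the ground-truth box with the highest IoU and write its index;
// IoU below low_th yields -1 and IoU between the thresholds yields -2, unless low-quality
// matches are allowed and the prediction was "forgiven" as some ground truth's best match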
__launch_bounds__(256) static __global__
void max_along_gt_idx(float *match, unsigned char *pred_forgiven, long *max_gt_idx, long long gt,long long preds,
bool include_low_quality, float low_th, float high_th) {
long long tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < preds){
float max_iou = 0.0f;
int max_idx = 0;
float iou;
for(long long i = 0;i < gt; i++){
iou = match[i * preds + tid];
if (iou > max_iou) {max_iou = iou; max_idx = i;}
}
if (max_iou >= high_th) max_gt_idx[tid] = max_idx;
else if ((pred_forgiven[tid] == 1 && include_low_quality)) max_gt_idx[tid] = max_idx;
else if (max_iou < low_th) max_gt_idx[tid] = -1;
else if (max_iou < high_th) max_gt_idx[tid] = -2;
}
}
__device__ void warpReduce(volatile float* sdata, int tid) {
sdata[tid] = fmax(sdata[tid],sdata[tid + 32]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 16]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 8]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 4]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 2]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 1]);
}
static __global__
void max_along_preds(float* match, float* inter_gt, long long gt,long long preds) {
int gt_idx = blockIdx.x;
int chunk_idx = blockIdx.y;
int gt_offset = chunk_idx * 2048;
int start_idx = gt_idx * preds + gt_offset;
int idx = threadIdx.x;
__shared__ float shbuf[1024];
shbuf[idx] = 0.0f;
__syncthreads();
if(gt_offset + idx + 1024 < preds) shbuf[idx] = fmax(match[start_idx + idx], match[start_idx + idx + 1024]);
else if (gt_offset + idx < preds) shbuf[idx] = match[start_idx + idx];
__syncthreads();
if(idx < 512) shbuf[idx] = fmax(shbuf[idx],shbuf[idx + 512]);
__syncthreads();
if(idx < 256) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 256]);
__syncthreads();
if(idx < 128) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 128]);
__syncthreads();
if(idx < 64) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 64]);
__syncthreads();
if(idx < 32) warpReduce(shbuf, idx);
if (idx == 0) inter_gt[((preds + 2047) / 2048) * gt_idx + chunk_idx] = shbuf[idx];
}
__launch_bounds__(256) static __global__
void max_along_preds_reduced(float *match, float *max_preds, long long gt,long long preds) {
long long tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < gt){
float max_iou = 0.0f;
float iou;
for(long long i = 0; i < preds; i++){
iou = match[tid * preds + i];
if (iou > max_iou) max_iou = iou;
}
max_preds[tid] = max_iou;
}
}
__launch_bounds__(256) static __global__
void forgive_preds(float *match_quality_data, float *d_best_pred_per_gt, unsigned char *d_pred_forgiven,
long gt, long preds) {
long tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < preds){
unsigned char forgiven = 0;
float iou;
for(int i = 0; i < gt; i++){
iou = match_quality_data[i * preds + tid];
if(iou == d_best_pred_per_gt[i]){
forgiven = 1;
break;
}
}
d_pred_forgiven[tid] = forgiven;
}
}
torch::Tensor match_proposals_cuda(torch::Tensor match_quality_matrix, bool allow_low_quality_matches,
float low_th, float high_th){
int current_device;
THCudaCheck(hipGetDevice(¤t_device));
THCudaCheck(hipSetDevice(match_quality_matrix.get_device()));
int gt = match_quality_matrix.size(0);
long long preds = match_quality_matrix.size(1);
float *match_quality_data = match_quality_matrix.data<float>();
using namespace torch;
//predictions are reduced by chunks of 2048 elements per block
int num_chunks = (preds + 2047) / 2048;
auto result = torch::ones({preds}, torch::CUDA(torch::kLong));
torch::Tensor best_pred_per_gt = torch::zeros({gt}, torch::CUDA(torch::kFloat));
torch::Tensor pred_forgiven = torch::zeros({preds}, torch::CUDA(torch::kByte));
torch::Tensor intergt = torch::zeros({gt * num_chunks}, torch::CUDA(torch::kFloat));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
//do an intermediate reduction along all predictions for each gt
dim3 block(1024, 1, 1);
dim3 grid(gt, num_chunks, 1);
if (allow_low_quality_matches)hipLaunchKernelGGL(( max_along_preds), dim3(grid), dim3(block), 0, stream.stream(),
match_quality_matrix.data<float>(),
intergt.data<float>(),
gt,
preds);
//final reduction to find best iou per gt
int numThreads = 256;
int numBlocks=(gt + numThreads - 1) / numThreads;
if (allow_low_quality_matches)hipLaunchKernelGGL(( max_along_preds_reduced), dim3(numBlocks), dim3(numThreads), 0, stream.stream(),
intergt.data<float>(),
best_pred_per_gt.data<float>(),
gt,
num_chunks);
numBlocks=(preds + numThreads - 1) / numThreads;
//if low_quality_matches are allowed, mark some predictions to keep their best matching gt even though
//iou < threshold
if (allow_low_quality_matches)hipLaunchKernelGGL(( forgive_preds), dim3(numBlocks), dim3(numThreads), 0, stream.stream(),
match_quality_matrix.data<float>(),
best_pred_per_gt.data<float>(),
pred_forgiven.data<unsigned char>(),
gt,
preds);
//compute resulting tensor of indices
hipLaunchKernelGGL(( max_along_gt_idx), dim3(numBlocks), dim3(numThreads), 0, stream.stream(), match_quality_matrix.data<float>(),
pred_forgiven.data<unsigned char>(),
result.data<long>(),
gt,
preds,
allow_low_quality_matches,
low_th,
high_th);
THCudaCheck(hipSetDevice(current_device));
return result;
}
}
} | a9b3b9962197d5986ef523b29771a2e71dfd5358.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <torch/torch.h>
#include <vector>
#include <iostream>
namespace rcnn{
namespace layers{
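// for each prediction, pick the ground-truth box with the highest IoU and write its index;
// IoU below low_th yields -1 and IoU between the thresholds yields -2, unless low-quality
// matches are allowed and the prediction was "forgiven" as some ground truth's best match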
__launch_bounds__(256) static __global__
void max_along_gt_idx(float *match, unsigned char *pred_forgiven, long *max_gt_idx, long long gt,long long preds,
bool include_low_quality, float low_th, float high_th) {
long long tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < preds){
float max_iou = 0.0f;
int max_idx = 0;
float iou;
for(long long i = 0;i < gt; i++){
iou = match[i * preds + tid];
if (iou > max_iou) {max_iou = iou; max_idx = i;}
}
if (max_iou >= high_th) max_gt_idx[tid] = max_idx;
else if ((pred_forgiven[tid] == 1 && include_low_quality)) max_gt_idx[tid] = max_idx;
else if (max_iou < low_th) max_gt_idx[tid] = -1;
else if (max_iou < high_th) max_gt_idx[tid] = -2;
}
}
__device__ void warpReduce(volatile float* sdata, int tid) {
sdata[tid] = fmax(sdata[tid],sdata[tid + 32]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 16]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 8]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 4]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 2]);
sdata[tid] = fmax(sdata[tid],sdata[tid + 1]);
}
static __global__
void max_along_preds(float* match, float* inter_gt, long long gt,long long preds) {
int gt_idx = blockIdx.x;
int chunk_idx = blockIdx.y;
int gt_offset = chunk_idx * 2048;
int start_idx = gt_idx * preds + gt_offset;
int idx = threadIdx.x;
__shared__ float shbuf[1024];
shbuf[idx] = 0.0f;
__syncthreads();
if(gt_offset + idx + 1024 < preds) shbuf[idx] = fmax(match[start_idx + idx], match[start_idx + idx + 1024]);
else if (gt_offset + idx < preds) shbuf[idx] = match[start_idx + idx];
__syncthreads();
if(idx < 512) shbuf[idx] = fmax(shbuf[idx],shbuf[idx + 512]);
__syncthreads();
if(idx < 256) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 256]);
__syncthreads();
if(idx < 128) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 128]);
__syncthreads();
if(idx < 64) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 64]);
__syncthreads();
if(idx < 32) warpReduce(shbuf, idx);
if (idx == 0) inter_gt[((preds + 2047) / 2048) * gt_idx + chunk_idx] = shbuf[idx];
}
__launch_bounds__(256) static __global__
void max_along_preds_reduced(float *match, float *max_preds, long long gt,long long preds) {
long long tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < gt){
float max_iou = 0.0f;
float iou;
for(long long i = 0; i < preds; i++){
iou = match[tid * preds + i];
if (iou > max_iou) max_iou = iou;
}
max_preds[tid] = max_iou;
}
}
__launch_bounds__(256) static __global__
void forgive_preds(float *match_quality_data, float *d_best_pred_per_gt, unsigned char *d_pred_forgiven,
long gt, long preds) {
long tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < preds){
unsigned char forgiven = 0;
float iou;
for(int i = 0; i < gt; i++){
iou = match_quality_data[i * preds + tid];
if(iou == d_best_pred_per_gt[i]){
forgiven = 1;
break;
}
}
d_pred_forgiven[tid] = forgiven;
}
}
torch::Tensor match_proposals_cuda(torch::Tensor match_quality_matrix, bool allow_low_quality_matches,
float low_th, float high_th){
int current_device;
THCudaCheck(cudaGetDevice(¤t_device));
THCudaCheck(cudaSetDevice(match_quality_matrix.get_device()));
int gt = match_quality_matrix.size(0);
long long preds = match_quality_matrix.size(1);
float *match_quality_data = match_quality_matrix.data<float>();
using namespace torch;
//predictions are reduced by chunks of 2048 elements per block
int num_chunks = (preds + 2047) / 2048;
auto result = torch::ones({preds}, torch::CUDA(torch::kLong));
torch::Tensor best_pred_per_gt = torch::zeros({gt}, torch::CUDA(torch::kFloat));
torch::Tensor pred_forgiven = torch::zeros({preds}, torch::CUDA(torch::kByte));
torch::Tensor intergt = torch::zeros({gt * num_chunks}, torch::CUDA(torch::kFloat));
auto stream = at::cuda::getCurrentCUDAStream();
//do an intermediate reduction along all predictions for each gt
dim3 block(1024, 1, 1);
dim3 grid(gt, num_chunks, 1);
if (allow_low_quality_matches) max_along_preds<<<grid, block, 0, stream.stream()>>>(
match_quality_matrix.data<float>(),
intergt.data<float>(),
gt,
preds);
//final reduction to find best iou per gt
int numThreads = 256;
int numBlocks=(gt + numThreads - 1) / numThreads;
if (allow_low_quality_matches) max_along_preds_reduced<<<numBlocks, numThreads, 0, stream.stream()>>>(
intergt.data<float>(),
best_pred_per_gt.data<float>(),
gt,
num_chunks);
numBlocks=(preds + numThreads - 1) / numThreads;
//if low_quality_matches are allowed, mark some predictions to keep their best matching gt even though
//iou < threshold
if (allow_low_quality_matches) forgive_preds<<<numBlocks, numThreads, 0, stream.stream()>>>(
match_quality_matrix.data<float>(),
best_pred_per_gt.data<float>(),
pred_forgiven.data<unsigned char>(),
gt,
preds);
//compute resulting tensor of indices
max_along_gt_idx<<<numBlocks, numThreads, 0, stream.stream()>>>(match_quality_matrix.data<float>(),
pred_forgiven.data<unsigned char>(),
result.data<long>(),
gt,
preds,
allow_low_quality_matches,
low_th,
high_th);
THCudaCheck(cudaSetDevice(current_device));
return result;
}
}
} |
2d4efec3e68468986b8bd002101d94187bfea016.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "bit_line_maker.h"
#include "update_management_helper.h"
#include <cmath>
#include <iostream>
#include <memory>
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "io_iterator.h"
#include <hipcub/hipcub.hpp>
namespace RPU {
// /*********************************************************************************/
/*--------- K<32 special path for large batch sizes. -------------------------------*/
template <bool ublm>
__device__ __forceinline__ kagg_t getKn(const int m_batch, const int BL, const kagg_t *nK);
template <bool ublm>
__device__ __forceinline__ kagg_t getBlockAggregate(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate);
template <bool ublm>
__device__ __forceinline__ int getK(int batch_idx, const int BL, int *K_values);
template <bool ublm>
__device__ __forceinline__ kagg_t
getCurrentKc(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate);
template <>
__device__ __forceinline__ kagg_t getKn<false>(const int m_batch, const int BL, const kagg_t *Kn) {
return m_batch * BL;
}
template <>
__device__ __forceinline__ kagg_t getKn<true>(const int m_batch, const int BL, const kagg_t *Kn) {
return *Kn;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<false>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
if ((thread_block_size + tid_stride) >= m_batch)
return (m_batch % thread_block_size) * BL;
else
return thread_block_size * BL;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<true>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
int bid = tid_stride / thread_block_size;
return Kc_block_aggregate[bid];
}
template <> __device__ __forceinline__ int getK<false>(int batch_idx, const int BL, int *K_values) {
return BL;
}
template <> __device__ __forceinline__ int getK<true>(int batch_idx, const int BL, int *K_values) {
return K_values[batch_idx];
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<false>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return batch_idx * BL - Kn_aggregate;
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<true>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return Kc_block[batch_idx];
}
template <bool update_bl_management = false, int thread_block_size = 512>
__global__ void kernelTranslateTransFormatToBatchOrder64Format(
const uint32_t *x_counts,
uint64_t *x_counts_BO64_format,
int x_size_in,
const uint32_t *d_counts,
uint64_t *d_counts_BO64_format,
int d_size_in,
const int m_batch_in,
const int BL_in,
kagg_t *Kn_in = nullptr,
int *K_values_in = nullptr,
kagg_t *Kc_block_in = nullptr,
kagg_t *Kc_block_aggregate_in = nullptr) {
// -- each block takes one x/d value.
// -- expects OUTTRANS format !!
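  // Layout of each output uint64, as inferred from the packing below (not from external docs): the low
  // 32 bits hold the count bits and the high 32 bits hold the matching sign bits for 32 consecutive bit
  // positions of the concatenated per-batch K-bit streams.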
__shared__ uint32_t c_shared[thread_block_size];
__shared__ uint32_t neg_shared[thread_block_size];
const int m_batch = m_batch_in;
const int BL = BL_in;
kagg_t Kn = getKn<update_bl_management>(m_batch, BL, Kn_in);
const int x_size = x_size_in;
const int d_size = d_size_in;
const int add_size = x_size + d_size;
int nB = ((Kn + 31) >> 5); // compressed K on batch
  // loop xd indices
for (int bid_stride = 0; bid_stride < add_size; bid_stride += gridDim.x) {
int bid = blockIdx.x + bid_stride;
// select x or d
const uint32_t *counts;
uint64_t *out_counts;
int xd_index;
if (bid < x_size) {
counts = x_counts;
out_counts = x_counts_BO64_format;
xd_index = bid;
} else if (bid < add_size) {
counts = d_counts;
out_counts = d_counts_BO64_format;
xd_index = bid - x_size;
} else {
return;
}
const int start_idx = xd_index * m_batch; // expects trans order !!
const int out_start_idx = xd_index * nB; // reduced batch size
int total_nB = 0;
uint32_t last_neg = 0;
uint32_t last_c = 0;
int current_nB = 0;
kagg_t Kc_aggregate = 0;
int K_left_over = 0;
// loop over batch
for (int tid_stride = 0; tid_stride < m_batch; tid_stride += blockDim.x) {
if (threadIdx.x > 0) {
c_shared[threadIdx.x] = 0;
neg_shared[threadIdx.x] = 0;
}
if (threadIdx.x == current_nB) { // to avoid a sync, see below
c_shared[0] = last_c;
neg_shared[0] = last_neg;
}
const int batch_idx = threadIdx.x + tid_stride;
kagg_t Kc_block_aggregate = getBlockAggregate<update_bl_management>(
m_batch, BL, tid_stride, thread_block_size, Kc_block_aggregate_in);
kagg_t current_Kc = Kc_block_aggregate;
int K = 0;
if (batch_idx < m_batch) {
K = getK<update_bl_management>(batch_idx, BL, K_values_in);
current_Kc = getCurrentKc<update_bl_management>(batch_idx, BL, Kc_block_in, Kc_aggregate);
}
Kc_block_aggregate += K_left_over;
current_Kc += K_left_over;
__syncthreads(); // need to sync for shared
if (batch_idx < m_batch) {
uint32_t c = counts[start_idx + batch_idx];
uint32_t negative = 0;
if ((c & ((uint32_t)1)) > 0) {
negative = 0xffffffff >> (32 - K);
}
c >>= 1; // get rid of negative bit
// set bit in shared
int i_word_start = current_Kc >> 5;
int i_word_end = (current_Kc + K) >> 5;
int i_bit_start = current_Kc & 0x1f;
atomicOr(&c_shared[i_word_start], c << i_bit_start);
atomicOr(&neg_shared[i_word_start], negative << i_bit_start);
if (i_word_start != i_word_end) { // most 31 bits per batch, so only 1 overlap possible
atomicOr(
&c_shared[i_word_end],
c >> (32 - i_bit_start)); // (32 - i_bit_start) first bits were already set above
atomicOr(&neg_shared[i_word_end], negative >> (32 - i_bit_start));
}
}
__syncthreads();
Kc_aggregate += Kc_block_aggregate;
kagg_t current_nB =
Kc_block_aggregate >> 5; // there might be some left over bits. put into next round
bool last_loop = tid_stride + blockDim.x >= m_batch;
K_left_over = Kc_aggregate & 0x1f;
bool left_overs = K_left_over > 0;
if ((threadIdx.x < current_nB) || ((threadIdx.x == current_nB) && last_loop && left_overs)) {
uint64_t c64 =
(((uint64_t)neg_shared[threadIdx.x]) << 32) | ((uint64_t)c_shared[threadIdx.x]);
out_counts[out_start_idx + total_nB + threadIdx.x] = c64;
} else if ((threadIdx.x == current_nB) && left_overs) { // save left overs
last_neg = neg_shared[current_nB];
last_c = c_shared[current_nB];
}
total_nB += current_nB;
}
}
}
namespace test_helper {
template <typename T, bool ublm>
int debugKernelTranslateTransFormatToBatchOrder64Format(
T *indata, int size, int m_batch, T scaleprob, int K) {
// counts should be: size*nk32 allocated !
if (K > 31)
return 1;
DebugPulsedUpdateMetaParameter<T> up;
up.res = 0.01;
up.sto_round = false;
up.update_bl_management = ublm;
up.update_management = ublm;
up.scaleprob = scaleprob;
up.desired_BL = K;
std::cout << "m_batch: " << m_batch << " size: " << size << std::endl;
const int nthreads = RPU_THREADS_PER_BLOCK_UPDATE;
CUDA_TIMING_INIT;
CudaContext c{-1, false};
T *tmp = new T[size * m_batch];
for (int i = 0; i < m_batch; i++) {
for (int j = 0; j < size; j++) {
tmp[i * size + j] = indata[j];
}
}
CudaArray<T> dev_indata(&c, size * m_batch, tmp);
c.synchronize();
delete[] tmp;
T dwmin = 0.001;
T lr = 0.01;
BitLineMaker<T> blm(&c, size, size);
blm.makeCounts(
dev_indata.getData(), dev_indata.getData(), up, dwmin, lr, m_batch, false, false, true, 2,
false); // compute B64 to init buffer for below
UpdateManagementHelper<T> *umh = blm.getUmh();
c.synchronize();
int nBmax = m_batch; // at most m_batch, likely smaller
CudaArray<uint64_t> dev_counts_out(&c, size * nBmax);
CudaArray<uint64_t> dev_counts_out2(&c, size * nBmax);
int nblocks = size + size;
std::cout << "nblocks, nthreads: " << nblocks << ", " << nthreads << std::endl;
CUDA_TIMING_START(c);
kagg_t *nK = nullptr;
int *K_values = nullptr;
kagg_t *Kc_block = nullptr;
kagg_t *Kc_block_aggregate = nullptr;
if (ublm) {
// redo computation for timing
umh->computeKn(m_batch); // needs explicit buffer init. see above.
nK = umh->getKnData(true);
K_values = umh->getKValueData();
umh->computeKcBlock(m_batch);
Kc_block = umh->getKcBlockData();
Kc_block_aggregate = umh->getKcBlockAggregateData();
}
CUDA_TIMING_STOP(c, "get Kn/Kcblock ");
CUDA_TIMING_START(c);
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<ublm, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, c.getStream(),
blm.getXCountsData(), dev_counts_out.getData(), size, blm.getDCountsData(),
dev_counts_out2.getData(), size, m_batch, K, nK, K_values, Kc_block, Kc_block_aggregate);
CUDA_TIMING_STOP(c, "Counts translated");
kagg_t Kn = 0;
if (ublm)
Kn = umh->getKnValue();
else
Kn = m_batch * K;
kagg_t nB = (Kn + 31) / 32;
// check translated:
int *Kvalues = new int[m_batch];
if (ublm)
umh->getKValues().copyTo(Kvalues);
uint32_t *orig_counts = new uint32_t[m_batch * size];
uint64_t *counts_out = new uint64_t[m_batch * size];
uint64_t *counts_out_ref = new uint64_t[m_batch * size];
dev_counts_out2.copyTo(counts_out);
blm.copyDCountsToHost(orig_counts);
for (int j = 0; j < m_batch * size; j++) {
counts_out_ref[j] = 0;
}
c.synchronize();
int return_int = 0;
kagg_t Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
if (ublm)
Kc += Kvalues[i_batch];
else
Kc += K;
}
int nBref = (Kc + 31) >> 5;
uint32_t one = 1;
// translate reference
for (int idx = 0; idx < size; idx++) {
Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
uint32_t c = orig_counts[i_batch + m_batch * idx];
uint32_t neg = c & one;
c >>= 1; // get rid of sign bit;
int k = K;
if (ublm)
k = Kvalues[i_batch];
for (int i = 0; i < k; i++) { // k is smaller than 32 because nK32==1
kagg_t current_cK = Kc + i;
kagg_t iB = (current_cK) >> 5;
int ibit = (current_cK)&0x1f;
if ((c & (one << i)) > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << ibit;
}
if (neg > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << (ibit + 32);
}
}
Kc += k;
}
}
std::cout << "nB should be " << nBref << " and is " << nB << ".\n";
if (nB != nBref) {
return_int = 1;
}
for (int j = 0; j < nBref * size; j++) {
if (counts_out_ref[j] != counts_out[j]) {
std::cerr << j << ":" << counts_out[j] << " should be " << counts_out_ref[j] << std::endl;
return_int = 1;
}
if ((j > 100) && return_int)
break;
}
delete[] counts_out;
delete[] orig_counts;
delete[] counts_out_ref;
delete[] Kvalues;
CUDA_TIMING_DESTROY;
return return_int;
}
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, true>(float *, int, int, float, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, false>(float *, int, int, float, int);
#ifdef RPU_USE_DOUBLE
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, true>(double *, int, int, double, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, false>(double *, int, int, double, int);
#endif
} // namespace test_helper
template <typename T>
__global__ void kernelUMGetScaleAndKValues(
T *scale_values,
int *K_values,
float *x_amax_values,
float *d_amax_values,
const int m_batch,
const bool ublm_in,
const T dw_min_in,
const T lr_in,
const int Kmax_in) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
bool ublm = ublm_in;
T dw_min = dw_min_in;
T lr = lr_in;
T regularizer = dw_min * dw_min;
if (tid < m_batch) {
T x_amax = MAX(x_amax_values[tid], regularizer);
T d_amax = MAX(d_amax_values[tid], regularizer);
scale_values[tid] = sqrt(x_amax / d_amax);
if (ublm) {
int Kmax = Kmax_in;
int K = ceil(lr * x_amax * d_amax / dw_min);
K_values[tid] = (K <= Kmax) ? K : Kmax;
}
// note: K values are not set in case of ~ublm
}
}
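// Illustrative example with made-up values: x_amax = 2.0, d_amax = 0.5, lr = 0.01, dw_min = 0.001 and
// Kmax = 31 give scale = sqrt(2.0 / 0.5) = 2.0 and K = ceil(0.01 * 2.0 * 0.5 / 0.001) = 10 (within Kmax).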
template <int thread_block_size>
__global__ void kernelGetKBlockAggregate(
int *K_values, int m_batch_in, kagg_t *Kc_block, kagg_t *Kc_block_aggregate) {
const int m_batch = m_batch_in;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename RPU::hipcub::BlockScan<kagg_t, thread_block_size>::TempStorage temp_storage;
int K = 0;
if (tid < m_batch) {
K = K_values[tid];
}
kagg_t Kc = 0;
kagg_t block_aggregate = 0;
RPU::hipcub::BlockScan<kagg_t, thread_block_size>(temp_storage).ExclusiveSum(K, Kc, block_aggregate);
if (tid < m_batch) {
Kc_block[tid] = Kc;
}
if (threadIdx.x == 0) {
Kc_block_aggregate[blockIdx.x] = block_aggregate;
}
}
/*********************************************************************************************************************/
/* UPDATEMANAGERHELPER */
/*********************************************************************************************************************/
#define RPU_UMH_B64_NTHREADS 512
template <typename T>
UpdateManagementHelper<T>::UpdateManagementHelper(CudaContext *c, int x_size, int d_size)
: context_{c}, x_size_{x_size}, d_size_{d_size}, buffer_m_batch_{0} {
nthreads_ = RPU_THREADS_PER_BLOCK_UPDATE;
x_maximizer_ = RPU::make_unique<Maximizer<T>>(c, x_size_);
d_maximizer_ = RPU::make_unique<Maximizer<T>>(c, d_size_);
dev_Kn_ = RPU::make_unique<CudaArray<kagg_t>>(c, 1);
}
template <typename T> void UpdateManagementHelper<T>::initializeBuffers(int m_batch) {
buffer_m_batch_ = m_batch;
dev_K_values_ = RPU::make_unique<CudaArray<int>>(context_, m_batch);
dev_Kc_values_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_scale_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch);
// for translate
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
dev_Kc_block_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_Kc_block_aggregate_ = RPU::make_unique<CudaArray<kagg_t>>(context_, nblocks);
// Determine temporary device storage requirements
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
CUDA_CALL(RPU::hipcub::DeviceReduce::Sum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kn_->getData(), m_batch,
context_->getStream()));
context_->synchronize();
dev_Kn_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
temp_storage = NULL;
temp_storage_bytes = 0;
CUDA_CALL(RPU::hipcub::DeviceScan::ExclusiveSum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kc_values_->getData(),
m_batch, context_->getStream()));
context_->synchronize();
dev_Kc_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
}
template <typename T> void UpdateManagementHelper<T>::computeKcBlock(int m_batch) {
// CAUTION: needs K_values to be already computed !!
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
hipLaunchKernelGGL(( kernelGetKBlockAggregate<nthreads>), dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
dev_K_values_->getData(), m_batch, dev_Kc_block_->getData(),
dev_Kc_block_aggregate_->getData());
}
template <typename T> void UpdateManagementHelper<T>::computeKc(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kc_temp_storage_->getSize();
CUDA_CALL(RPU::hipcub::DeviceScan::ExclusiveSum(
(void *)dev_Kc_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kc_values_->getData(), m_batch, context_->getStream()));
}
template <typename T> void UpdateManagementHelper<T>::computeKn(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kn_temp_storage_->getSize();
CUDA_CALL(RPU::hipcub::DeviceReduce::Sum(
(void *)dev_Kn_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kn_->getData(), m_batch, context_->getStream()));
}
template <typename T>
void UpdateManagementHelper<T>::translateTransToBatchOrder64(
uint64_t *x_counts_bo64,
uint64_t *d_counts_bo64,
const uint32_t *x_counts,
const uint32_t *d_counts,
const int m_batch,
const int BL,
const bool update_bl_management) {
// needs K values to be precomputed for ublm !!
if (BL > 31) {
RPU_FATAL("ERROR: BO64 format only supported for BL<32");
}
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
const int nthreads = RPU_UMH_B64_NTHREADS; // how many ? test...
int nblocks = d_size_ + x_size_;
if (update_bl_management) {
this->computeKcBlock(m_batch);
this->computeKn(m_batch);
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<true, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL,
this->getKnData(true), this->getKValueData(), this->getKcBlockData(),
this->getKcBlockAggregateData());
// context_->synchronize();
} else {
// no update bl management
hipLaunchKernelGGL(( kernelTranslateTransFormatToBatchOrder64Format<false, nthreads>)
, dim3(nblocks), dim3(nthreads), 0, context_->getStream(),
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL);
}
}
template <typename T>
template <typename XInputIteratorT, typename DInputIteratorT>
void UpdateManagementHelper<T>::computeKandScaleValues(
XInputIteratorT x_in,
DInputIteratorT d_in,
const T dw_min,
const T lr,
const bool update_management,
const bool update_bl_management,
const int m_batch,
const bool x_trans,
const bool d_trans,
const int Kmax) {
if ((!update_management) && (!update_bl_management)) {
return;
} else {
// get max values
x_maximizer_->compute(x_in, m_batch, x_trans);
d_maximizer_->compute(d_in, m_batch, d_trans);
    // initialize if necessary
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
// compute
int nblocks = context_->getNBlocks(m_batch, nthreads_);
hipLaunchKernelGGL(( kernelUMGetScaleAndKValues), dim3(nblocks), dim3(nthreads_), 0, context_->getStream(),
dev_scale_values_->getData(), dev_K_values_->getData(), x_maximizer_->getMaxValues(),
d_maximizer_->getMaxValues(), m_batch, update_bl_management, dw_min, lr, Kmax);
}
}
#define RPU_UMH_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \
template void UpdateManagementHelper<NUM_T>::computeKandScaleValues( \
XITERT, DITERT, const NUM_T, const NUM_T, const bool, const bool, const int, const bool, \
const bool, const int);
#define TRANSFLOAT(TRANS) TRANS, float
template class UpdateManagementHelper<float>;
RPU_UMH_ITER_TEMPLATE(float, const float *, const float *);
RPU_UMH_ITER_TEMPLATE(float, float *, float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *);
#undef TRANSFLOAT
#ifdef RPU_USE_DOUBLE
#define TRANSDOUBLE(TRANS) TRANS, double
template class UpdateManagementHelper<double>;
RPU_UMH_ITER_TEMPLATE(double, const double *, const double *);
RPU_UMH_ITER_TEMPLATE(double, double *, double *);
RPU_UMH_ITER_TEMPLATE(
double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(true)>,
SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(false)>,
SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *);
#undef TRANSDOUBLE
#endif
#undef RPU_UMH_ITER_TEMPLATE
} // namespace RPU
| 2d4efec3e68468986b8bd002101d94187bfea016.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "bit_line_maker.h"
#include "update_management_helper.h"
#include <cmath>
#include <iostream>
#include <memory>
#include "cuda_math_util.h"
#include "cuda_util.h"
#include "io_iterator.h"
#include <cub/cub.cuh>
namespace RPU {
// /*********************************************************************************/
/*--------- K<32 special path for large batch sizes. -------------------------------*/
template <bool ublm>
__device__ __forceinline__ kagg_t getKn(const int m_batch, const int BL, const kagg_t *nK);
template <bool ublm>
__device__ __forceinline__ kagg_t getBlockAggregate(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate);
template <bool ublm>
__device__ __forceinline__ int getK(int batch_idx, const int BL, int *K_values);
template <bool ublm>
__device__ __forceinline__ kagg_t
getCurrentKc(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate);
template <>
__device__ __forceinline__ kagg_t getKn<false>(const int m_batch, const int BL, const kagg_t *Kn) {
return m_batch * BL;
}
template <>
__device__ __forceinline__ kagg_t getKn<true>(const int m_batch, const int BL, const kagg_t *Kn) {
return *Kn;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<false>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
if ((thread_block_size + tid_stride) >= m_batch)
return (m_batch % thread_block_size) * BL;
else
return thread_block_size * BL;
}
template <>
__device__ __forceinline__ kagg_t getBlockAggregate<true>(
const int m_batch,
const int BL,
int tid_stride,
int thread_block_size,
kagg_t *Kc_block_aggregate) {
int bid = tid_stride / thread_block_size;
return Kc_block_aggregate[bid];
}
template <> __device__ __forceinline__ int getK<false>(int batch_idx, const int BL, int *K_values) {
return BL;
}
template <> __device__ __forceinline__ int getK<true>(int batch_idx, const int BL, int *K_values) {
return K_values[batch_idx];
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<false>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return batch_idx * BL - Kn_aggregate;
}
template <>
__device__ __forceinline__ kagg_t
getCurrentKc<true>(int batch_idx, const int BL, kagg_t *Kc_block, kagg_t Kn_aggregate) {
return Kc_block[batch_idx];
}
template <bool update_bl_management = false, int thread_block_size = 512>
__global__ void kernelTranslateTransFormatToBatchOrder64Format(
const uint32_t *x_counts,
uint64_t *x_counts_BO64_format,
int x_size_in,
const uint32_t *d_counts,
uint64_t *d_counts_BO64_format,
int d_size_in,
const int m_batch_in,
const int BL_in,
kagg_t *Kn_in = nullptr,
int *K_values_in = nullptr,
kagg_t *Kc_block_in = nullptr,
kagg_t *Kc_block_aggregate_in = nullptr) {
// -- each block takes one x/d value.
// -- expects OUTTRANS format !!
__shared__ uint32_t c_shared[thread_block_size];
__shared__ uint32_t neg_shared[thread_block_size];
const int m_batch = m_batch_in;
const int BL = BL_in;
kagg_t Kn = getKn<update_bl_management>(m_batch, BL, Kn_in);
const int x_size = x_size_in;
const int d_size = d_size_in;
const int add_size = x_size + d_size;
int nB = ((Kn + 31) >> 5); // compressed K on batch
  // loop xd indices
for (int bid_stride = 0; bid_stride < add_size; bid_stride += gridDim.x) {
int bid = blockIdx.x + bid_stride;
// select x or d
const uint32_t *counts;
uint64_t *out_counts;
int xd_index;
if (bid < x_size) {
counts = x_counts;
out_counts = x_counts_BO64_format;
xd_index = bid;
} else if (bid < add_size) {
counts = d_counts;
out_counts = d_counts_BO64_format;
xd_index = bid - x_size;
} else {
return;
}
const int start_idx = xd_index * m_batch; // expects trans order !!
const int out_start_idx = xd_index * nB; // reduced batch size
int total_nB = 0;
uint32_t last_neg = 0;
uint32_t last_c = 0;
int current_nB = 0;
kagg_t Kc_aggregate = 0;
int K_left_over = 0;
// loop over batch
for (int tid_stride = 0; tid_stride < m_batch; tid_stride += blockDim.x) {
if (threadIdx.x > 0) {
c_shared[threadIdx.x] = 0;
neg_shared[threadIdx.x] = 0;
}
if (threadIdx.x == current_nB) { // to avoid a sync, see below
c_shared[0] = last_c;
neg_shared[0] = last_neg;
}
const int batch_idx = threadIdx.x + tid_stride;
kagg_t Kc_block_aggregate = getBlockAggregate<update_bl_management>(
m_batch, BL, tid_stride, thread_block_size, Kc_block_aggregate_in);
kagg_t current_Kc = Kc_block_aggregate;
int K = 0;
if (batch_idx < m_batch) {
K = getK<update_bl_management>(batch_idx, BL, K_values_in);
current_Kc = getCurrentKc<update_bl_management>(batch_idx, BL, Kc_block_in, Kc_aggregate);
}
Kc_block_aggregate += K_left_over;
current_Kc += K_left_over;
__syncthreads(); // need to sync for shared
if (batch_idx < m_batch) {
uint32_t c = counts[start_idx + batch_idx];
uint32_t negative = 0;
if ((c & ((uint32_t)1)) > 0) {
negative = 0xffffffff >> (32 - K);
}
c >>= 1; // get rid of negative bit
// set bit in shared
int i_word_start = current_Kc >> 5;
int i_word_end = (current_Kc + K) >> 5;
int i_bit_start = current_Kc & 0x1f;
atomicOr(&c_shared[i_word_start], c << i_bit_start);
atomicOr(&neg_shared[i_word_start], negative << i_bit_start);
if (i_word_start != i_word_end) { // most 31 bits per batch, so only 1 overlap possible
atomicOr(
&c_shared[i_word_end],
c >> (32 - i_bit_start)); // (32 - i_bit_start) first bits were already set above
atomicOr(&neg_shared[i_word_end], negative >> (32 - i_bit_start));
}
}
__syncthreads();
Kc_aggregate += Kc_block_aggregate;
kagg_t current_nB =
Kc_block_aggregate >> 5; // there might be some left over bits. put into next round
bool last_loop = tid_stride + blockDim.x >= m_batch;
K_left_over = Kc_aggregate & 0x1f;
bool left_overs = K_left_over > 0;
if ((threadIdx.x < current_nB) || ((threadIdx.x == current_nB) && last_loop && left_overs)) {
uint64_t c64 =
(((uint64_t)neg_shared[threadIdx.x]) << 32) | ((uint64_t)c_shared[threadIdx.x]);
out_counts[out_start_idx + total_nB + threadIdx.x] = c64;
} else if ((threadIdx.x == current_nB) && left_overs) { // save left overs
last_neg = neg_shared[current_nB];
last_c = c_shared[current_nB];
}
total_nB += current_nB;
}
}
}
namespace test_helper {
template <typename T, bool ublm>
int debugKernelTranslateTransFormatToBatchOrder64Format(
T *indata, int size, int m_batch, T scaleprob, int K) {
// counts should be: size*nk32 allocated !
if (K > 31)
return 1;
DebugPulsedUpdateMetaParameter<T> up;
up.res = 0.01;
up.sto_round = false;
up.update_bl_management = ublm;
up.update_management = ublm;
up.scaleprob = scaleprob;
up.desired_BL = K;
std::cout << "m_batch: " << m_batch << " size: " << size << std::endl;
const int nthreads = RPU_THREADS_PER_BLOCK_UPDATE;
CUDA_TIMING_INIT;
CudaContext c{-1, false};
T *tmp = new T[size * m_batch];
for (int i = 0; i < m_batch; i++) {
for (int j = 0; j < size; j++) {
tmp[i * size + j] = indata[j];
}
}
CudaArray<T> dev_indata(&c, size * m_batch, tmp);
c.synchronize();
delete[] tmp;
T dwmin = 0.001;
T lr = 0.01;
BitLineMaker<T> blm(&c, size, size);
blm.makeCounts(
dev_indata.getData(), dev_indata.getData(), up, dwmin, lr, m_batch, false, false, true, 2,
false); // compute B64 to init buffer for below
UpdateManagementHelper<T> *umh = blm.getUmh();
c.synchronize();
int nBmax = m_batch; // at most m_batch, likely smaller
CudaArray<uint64_t> dev_counts_out(&c, size * nBmax);
CudaArray<uint64_t> dev_counts_out2(&c, size * nBmax);
int nblocks = size + size;
std::cout << "nblocks, nthreads: " << nblocks << ", " << nthreads << std::endl;
CUDA_TIMING_START(c);
kagg_t *nK = nullptr;
int *K_values = nullptr;
kagg_t *Kc_block = nullptr;
kagg_t *Kc_block_aggregate = nullptr;
if (ublm) {
// redo computation for timing
umh->computeKn(m_batch); // needs explicit buffer init. see above.
nK = umh->getKnData(true);
K_values = umh->getKValueData();
umh->computeKcBlock(m_batch);
Kc_block = umh->getKcBlockData();
Kc_block_aggregate = umh->getKcBlockAggregateData();
}
CUDA_TIMING_STOP(c, "get Kn/Kcblock ");
CUDA_TIMING_START(c);
kernelTranslateTransFormatToBatchOrder64Format<ublm, nthreads>
<<<nblocks, nthreads, 0, c.getStream()>>>(
blm.getXCountsData(), dev_counts_out.getData(), size, blm.getDCountsData(),
dev_counts_out2.getData(), size, m_batch, K, nK, K_values, Kc_block, Kc_block_aggregate);
CUDA_TIMING_STOP(c, "Counts translated");
kagg_t Kn = 0;
if (ublm)
Kn = umh->getKnValue();
else
Kn = m_batch * K;
kagg_t nB = (Kn + 31) / 32;
// check translated:
int *Kvalues = new int[m_batch];
if (ublm)
umh->getKValues().copyTo(Kvalues);
uint32_t *orig_counts = new uint32_t[m_batch * size];
uint64_t *counts_out = new uint64_t[m_batch * size];
uint64_t *counts_out_ref = new uint64_t[m_batch * size];
dev_counts_out2.copyTo(counts_out);
blm.copyDCountsToHost(orig_counts);
for (int j = 0; j < m_batch * size; j++) {
counts_out_ref[j] = 0;
}
c.synchronize();
int return_int = 0;
kagg_t Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
if (ublm)
Kc += Kvalues[i_batch];
else
Kc += K;
}
int nBref = (Kc + 31) >> 5;
uint32_t one = 1;
// translate reference
for (int idx = 0; idx < size; idx++) {
Kc = 0;
for (int i_batch = 0; i_batch < m_batch; i_batch++) {
uint32_t c = orig_counts[i_batch + m_batch * idx];
uint32_t neg = c & one;
c >>= 1; // get rid of sign bit;
int k = K;
if (ublm)
k = Kvalues[i_batch];
for (int i = 0; i < k; i++) { // k is smaller than 32 because nK32==1
kagg_t current_cK = Kc + i;
kagg_t iB = (current_cK) >> 5;
int ibit = (current_cK)&0x1f;
if ((c & (one << i)) > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << ibit;
}
if (neg > 0) {
counts_out_ref[iB + idx * nBref] |= ((uint64_t)1) << (ibit + 32);
}
}
Kc += k;
}
}
std::cout << "nB should be " << nBref << " and is " << nB << ".\n";
if (nB != nBref) {
return_int = 1;
}
for (int j = 0; j < nBref * size; j++) {
if (counts_out_ref[j] != counts_out[j]) {
std::cerr << j << ":" << counts_out[j] << " should be " << counts_out_ref[j] << std::endl;
return_int = 1;
}
if ((j > 100) && return_int)
break;
}
delete[] counts_out;
delete[] orig_counts;
delete[] counts_out_ref;
delete[] Kvalues;
CUDA_TIMING_DESTROY;
return return_int;
}
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, true>(float *, int, int, float, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<float, false>(float *, int, int, float, int);
#ifdef RPU_USE_DOUBLE
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, true>(double *, int, int, double, int);
template int
debugKernelTranslateTransFormatToBatchOrder64Format<double, false>(double *, int, int, double, int);
#endif
} // namespace test_helper
template <typename T>
__global__ void kernelUMGetScaleAndKValues(
T *scale_values,
int *K_values,
float *x_amax_values,
float *d_amax_values,
const int m_batch,
const bool ublm_in,
const T dw_min_in,
const T lr_in,
const int Kmax_in) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
bool ublm = ublm_in;
T dw_min = dw_min_in;
T lr = lr_in;
T regularizer = dw_min * dw_min;
if (tid < m_batch) {
T x_amax = MAX(x_amax_values[tid], regularizer);
T d_amax = MAX(d_amax_values[tid], regularizer);
scale_values[tid] = sqrt(x_amax / d_amax);
if (ublm) {
int Kmax = Kmax_in;
int K = ceil(lr * x_amax * d_amax / dw_min);
K_values[tid] = (K <= Kmax) ? K : Kmax;
}
// note: K values are not set in case of ~ublm
}
}
template <int thread_block_size>
__global__ void kernelGetKBlockAggregate(
int *K_values, int m_batch_in, kagg_t *Kc_block, kagg_t *Kc_block_aggregate) {
const int m_batch = m_batch_in;
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ typename RPU::cub::BlockScan<kagg_t, thread_block_size>::TempStorage temp_storage;
int K = 0;
if (tid < m_batch) {
K = K_values[tid];
}
kagg_t Kc = 0;
kagg_t block_aggregate = 0;
RPU::cub::BlockScan<kagg_t, thread_block_size>(temp_storage).ExclusiveSum(K, Kc, block_aggregate);
if (tid < m_batch) {
Kc_block[tid] = Kc;
}
if (threadIdx.x == 0) {
Kc_block_aggregate[blockIdx.x] = block_aggregate;
}
}
/*********************************************************************************************************************/
/* UPDATEMANAGERHELPER */
/*********************************************************************************************************************/
#define RPU_UMH_B64_NTHREADS 512
template <typename T>
UpdateManagementHelper<T>::UpdateManagementHelper(CudaContext *c, int x_size, int d_size)
: context_{c}, x_size_{x_size}, d_size_{d_size}, buffer_m_batch_{0} {
nthreads_ = RPU_THREADS_PER_BLOCK_UPDATE;
x_maximizer_ = RPU::make_unique<Maximizer<T>>(c, x_size_);
d_maximizer_ = RPU::make_unique<Maximizer<T>>(c, d_size_);
dev_Kn_ = RPU::make_unique<CudaArray<kagg_t>>(c, 1);
}
template <typename T> void UpdateManagementHelper<T>::initializeBuffers(int m_batch) {
buffer_m_batch_ = m_batch;
dev_K_values_ = RPU::make_unique<CudaArray<int>>(context_, m_batch);
dev_Kc_values_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_scale_values_ = RPU::make_unique<CudaArray<T>>(context_, m_batch);
// for translate
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
dev_Kc_block_ = RPU::make_unique<CudaArray<kagg_t>>(context_, m_batch);
dev_Kc_block_aggregate_ = RPU::make_unique<CudaArray<kagg_t>>(context_, nblocks);
// Determine temporary device storage requirements
void *temp_storage = NULL;
size_t temp_storage_bytes = 0;
CUDA_CALL(RPU::cub::DeviceReduce::Sum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kn_->getData(), m_batch,
context_->getStream()));
context_->synchronize();
dev_Kn_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
temp_storage = NULL;
temp_storage_bytes = 0;
CUDA_CALL(RPU::cub::DeviceScan::ExclusiveSum(
temp_storage, temp_storage_bytes, dev_K_values_->getData(), dev_Kc_values_->getData(),
m_batch, context_->getStream()));
context_->synchronize();
dev_Kc_temp_storage_ = RPU::make_unique<CudaArray<char>>(context_, (int)temp_storage_bytes);
context_->synchronize();
}
template <typename T> void UpdateManagementHelper<T>::computeKcBlock(int m_batch) {
// CAUTION: needs K_values to be already computed !!
const int nthreads = RPU_UMH_B64_NTHREADS;
int nblocks = context_->getNBlocks(m_batch, nthreads);
kernelGetKBlockAggregate<nthreads><<<nblocks, nthreads, 0, context_->getStream()>>>(
dev_K_values_->getData(), m_batch, dev_Kc_block_->getData(),
dev_Kc_block_aggregate_->getData());
}
template <typename T> void UpdateManagementHelper<T>::computeKc(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kc_temp_storage_->getSize();
CUDA_CALL(RPU::cub::DeviceScan::ExclusiveSum(
(void *)dev_Kc_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kc_values_->getData(), m_batch, context_->getStream()));
}
template <typename T> void UpdateManagementHelper<T>::computeKn(int m_batch) {
// CAUTION: needs K_values to be already computed !!
size_t temp_storage_bytes = dev_Kn_temp_storage_->getSize();
CUDA_CALL(RPU::cub::DeviceReduce::Sum(
(void *)dev_Kn_temp_storage_->getData(), temp_storage_bytes, dev_K_values_->getData(),
dev_Kn_->getData(), m_batch, context_->getStream()));
}
template <typename T>
void UpdateManagementHelper<T>::translateTransToBatchOrder64(
uint64_t *x_counts_bo64,
uint64_t *d_counts_bo64,
const uint32_t *x_counts,
const uint32_t *d_counts,
const int m_batch,
const int BL,
const bool update_bl_management) {
// needs K values to be precomputed for ublm !!
if (BL > 31) {
RPU_FATAL("ERROR: BO64 format only supported for BL<32");
}
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
const int nthreads = RPU_UMH_B64_NTHREADS; // how many ? test...
int nblocks = d_size_ + x_size_;
if (update_bl_management) {
this->computeKcBlock(m_batch);
this->computeKn(m_batch);
kernelTranslateTransFormatToBatchOrder64Format<true, nthreads>
<<<nblocks, nthreads, 0, context_->getStream()>>>(
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL,
this->getKnData(true), this->getKValueData(), this->getKcBlockData(),
this->getKcBlockAggregateData());
// context_->synchronize();
} else {
// no update bl management
kernelTranslateTransFormatToBatchOrder64Format<false, nthreads>
<<<nblocks, nthreads, 0, context_->getStream()>>>(
x_counts, x_counts_bo64, x_size_, d_counts, d_counts_bo64, d_size_, m_batch, BL);
}
}
template <typename T>
template <typename XInputIteratorT, typename DInputIteratorT>
void UpdateManagementHelper<T>::computeKandScaleValues(
XInputIteratorT x_in,
DInputIteratorT d_in,
const T dw_min,
const T lr,
const bool update_management,
const bool update_bl_management,
const int m_batch,
const bool x_trans,
const bool d_trans,
const int Kmax) {
if ((!update_management) && (!update_bl_management)) {
return;
} else {
// get max values
x_maximizer_->compute(x_in, m_batch, x_trans);
d_maximizer_->compute(d_in, m_batch, d_trans);
    // initialize if necessary
if (buffer_m_batch_ < m_batch) {
this->initializeBuffers(m_batch);
}
// compute
int nblocks = context_->getNBlocks(m_batch, nthreads_);
kernelUMGetScaleAndKValues<<<nblocks, nthreads_, 0, context_->getStream()>>>(
dev_scale_values_->getData(), dev_K_values_->getData(), x_maximizer_->getMaxValues(),
d_maximizer_->getMaxValues(), m_batch, update_bl_management, dw_min, lr, Kmax);
}
}
#define RPU_UMH_ITER_TEMPLATE(NUM_T, XITERT, DITERT) \
template void UpdateManagementHelper<NUM_T>::computeKandScaleValues( \
XITERT, DITERT, const NUM_T, const NUM_T, const bool, const bool, const int, const bool, \
const bool, const int);
#define TRANSFLOAT(TRANS) TRANS, float
template class UpdateManagementHelper<float>;
RPU_UMH_ITER_TEMPLATE(float, const float *, const float *);
RPU_UMH_ITER_TEMPLATE(float, float *, float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderTransInputIterator<float>, const float *);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderTransInputIterator<float>, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(
float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, PermuterTransInputIterator<float>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(true)>);
RPU_UMH_ITER_TEMPLATE(float, const float *, SliceInputIterator<TRANSFLOAT(false)>);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(true)>, const float *);
RPU_UMH_ITER_TEMPLATE(float, IndexReaderSliceInputIterator<TRANSFLOAT(false)>, const float *);
#undef TRANSFLOAT
#ifdef RPU_USE_DOUBLE
#define TRANSDOUBLE(TRANS) TRANS, double
template class UpdateManagementHelper<double>;
RPU_UMH_ITER_TEMPLATE(double, const double *, const double *);
RPU_UMH_ITER_TEMPLATE(double, double *, double *);
RPU_UMH_ITER_TEMPLATE(
double, IndexReaderTransInputIterator<double>, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderTransInputIterator<double>, const double *);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(true)>,
SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(
double,
IndexReaderSliceInputIterator<TRANSDOUBLE(false)>,
SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, PermuterTransInputIterator<double>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(true)>);
RPU_UMH_ITER_TEMPLATE(double, const double *, SliceInputIterator<TRANSDOUBLE(false)>);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(true)>, const double *);
RPU_UMH_ITER_TEMPLATE(double, IndexReaderSliceInputIterator<TRANSDOUBLE(false)>, const double *);
#undef TRANSDOUBLE
#endif
#undef RPU_UMH_ITER_TEMPLATE
} // namespace RPU
|
ec7bd757b4b923b98e79e444bc4f020adce2797f.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "../gridop2_src/gridop2_cpp.h"
#include "../gridop2_src/catch.h"
int main(int argc, char* argv[])
{
try {
const int cuda_devices = hpc::getCudaDeviceCount();
std::cout << "Number of CUDA devices: " << cuda_devices << std::endl;
if (cuda_devices < 1) {
std::cerr << "No CUDA devices found" << std::endl;
return 1;
}
hipError_t error_id;
hipDeviceProp_t deviceProp;
CUDA_EXEC(hipGetDeviceProperties, (&deviceProp, 0));
std::cout
<< "Device: " << deviceProp.name << " (" << deviceProp.major << '.' << deviceProp.minor << ')' << std::endl
<< "Total global memory: " << deviceProp.totalGlobalMem << std::endl
<< "Free global memory: " << hpc::getCudaFreeMem() << std::endl
<< "Max threads per block: " << deviceProp.maxThreadsPerBlock << std::endl
<< "Multi Processor count: " << deviceProp.multiProcessorCount << std::endl;
return main1(argc, argv);
} CATCH_EXCEPTIONS()
}
| ec7bd757b4b923b98e79e444bc4f020adce2797f.cu | #include "pch.h"
#include "../gridop2_src/gridop2_cpp.h"
#include "../gridop2_src/catch.h"
int main(int argc, char* argv[])
{
try {
const int cuda_devices = hpc::getCudaDeviceCount();
std::cout << "Number of CUDA devices: " << cuda_devices << std::endl;
if (cuda_devices < 1) {
std::cerr << "No CUDA devices found" << std::endl;
return 1;
}
cudaError_t error_id;
cudaDeviceProp deviceProp;
CUDA_EXEC(cudaGetDeviceProperties, (&deviceProp, 0));
std::cout
<< "Device: " << deviceProp.name << " (" << deviceProp.major << '.' << deviceProp.minor << ')' << std::endl
<< "Total global memory: " << deviceProp.totalGlobalMem << std::endl
<< "Free global memory: " << hpc::getCudaFreeMem() << std::endl
<< "Max threads per block: " << deviceProp.maxThreadsPerBlock << std::endl
<< "Multi Processor count: " << deviceProp.multiProcessorCount << std::endl;
return main1(argc, argv);
} CATCH_EXCEPTIONS()
}
|
ad68ef8c8afbb392fe788d66f92eab1df63d6279.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ftl/render/render_params.hpp>
#include "splatter_cuda.hpp"
#include <ftl/rgbd/camera.hpp>
#include <ftl/cuda_common.hpp>
#include <ftl/cuda/weighting.hpp>
#include <ftl/cuda/makers.hpp>
#define T_PER_BLOCK 8
#define ACCUM_DIAMETER 8
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
using ftl::rgbd::Camera;
using ftl::render::ViewPortMode;
using ftl::render::AccumulationFunction;
using ftl::rgbd::Projection;
/*template <typename T>
__device__ inline T generateInput(const T &in, const SplatParams ¶ms, const float4 &worldPos) {
return in;
}
template <>
__device__ inline uchar4 generateInput(const uchar4 &in, const SplatParams ¶ms, const float4 &worldPos) {
return (params.m_flags & ftl::render::kShowDisconMask && worldPos.w < 0.0f) ?
make_uchar4(0,0,255,255) : // Show discontinuity mask in red
in;
}*/
template <typename A, typename B>
__device__ inline B weightInput(const A &in, float weight) {
return in * weight;
}
template <>
__device__ inline float4 weightInput(const uchar4 &in, float weight) {
return make_float4(
(float)in.x * weight,
(float)in.y * weight,
(float)in.z * weight,
(float)in.w * weight);
}
template <typename T>
__device__ inline float colourDifference(const T &a, const T &b);
template <>
__device__ inline float colourDifference<float4>(const float4 &a, const float4 &b) {
return max(fabsf(a.x-b.x), max(fabsf(a.y-b.y), fabsf(a.z-b.z)));
}
template <>
__device__ inline float colourDifference<float>(const float &a, const float &b) {
return fabs(a-b);
}
template <AccumulationFunction F, typename T>
__device__ inline void accumulateOutput(TextureObject<T> &out, TextureObject<int> &contrib, const uint2 &pos, const T &in, float w) {
// Just weighted average everything
if (F == AccumulationFunction::Simple) {
const T old = out.tex2D(pos.x, pos.y);
const int c = contrib.tex2D(pos.x,pos.y);
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = int(w * float(0xFFFF)) + (c & 0xFFFFFF);
} else {
int c = contrib.tex2D(pos.x,pos.y);
float weight_sum = float(c & 0xFFFFFF) / float(0xFFFF);
float count = c >> 24;
const T old = out.tex2D(pos.x, pos.y);
		// Really close weights are weight-averaged together
// but substantially stronger weights do a straight replacement
if (F == AccumulationFunction::CloseWeights) {
if (count == 0 || w*0.95f > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
} else {
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
// The winner takes all in determining colour
} else if (F == AccumulationFunction::BestWeight) {
if (count == 0 || w > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
}
// If colours are close then weighted average, otherwise discard the
// lowest weighted colours
} else if (F == AccumulationFunction::ColourDiscard) {
if (colourDifference(old / weight_sum, in) > 10.0f) {
if (count == 0 || w > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
} else {
//out(pos.x, pos.y) = old + in*w;
//contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
} else {
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
} else if (F == AccumulationFunction::ColourDiscardSmooth) {
//const float cdiff = 1.0f - min(1.0f, colourDifference(old / weight_sum, in) / 50.0f);
// TODO: Determine colour smoothing param from magnitude of weighting difference
			// Or at least consider the magnitude of the weight difference somehow.
const float cdiff = ftl::cuda::weighting(colourDifference(old / weight_sum, in), 255.0f);
const float alpha = (w > weight_sum/count) ? cdiff : 1.0f;
const float beta = (count == 0 || w > weight_sum/count) ? 1.0f : cdiff;
out(pos.x, pos.y) = old*alpha + in*w*beta;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w*beta * float(0xFFFF)) + int(weight_sum*alpha * float(0xFFFF));
}
}
}
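// Note on the contrib packing above, read off the bit operations rather than any documented format: the
// low 24 bits hold the accumulated weight as fixed point (weight * 0xFFFF); in the non-Simple modes the
// top 8 bits additionally hold the sample count, e.g. one sample of weight 0.5 stores (1 << 24) + 0x7FFF.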
template <ViewPortMode VPMODE>
__device__ inline float2 convertScreen(const Parameters ¶ms, int x, int y) {
return make_float2(x,y);
}
/*template <>
__device__ inline float2 convertScreen<ViewPortMode::Warping>(const Parameters ¶ms, int x, int y) {
const float coeff = 1.0f / (params.viewport.warpMatrix.entries[6] * x + params.viewport.warpMatrix.entries[7] * y + params.viewport.warpMatrix.entries[8]);
const float xcoo = coeff * (params.viewport.warpMatrix.entries[0] * x + params.viewport.warpMatrix.entries[1] * y + params.viewport.warpMatrix.entries[2]);
const float ycoo = coeff * (params.viewport.warpMatrix.entries[3] * x + params.viewport.warpMatrix.entries[4] * y + params.viewport.warpMatrix.entries[5]);
float2 pt = params.viewport.reverseMap(params.camera, make_float2(xcoo,ycoo));
return pt;
}*/
template <>
__device__ inline float2 convertScreen<ViewPortMode::Stretch>(const Parameters ¶ms, int x, int y) {
return params.viewport.reverseMap(params.camera, make_float2(x,y));
}
template <typename A>
__device__ inline auto getInput(TextureObject<A> &in, const float3 &screen, float width, float height) {
const float inSX = float(in.width()) / width;
const float inSY = float(in.height()) / height;
return in.tex2D(screen.x*inSX, screen.y*inSY);
}
__device__ float weightByNormal(TextureObject<half4> &normals, int x, int y, const float3x3 &transformR, const float3 &screenPos, const ftl::rgbd::Camera &camera) {
// Calculate the dot product of surface normal and camera ray
const float3 n = transformR * make_float3(normals.tex2D(x, y));
float3 ray = camera.screenToCam(screenPos.x, screenPos.y, 1.0f);
ray = ray / length(ray);
// Allow slightly beyond 90 degrees due to normal estimation errors
const float dotproduct = (max(dot(ray,n),-0.1f)+0.1) / 1.1f;
return dotproduct;
}
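// The (max(dot, -0.1) + 0.1) / 1.1 mapping above takes the clamped dot product from [-0.1, 1.0] to
// [0.0, 1.0], so a head-on view weighs close to 1 and a grazing or slightly back-facing view weighs
// close to 0.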
__device__ float depthMatching(const Parameters ¶ms, float d1, float d2) {
// TODO: Consider a slightly different pixel size check
const float threshold = (params.depthCoef / ((params.depthCoef / d2) - 1.0f)) - d2;
return (fabs(d1 - d2) <= threshold) ? 1.0f : 0.0f;
}
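// One way to read the threshold above, assuming depth = depthCoef / disparity: depthCoef / d2 is the
// disparity at d2, so the threshold is the depth change caused by a one-unit disparity step, i.e.
// depthCoef / (disparity - 1) - d2; two depths "match" when they differ by no more than that step.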
/*
* Full reprojection with normals and depth
*/
template <typename A, typename B, AccumulationFunction ACCUM, Projection PROJECT>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_src,
TextureObject<float> depth_in, // Virtual depth map
TextureObject<short> weights,
TextureObject<half4> normals,
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 transform, float3x3 transformR) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float2 rpt = convertScreen<VPMODE>(params, x, y);
//const float3 camPos = transform * params.camera.screenToCam(rpt.x, rpt.y, d);
const float3 camPos = transform * params.camera.unproject<PROJECT>(make_float3(x, y, d));
if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
// Not on screen so stop now...
if (screenPos.x < depth_src.width() && screenPos.y < depth_src.height()) {
const float d2 = depth_src.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f));
const auto input = getInput(in, screenPos, depth_src.width(), depth_src.height());
// Boolean match (0 or 1 weight). 1.0 if depths are sufficiently close
float weight = depthMatching(params, camPos.z, d2);
if (params.m_flags & ftl::render::kUseWeightsChannel)
weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
// TODO: Weight by distance to discontinuity? Perhaps as an alternative to
// removing a discontinuity. This would also gradually blend colours from
// multiple cameras that otherwise might jump when one cameras input is lost
// at an invalid patch.
/* Buehler C. et al. 2001. Unstructured Lumigraph Rendering. */
/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
// This is the simple naive colour weighting. It might be good
// enough for our purposes if the alignment step prevents ghosting
// TODO: Use depth and perhaps the neighbourhood consistency in:
// Kuster C. et al. 2011. FreeCam: A hybrid camera system for interactive free-viewpoint video
if (params.m_flags & ftl::render::kNormalWeightColours)
weight *= weightByNormal(normals, x, y, transformR, screenPos, camera);
const B output = make<B>(input); // * weight; //weightInput(input, weight);
if (weight > 0.0f) {
accumulateOutput<ACCUM,B>(out, contrib, make_uint2(x,y), output, weight);
}
}
}
}
}
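// Net effect of the weighting above: weight starts as the 0/1 depth match, is optionally scaled by the
// normalized weights channel (kUseWeightsChannel) and by the surface-normal factor (kNormalWeightColours),
// and only strictly positive weights are accumulated.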
/*
* Full reprojection without normals
*/
template <typename A, typename B, AccumulationFunction ACCUM, Projection PROJECT>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_src,
TextureObject<float> depth_in, // Virtual depth map
TextureObject<short> weights,
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 transform, float3x3 transformR) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float2 rpt = convertScreen<VPMODE>(params, x, y);
const float3 camPos = transform * params.camera.unproject<PROJECT>(make_float3(x, y, d));
if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
// Not on screen so stop now...
if (screenPos.x < depth_src.width() && screenPos.y < depth_src.height()) {
const float d2 = depth_src.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f));
const auto input = getInput(in, screenPos, depth_src.width(), depth_src.height());
// Boolean match (0 or 1 weight). 1.0 if depths are sufficiently close
float weight = depthMatching(params, camPos.z, d2);
if (params.m_flags & ftl::render::kUseWeightsChannel)
weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
const B output = make<B>(input); // * weight; //weightInput(input, weight);
if (weight > 0.0f) {
accumulateOutput<ACCUM,B>(out, contrib, make_uint2(x,y), output, weight);
}
}
}
}
}
template <typename A, typename B>
void ftl::cuda::reproject(
TextureObject<A> &in,
TextureObject<float> &depth_src, // Original 3D points
TextureObject<float> &depth_in, // Virtual depth map
TextureObject<short> &weights,
TextureObject<half4> *normals,
TextureObject<B> &out, // Accumulated output
TextureObject<int> &contrib,
const Parameters ¶ms,
const Camera &camera, const float4x4 &transform, const float3x3 &transformR,
hipStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
if (normals) {
if (params.accumulationMode == AccumulationFunction::CloseWeights) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::Simple) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
}
} else {
if (params.accumulationMode == AccumulationFunction::CloseWeights) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::Simple) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
switch (params.projection) {
case Projection::PERSPECTIVE:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR:hipLaunchKernelGGL(( reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR>), dim3(gridSize), dim3(blockSize), 0, stream, in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
}
}
cudaSafeCall( hipGetLastError() );
}
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<uchar4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
hipStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
hipStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
hipStream_t stream);
//==============================================================================
// Without normals or depth
//==============================================================================
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
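// Simplified overload: there is no source depth map, confidence weights or normals here,
// so the depth test compares the reprojected depth against camera.maxDepth only and
// contributions are always combined with AccumulationFunction::Simple.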
template <typename A, typename B>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_in, // Virtual depth map
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 poseInv) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float3 camPos = poseInv * params.camera.screenToCam(x, y, d);
const float3 camPos = poseInv * params.camera.unproject<Projection::PERSPECTIVE>(make_float3(x, y, d));
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
if (screenPos.x < in.width() && screenPos.y < in.height()) {
const auto input = in.tex2D(screenPos.x, screenPos.y);
float weight = depthMatching(params, camPos.z, camera.maxDepth);
const B weighted = make<B>(input) * weight;
if (weight > 0.0f) {
accumulateOutput<AccumulationFunction::Simple>(out, contrib, make_uint2(x,y), weighted, weight);
}
}
}
}
template <typename A, typename B>
void ftl::cuda::reproject(
TextureObject<A> &in,
TextureObject<float> &depth_in, // Virtual depth map
TextureObject<B> &out, // Accumulated output
TextureObject<int> &contrib,
const Parameters ¶ms,
const Camera &camera, const float4x4 &poseInv, hipStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( reprojection_kernel), dim3(gridSize), dim3(blockSize), 0, stream,
in,
depth_in,
out,
contrib,
params,
camera,
poseInv
);
cudaSafeCall( hipGetLastError() );
}
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<uchar4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, hipStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, hipStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, hipStream_t stream);
// ===== Equirectangular Reprojection ==========================================
__device__ inline float2 equirect_reprojection(int x_img, int y_img, double f, const float3x3 &rot, int w1, int h1, const ftl::rgbd::Camera &cam) {
float3 ray3d = cam.screenToCam(x_img, y_img, 1.0f);
ray3d /= length(ray3d);
ray3d = rot * ray3d;
//inverse formula for spherical projection, reference Szeliski book "Computer Vision: Algorithms and Applications" p439.
float theta = atan2(ray3d.y,sqrt(ray3d.x*ray3d.x+ray3d.z*ray3d.z));
float phi = atan2(ray3d.x, ray3d.z);
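// theta is the elevation (latitude) and phi the azimuth (longitude) of the rotated ray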
const float pi = 3.14159265f; // use full float precision for pi
//get 2D point on equirectangular map
float x_sphere = (((phi*w1)/pi+w1)/2);
float y_sphere = (theta+ pi/2)*h1/pi;
return make_float2(x_sphere,y_sphere);
}
__global__ void equirectangular_kernel(
TextureObject<uchar4> image_in,
TextureObject<uchar4> image_out,
Camera camera, float3x3 pose) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= 0 && y >= 0 && x < image_out.width() && y < image_out.height()) {
const float2 p = equirect_reprojection(x,y, camera.fx, pose, image_in.width(), image_in.height(), camera);
const float4 colour = image_in.tex2D(p.x, p.y);
image_out(x,y) = make_uchar4(colour.x, colour.y, colour.z, 0);
}
}
void ftl::cuda::equirectangular_reproject(
ftl::cuda::TextureObject<uchar4> &image_in,
ftl::cuda::TextureObject<uchar4> &image_out,
const ftl::rgbd::Camera &camera, const float3x3 &pose, hipStream_t stream) {
const dim3 gridSize((image_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (image_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( equirectangular_kernel), dim3(gridSize), dim3(blockSize), 0, stream, image_in, image_out, camera, pose);
cudaSafeCall( hipGetLastError() );
}
// ==== Correct for bad colours ================================================
__device__ inline uchar4 make_uchar4(const float4 v) {
return make_uchar4(v.x,v.y,v.z,v.w);
}
template <int RADIUS>
__global__ void fix_colour_kernel(
TextureObject<float> depth,
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour,
ftl::rgbd::Camera cam) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= RADIUS && y >= RADIUS && x < out.width()-RADIUS && y < out.height()-RADIUS) {
const float contrib = contribs.tex2D((int)x,(int)y);
const float d = depth.tex2D(x,y);
if (contrib == 0 && d > cam.minDepth && d < cam.maxDepth) {
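// Valid depth but no colour contribution: average the neighbours inside RADIUS
// that did receive contributions, falling back to bad_colour if none did.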
float4 sumcol = make_float4(0.0f);
float count = 0.0f;
for (int v=-RADIUS; v<=RADIUS; ++v) {
for (int u=-RADIUS; u<=RADIUS; ++u) {
const int contrib = contribs.tex2D((int)x+u,(int)y+v);
const float4 c = make_float4(out(int(x)+u,int(y)+v));
if (contrib > 0) {
sumcol += c;
count += 1.0f;
}
}
}
out(x,y) = (count > 0.0f) ? make_uchar4(sumcol / count) : bad_colour;
}
}
}
void ftl::cuda::fix_bad_colour(
TextureObject<float> &depth,
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
const ftl::rgbd::Camera &cam,
hipStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( fix_colour_kernel<1>), dim3(gridSize), dim3(blockSize), 0, stream, depth, out, contribs, bad_colour, cam);
cudaSafeCall( hipGetLastError() );
}
// ===== Show bad colour normalise =============================================
__global__ void show_missing_colour_kernel(
TextureObject<float> depth,
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour,
ftl::rgbd::Camera cam) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < out.width() && y < out.height()) {
const int contrib = contribs.tex2D((int)x,(int)y);
const float d = depth.tex2D(x,y);
if (contrib == 0 && d > cam.minDepth && d < cam.maxDepth) {
out(x,y) = bad_colour;
}
}
}
void ftl::cuda::show_missing_colour(
TextureObject<float> &depth,
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
const ftl::rgbd::Camera &cam,
hipStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( show_missing_colour_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth, out, contribs, bad_colour, cam);
cudaSafeCall( hipGetLastError() );
}
// ===== Show colour weights ===================================================
__global__ void show_colour_weights_kernel(
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < out.width() && y < out.height()) {
const int contrib = contribs.tex2D((int)x,(int)y);
if (contrib > 0) {
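// contrib packs an accumulated weight (scaled by 0xFFFF) in its lower 24 bits and a
// sample count in the top 8 bits; w below is therefore the mean weight per sample.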
float w = float(contrib & 0xFFFFFF) / float(0xFFFF) / float(contrib >> 24);
out(x,y) = make_uchar4(float(bad_colour.x) * w, float(bad_colour.y) * w, float(bad_colour.z) * w, 0.0f);
}
}
}
void ftl::cuda::show_colour_weights(
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
hipStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
hipLaunchKernelGGL(( show_colour_weights_kernel), dim3(gridSize), dim3(blockSize), 0, stream, out, contribs, bad_colour);
cudaSafeCall( hipGetLastError() );
}
| ad68ef8c8afbb392fe788d66f92eab1df63d6279.cu | #include <ftl/render/render_params.hpp>
#include "splatter_cuda.hpp"
#include <ftl/rgbd/camera.hpp>
#include <ftl/cuda_common.hpp>
#include <ftl/cuda/weighting.hpp>
#include <ftl/cuda/makers.hpp>
#define T_PER_BLOCK 8
#define ACCUM_DIAMETER 8
using ftl::cuda::TextureObject;
using ftl::render::Parameters;
using ftl::rgbd::Camera;
using ftl::render::ViewPortMode;
using ftl::render::AccumulationFunction;
using ftl::rgbd::Projection;
/*template <typename T>
__device__ inline T generateInput(const T &in, const SplatParams ¶ms, const float4 &worldPos) {
return in;
}
template <>
__device__ inline uchar4 generateInput(const uchar4 &in, const SplatParams ¶ms, const float4 &worldPos) {
return (params.m_flags & ftl::render::kShowDisconMask && worldPos.w < 0.0f) ?
make_uchar4(0,0,255,255) : // Show discontinuity mask in red
in;
}*/
template <typename A, typename B>
__device__ inline B weightInput(const A &in, float weight) {
return in * weight;
}
template <>
__device__ inline float4 weightInput(const uchar4 &in, float weight) {
return make_float4(
(float)in.x * weight,
(float)in.y * weight,
(float)in.z * weight,
(float)in.w * weight);
}
template <typename T>
__device__ inline float colourDifference(const T &a, const T &b);
template <>
__device__ inline float colourDifference<float4>(const float4 &a, const float4 &b) {
return max(fabsf(a.x-b.x), max(fabsf(a.y-b.y), fabsf(a.z-b.z)));
}
template <>
__device__ inline float colourDifference<float>(const float &a, const float &b) {
return fabs(a-b);
}
template <AccumulationFunction F, typename T>
__device__ inline void accumulateOutput(TextureObject<T> &out, TextureObject<int> &contrib, const uint2 &pos, const T &in, float w) {
// Just weighted average everything
if (F == AccumulationFunction::Simple) {
const T old = out.tex2D(pos.x, pos.y);
const int c = contrib.tex2D(pos.x,pos.y);
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = int(w * float(0xFFFF)) + (c & 0xFFFFFF);
} else {
int c = contrib.tex2D(pos.x,pos.y);
float weight_sum = float(c & 0xFFFFFF) / float(0xFFFF);
float count = c >> 24;
const T old = out.tex2D(pos.x, pos.y);
// Really close weights are weighted averaged together
// but substantially stronger weights do a straight replacement
if (F == AccumulationFunction::CloseWeights) {
if (count == 0 || w*0.95f > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
} else {
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
// The winner takes all in determining colour
} else if (F == AccumulationFunction::BestWeight) {
if (count == 0 || w > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
}
// If colours are close then weighted average, otherwise discard the
// lowest weighted colours
} else if (F == AccumulationFunction::ColourDiscard) {
if (colourDifference(old / weight_sum, in) > 10.0f) {
if (count == 0 || w > weight_sum/count) {
out(pos.x, pos.y) = in*w;
contrib(pos.x, pos.y) = (1 << 24) + int(w * float(0xFFFF));
} else {
//out(pos.x, pos.y) = old + in*w;
//contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
} else {
out(pos.x, pos.y) = old + in*w;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w * float(0xFFFF)) + (c & 0xFFFFFF);
}
} else if (F == AccumulationFunction::ColourDiscardSmooth) {
//const float cdiff = 1.0f - min(1.0f, colourDifference(old / weight_sum, in) / 50.0f);
// TODO: Determine colour smoothing param from magnitude of weighting difference
// Or at least consider the magnitude of weight difference some how.
const float cdiff = ftl::cuda::weighting(colourDifference(old / weight_sum, in), 255.0f);
const float alpha = (w > weight_sum/count) ? cdiff : 1.0f;
const float beta = (count == 0 || w > weight_sum/count) ? 1.0f : cdiff;
out(pos.x, pos.y) = old*alpha + in*w*beta;
contrib(pos.x, pos.y) = (int(count+1.0f) << 24) + int(w*beta * float(0xFFFF)) + int(weight_sum*alpha * float(0xFFFF));
}
}
}
template <ViewPortMode VPMODE>
__device__ inline float2 convertScreen(const Parameters ¶ms, int x, int y) {
return make_float2(x,y);
}
/*template <>
__device__ inline float2 convertScreen<ViewPortMode::Warping>(const Parameters ¶ms, int x, int y) {
const float coeff = 1.0f / (params.viewport.warpMatrix.entries[6] * x + params.viewport.warpMatrix.entries[7] * y + params.viewport.warpMatrix.entries[8]);
const float xcoo = coeff * (params.viewport.warpMatrix.entries[0] * x + params.viewport.warpMatrix.entries[1] * y + params.viewport.warpMatrix.entries[2]);
const float ycoo = coeff * (params.viewport.warpMatrix.entries[3] * x + params.viewport.warpMatrix.entries[4] * y + params.viewport.warpMatrix.entries[5]);
float2 pt = params.viewport.reverseMap(params.camera, make_float2(xcoo,ycoo));
return pt;
}*/
template <>
__device__ inline float2 convertScreen<ViewPortMode::Stretch>(const Parameters ¶ms, int x, int y) {
return params.viewport.reverseMap(params.camera, make_float2(x,y));
}
template <typename A>
__device__ inline auto getInput(TextureObject<A> &in, const float3 &screen, float width, float height) {
const float inSX = float(in.width()) / width;
const float inSY = float(in.height()) / height;
return in.tex2D(screen.x*inSX, screen.y*inSY);
}
__device__ float weightByNormal(TextureObject<half4> &normals, int x, int y, const float3x3 &transformR, const float3 &screenPos, const ftl::rgbd::Camera &camera) {
// Calculate the dot product of surface normal and camera ray
const float3 n = transformR * make_float3(normals.tex2D(x, y));
float3 ray = camera.screenToCam(screenPos.x, screenPos.y, 1.0f);
ray = ray / length(ray);
// Allow slightly beyond 90 degrees due to normal estimation errors
const float dotproduct = (max(dot(ray,n),-0.1f)+0.1) / 1.1f;
return dotproduct;
}
__device__ float depthMatching(const Parameters ¶ms, float d1, float d2) {
// TODO: Consider a slightly different pixel size check
const float threshold = (params.depthCoef / ((params.depthCoef / d2) - 1.0f)) - d2;
return (fabs(d1 - d2) <= threshold) ? 1.0f : 0.0f;
}
/*
* Full reprojection with normals and depth
*/
template <typename A, typename B, AccumulationFunction ACCUM, Projection PROJECT>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_src,
TextureObject<float> depth_in, // Virtual depth map
TextureObject<short> weights,
TextureObject<half4> normals,
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 transform, float3x3 transformR) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float2 rpt = convertScreen<VPMODE>(params, x, y);
//const float3 camPos = transform * params.camera.screenToCam(rpt.x, rpt.y, d);
const float3 camPos = transform * params.camera.unproject<PROJECT>(make_float3(x, y, d));
if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
// Not on screen so stop now...
if (screenPos.x < depth_src.width() && screenPos.y < depth_src.height()) {
const float d2 = depth_src.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f));
const auto input = getInput(in, screenPos, depth_src.width(), depth_src.height());
// Boolean match (0 or 1 weight). 1.0 if depths are sufficiently close
float weight = depthMatching(params, camPos.z, d2);
if (params.m_flags & ftl::render::kUseWeightsChannel)
weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
// TODO: Weight by distance to discontinuity? Perhaps as an alternative to
// removing a discontinuity. This would also gradually blend colours from
// multiple cameras that otherwise might jump when one cameras input is lost
// at an invalid patch.
/* Buehler C. et al. 2001. Unstructured Lumigraph Rendering. */
/* Orts-Escolano S. et al. 2016. Holoportation: Virtual 3D teleportation in real-time. */
// This is the simple naive colour weighting. It might be good
// enough for our purposes if the alignment step prevents ghosting
// TODO: Use depth and perhaps the neighbourhood consistency in:
// Kuster C. et al. 2011. FreeCam: A hybrid camera system for interactive free-viewpoint video
if (params.m_flags & ftl::render::kNormalWeightColours)
weight *= weightByNormal(normals, x, y, transformR, screenPos, camera);
const B output = make<B>(input); // * weight; //weightInput(input, weight);
if (weight > 0.0f) {
accumulateOutput<ACCUM,B>(out, contrib, make_uint2(x,y), output, weight);
}
}
}
}
}
/*
* Full reprojection without normals
*/
template <typename A, typename B, AccumulationFunction ACCUM, Projection PROJECT>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_src,
TextureObject<float> depth_in, // Virtual depth map
TextureObject<short> weights,
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 transform, float3x3 transformR) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float2 rpt = convertScreen<VPMODE>(params, x, y);
const float3 camPos = transform * params.camera.unproject<PROJECT>(make_float3(x, y, d));
if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
// Not on screen so stop now...
if (screenPos.x < depth_src.width() && screenPos.y < depth_src.height()) {
const float d2 = depth_src.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f));
const auto input = getInput(in, screenPos, depth_src.width(), depth_src.height());
// Boolean match (0 or 1 weight). 1.0 if depths are sufficiently close
float weight = depthMatching(params, camPos.z, d2);
if (params.m_flags & ftl::render::kUseWeightsChannel)
weight *= float(weights.tex2D(int(screenPos.x+0.5f), int(screenPos.y+0.5f))) / 32767.0f;
const B output = make<B>(input); // * weight; //weightInput(input, weight);
if (weight > 0.0f) {
accumulateOutput<ACCUM,B>(out, contrib, make_uint2(x,y), output, weight);
}
}
}
}
}
template <typename A, typename B>
void ftl::cuda::reproject(
TextureObject<A> &in,
TextureObject<float> &depth_src, // Original 3D points
TextureObject<float> &depth_in, // Virtual depth map
TextureObject<short> &weights,
TextureObject<half4> *normals,
TextureObject<B> &out, // Accumulated output
TextureObject<int> &contrib,
const Parameters ¶ms,
const Camera &camera, const float4x4 &transform, const float3x3 &transformR,
cudaStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
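// AccumulationFunction and Projection are compile-time template parameters of
// reprojection_kernel, so every branch below launches a distinct instantiation;
// the nested if/switch ladder only selects which one to run.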
if (normals) {
if (params.accumulationMode == AccumulationFunction::CloseWeights) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::Simple) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
}
}
} else {
if (params.accumulationMode == AccumulationFunction::CloseWeights) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::CloseWeights, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::BestWeight, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::Simple) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::Simple, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscard, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
switch (params.projection) {
case Projection::PERSPECTIVE: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::ORTHOGRAPHIC: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
case Projection::EQUIRECTANGULAR: reprojection_kernel<A,B,AccumulationFunction::ColourDiscardSmooth, Projection::EQUIRECTANGULAR><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
//case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
}
}
}
cudaSafeCall( cudaGetLastError() );
}
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<uchar4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
cudaStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
cudaStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_src, // Original 3D points
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<short> &weights,
ftl::cuda::TextureObject<half4> *normals,
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &transform, const float3x3 &transformR,
cudaStream_t stream);
//==============================================================================
// Without normals or depth
//==============================================================================
/*
* Pass 2: Accumulate attribute contributions if the points pass a visibility test.
*/
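// Simplified overload: there is no source depth map, confidence weights or normals here,
// so the depth test compares the reprojected depth against camera.maxDepth only and
// contributions are always combined with AccumulationFunction::Simple.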
template <typename A, typename B>
__global__ void reprojection_kernel(
TextureObject<A> in, // Attribute input
TextureObject<float> depth_in, // Virtual depth map
TextureObject<B> out, // Accumulated output
TextureObject<int> contrib,
Parameters params,
Camera camera, float4x4 poseInv) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float d = depth_in.tex2D((int)x, (int)y);
if (d > params.camera.minDepth && d < params.camera.maxDepth) {
//const float3 camPos = poseInv * params.camera.screenToCam(x, y, d);
const float3 camPos = poseInv * params.camera.unproject<Projection::PERSPECTIVE>(make_float3(x, y, d));
const float3 screenPos = camera.project<Projection::PERSPECTIVE>(camPos);
if (screenPos.x < in.width() && screenPos.y < in.height()) {
const auto input = in.tex2D(screenPos.x, screenPos.y);
float weight = depthMatching(params, camPos.z, camera.maxDepth);
const B weighted = make<B>(input) * weight;
if (weight > 0.0f) {
accumulateOutput<AccumulationFunction::Simple>(out, contrib, make_uint2(x,y), weighted, weight);
}
}
}
}
template <typename A, typename B>
void ftl::cuda::reproject(
TextureObject<A> &in,
TextureObject<float> &depth_in, // Virtual depth map
TextureObject<B> &out, // Accumulated output
TextureObject<int> &contrib,
const Parameters ¶ms,
const Camera &camera, const float4x4 &poseInv, cudaStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
reprojection_kernel<<<gridSize, blockSize, 0, stream>>>(
in,
depth_in,
out,
contrib,
params,
camera,
poseInv
);
cudaSafeCall( cudaGetLastError() );
}
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<uchar4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, cudaStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, cudaStream_t stream);
template void ftl::cuda::reproject(
ftl::cuda::TextureObject<float4> &in, // Original colour image
ftl::cuda::TextureObject<float> &depth_in, // Virtual depth map
ftl::cuda::TextureObject<float4> &out, // Accumulated output
ftl::cuda::TextureObject<int> &contrib,
const ftl::render::Parameters ¶ms,
const ftl::rgbd::Camera &camera,
const float4x4 &poseInv, cudaStream_t stream);
// ===== Equirectangular Reprojection ==========================================
__device__ inline float2 equirect_reprojection(int x_img, int y_img, double f, const float3x3 &rot, int w1, int h1, const ftl::rgbd::Camera &cam) {
float3 ray3d = cam.screenToCam(x_img, y_img, 1.0f);
ray3d /= length(ray3d);
ray3d = rot * ray3d;
//inverse formula for spherical projection, reference Szeliski book "Computer Vision: Algorithms and Applications" p439.
float theta = atan2(ray3d.y,sqrt(ray3d.x*ray3d.x+ray3d.z*ray3d.z));
float phi = atan2(ray3d.x, ray3d.z);
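// theta is the elevation (latitude) and phi the azimuth (longitude) of the rotated ray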
const float pi = 3.14159265f; // use full float precision for pi
//get 2D point on equirectangular map
float x_sphere = (((phi*w1)/pi+w1)/2);
float y_sphere = (theta+ pi/2)*h1/pi;
return make_float2(x_sphere,y_sphere);
}
__global__ void equirectangular_kernel(
TextureObject<uchar4> image_in,
TextureObject<uchar4> image_out,
Camera camera, float3x3 pose) {
const int x = (blockIdx.x*blockDim.x + threadIdx.x);
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= 0 && y >= 0 && x < image_out.width() && y < image_out.height()) {
const float2 p = equirect_reprojection(x,y, camera.fx, pose, image_in.width(), image_in.height(), camera);
const float4 colour = image_in.tex2D(p.x, p.y);
image_out(x,y) = make_uchar4(colour.x, colour.y, colour.z, 0);
}
}
void ftl::cuda::equirectangular_reproject(
ftl::cuda::TextureObject<uchar4> &image_in,
ftl::cuda::TextureObject<uchar4> &image_out,
const ftl::rgbd::Camera &camera, const float3x3 &pose, cudaStream_t stream) {
const dim3 gridSize((image_out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (image_out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
equirectangular_kernel<<<gridSize, blockSize, 0, stream>>>(image_in, image_out, camera, pose);
cudaSafeCall( cudaGetLastError() );
}
// ==== Correct for bad colours ================================================
__device__ inline uchar4 make_uchar4(const float4 v) {
return make_uchar4(v.x,v.y,v.z,v.w);
}
template <int RADIUS>
__global__ void fix_colour_kernel(
TextureObject<float> depth,
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour,
ftl::rgbd::Camera cam) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= RADIUS && y >= RADIUS && x < out.width()-RADIUS && y < out.height()-RADIUS) {
const float contrib = contribs.tex2D((int)x,(int)y);
const float d = depth.tex2D(x,y);
if (contrib == 0 && d > cam.minDepth && d < cam.maxDepth) {
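// Valid depth but no colour contribution: average the neighbours inside RADIUS
// that did receive contributions, falling back to bad_colour if none did.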
float4 sumcol = make_float4(0.0f);
float count = 0.0f;
for (int v=-RADIUS; v<=RADIUS; ++v) {
for (int u=-RADIUS; u<=RADIUS; ++u) {
const int contrib = contribs.tex2D((int)x+u,(int)y+v);
const float4 c = make_float4(out(int(x)+u,int(y)+v));
if (contrib > 0) {
sumcol += c;
count += 1.0f;
}
}
}
out(x,y) = (count > 0.0f) ? make_uchar4(sumcol / count) : bad_colour;
}
}
}
void ftl::cuda::fix_bad_colour(
TextureObject<float> &depth,
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
const ftl::rgbd::Camera &cam,
cudaStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
fix_colour_kernel<1><<<gridSize, blockSize, 0, stream>>>(depth, out, contribs, bad_colour, cam);
cudaSafeCall( cudaGetLastError() );
}
// ===== Show bad colour normalise =============================================
__global__ void show_missing_colour_kernel(
TextureObject<float> depth,
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour,
ftl::rgbd::Camera cam) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < out.width() && y < out.height()) {
const int contrib = contribs.tex2D((int)x,(int)y);
const float d = depth.tex2D(x,y);
if (contrib == 0 && d > cam.minDepth && d < cam.maxDepth) {
out(x,y) = bad_colour;
}
}
}
void ftl::cuda::show_missing_colour(
TextureObject<float> &depth,
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
const ftl::rgbd::Camera &cam,
cudaStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
show_missing_colour_kernel<<<gridSize, blockSize, 0, stream>>>(depth, out, contribs, bad_colour, cam);
cudaSafeCall( cudaGetLastError() );
}
// ===== Show colour weights ===================================================
__global__ void show_colour_weights_kernel(
TextureObject<uchar4> out,
TextureObject<int> contribs,
uchar4 bad_colour) {
const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x < out.width() && y < out.height()) {
const int contrib = contribs.tex2D((int)x,(int)y);
if (contrib > 0) {
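// contrib packs an accumulated weight (scaled by 0xFFFF) in its lower 24 bits and a
// sample count in the top 8 bits; w below is therefore the mean weight per sample.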
float w = float(contrib & 0xFFFFFF) / float(0xFFFF) / float(contrib >> 24);
out(x,y) = make_uchar4(float(bad_colour.x) * w, float(bad_colour.y) * w, float(bad_colour.z) * w, 0.0f);
}
}
}
void ftl::cuda::show_colour_weights(
TextureObject<uchar4> &out,
TextureObject<int> &contribs,
uchar4 bad_colour,
cudaStream_t stream) {
const dim3 gridSize((out.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (out.height() + T_PER_BLOCK - 1)/T_PER_BLOCK);
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
show_colour_weights_kernel<<<gridSize, blockSize, 0, stream>>>(out, contribs, bad_colour);
cudaSafeCall( cudaGetLastError() );
}
|
f6e6ae128ab8ab32e836104e7a5f8b405990c3a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/***********************************************************
tissueGPU1.cu
GPU kernel to accumulate contributions of tissue source
strengths qt to tissue solute levels pt.
TWS December 2011
Cuda 10.1 Version, August 2019
************************************************************/
__global__ void tissueGPU1Kernel(int *d_tisspoints, float *d_dtt000, float *d_pt000, float *d_qt000, int nnt)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jtp,ixyz,ix,iy,iz,jx,jy,jz,nnt2=2*nnt;
float p = 0.;
if(itp < nnt){
ix = d_tisspoints[itp];
iy = d_tisspoints[itp+nnt];
iz = d_tisspoints[itp+nnt2];
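// d_tisspoints stores all x indices, then all y, then all z (each block of length nnt);
// d_dtt000 is indexed by the L1 (Manhattan) offset between tissue points, presumably a
// precomputed distance-dependent interaction table.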
for(jtp=0; jtp<nnt; jtp++){
jx = d_tisspoints[jtp];
jy = d_tisspoints[jtp+nnt];
jz = d_tisspoints[jtp+nnt2];
ixyz = abs(jx-ix) + abs(jy-iy) + abs(jz-iz);
p += d_qt000[jtp]*d_dtt000[ixyz];
}
d_pt000[itp] = p;
}
} | f6e6ae128ab8ab32e836104e7a5f8b405990c3a6.cu | #include "includes.h"
/***********************************************************
tissueGPU1.cu
GPU kernel to accumulate contributions of tissue source
strengths qt to tissue solute levels pt.
TWS December 2011
Cuda 10.1 Version, August 2019
************************************************************/
__global__ void tissueGPU1Kernel(int *d_tisspoints, float *d_dtt000, float *d_pt000, float *d_qt000, int nnt)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jtp,ixyz,ix,iy,iz,jx,jy,jz,nnt2=2*nnt;
float p = 0.;
if(itp < nnt){
ix = d_tisspoints[itp];
iy = d_tisspoints[itp+nnt];
iz = d_tisspoints[itp+nnt2];
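// d_tisspoints stores all x indices, then all y, then all z (each block of length nnt);
// d_dtt000 is indexed by the L1 (Manhattan) offset between tissue points, presumably a
// precomputed distance-dependent interaction table.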
for(jtp=0; jtp<nnt; jtp++){
jx = d_tisspoints[jtp];
jy = d_tisspoints[jtp+nnt];
jz = d_tisspoints[jtp+nnt2];
ixyz = abs(jx-ix) + abs(jy-iy) + abs(jz-iz);
p += d_qt000[jtp]*d_dtt000[ixyz];
}
d_pt000[itp] = p;
}
} |
5a6cc6490e6074268e34f45be9c0e3fad16f1a69.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
#define SIZE 7
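// Single-block toy example: SIZE threads cooperate to find the maximum of a[];
// a general solution for large arrays would use a multi-block tree reduction.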
__global__ void max(int *a , int *c)
{
int i = threadIdx.x;
// one thread initialises the result, then every thread folds in its element atomically
// (the original unsynchronised writes to *c were a data race)
if(i == 0)
{
*c = a[0];
}
__syncthreads();
atomicMax(c, a[i]);
}
int main()
{
int i;
int a[SIZE];
int c;
int *dev_a, *dev_c;
hipMalloc((void **) &dev_a, SIZE*sizeof(int));
hipMalloc((void **) &dev_c, SIZE*sizeof(int));
cout<<"Enter the numbers : \n";
for( i = 0 ; i < SIZE ; i++)
{
cin>>a[i];
}
for( i = 0 ; i < SIZE ; i++)
{
cout<<a[i]<<" ";
}
hipMemcpy(dev_a , a, SIZE*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE), 0, 0, dev_a,dev_c);
hipMemcpy(&c, dev_c, sizeof(int),hipMemcpyDeviceToHost); // c is a single int, so copy only sizeof(int) bytes
cout<<"\n max value = ";
cout<<c;
hipFree(dev_a);
hipFree(dev_c);
return 0;
}
| 5a6cc6490e6074268e34f45be9c0e3fad16f1a69.cu | #include <cuda.h>
#include <iostream>
using namespace std;
#define SIZE 7
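// Single-block toy example: SIZE threads cooperate to find the maximum of a[];
// a general solution for large arrays would use a multi-block tree reduction.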
__global__ void max(int *a , int *c)
{
int i = threadIdx.x;
// one thread initialises the result, then every thread folds in its element atomically
// (the original unsynchronised writes to *c were a data race)
if(i == 0)
{
*c = a[0];
}
__syncthreads();
atomicMax(c, a[i]);
}
int main()
{
int i;
int a[SIZE];
int c;
int *dev_a, *dev_c;
cudaMalloc((void **) &dev_a, SIZE*sizeof(int));
cudaMalloc((void **) &dev_c, SIZE*sizeof(int));
cout<<"Enter the numbers : \n";
for( i = 0 ; i < SIZE ; i++)
{
cin>>a[i];
}
for( i = 0 ; i < SIZE ; i++)
{
cout<<a[i]<<" ";
}
cudaMemcpy(dev_a , a, SIZE*sizeof(int),cudaMemcpyHostToDevice);
max<<<1,SIZE>>>(dev_a,dev_c);
cudaMemcpy(&c, dev_c, SIZE*sizeof(int),cudaMemcpyDeviceToHost);
cout<<"\n max value = ";
cout<<c;
cudaFree(dev_a);
cudaFree(dev_c);
return 0;
}
|
ac67f93516eb1f94655101c9e93073a42fc97e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <math.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <algorithm>
#include <stdio.h>
#include <Eigen/Dense>
using namespace Eigen;
using namespace std::chrono;
typedef void(*addFunc)(float*, float*, int);
__device__
void add(float *x, float *y, int i)
{
y[i] = 2 * x[i] + y[i];
}
__global__
void kern(addFunc func, int n, float *x, float *y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
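// Grid-stride loop: the grid may cover fewer than n elements, so each thread
// strides through the array by the total number of launched threads.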
for (int i = index; i < n; i += stride)
func(x, y, i);
}
// The address of a __device__ function cannot be taken from host code, so it is
// stored in this device-side symbol and copied back with hipMemcpyFromSymbol.
__device__ addFunc d_add = add;
int main(void)
{
addFunc h_add;
hipMemcpyFromSymbol(&h_add, d_add, sizeof(addFunc));
std::cout << "h_add adr:" << h_add << std::endl;
std::cout << "d_add adr:" << h_add << std::endl;
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N * sizeof(float));
hipMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( kern) , dim3(numBlocks), dim3(blockSize) , 0, 0, h_add, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout << "Time taken by function: " << duration.count() << " microseconds" << std::endl;
// Check for errors (y = 2*x + y = 2*1 + 2, so all values should be 4.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i] - 4.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| ac67f93516eb1f94655101c9e93073a42fc97e3d.cu | #include <iostream>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <algorithm>
#include <stdio.h>
#include <Eigen/Dense>
using namespace Eigen;
using namespace std::chrono;
typedef void(*addFunc)(float*, float*, int);
__device__
void add(float *x, float *y, int i)
{
y[i] = 2 * x[i] + y[i];
}
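// Grid-stride loop: each thread handles elements index, index+stride, index+2*stride, ...
// and applies whatever device function the pointer refers to.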
__global__
void kern(addFunc func, int n, float *x, float *y) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
func(x, y, i);
}
// The host cannot take the address of a __device__ function directly, so keep the
// pointer in a __device__ variable and fetch it with cudaMemcpyFromSymbol.
__device__ addFunc d_add = add;
int main(void)
{
addFunc h_add;
cudaMemcpyFromSymbol(&h_add, d_add, sizeof(addFunc));
std::cout << "h_add adr:" << h_add << std::endl;
std::cout << "d_add adr:" << h_add << std::endl;
int N = 1 << 20;
float *x, *y;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N * sizeof(float));
cudaMallocManaged(&y, N * sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Run kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
auto start = high_resolution_clock::now();
kern <<< numBlocks, blockSize >>> (h_add, N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
std::cout << "Time taken by function: " << duration.count() << " microseconds" << std::endl;
	// Check for errors (the kernel computes y = 2*x + y, so all values should be 4.0f)
	float maxError = 0.0f;
	for (int i = 0; i < N; i++)
		maxError = fmax(maxError, fabs(y[i] - 4.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
8a42f854fee71cdd161c84b85b14e1f2ebf1ca0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/constants.h"
#include "hip/hip_cooperative_groups.h"
#include "stencils.cu"
#include "prefetch_smem.cu"
#include "stencil_border_check.cu"
using namespace cooperative_groups;
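// Shared-memory tiled stencil kernels: each block prefetches its tile plus a halo of
// RADIUS cells into dynamic shared memory, synchronizes, and then applies the stencil.
// The *_unroll variants process COARSEN_X elements per thread along x.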
__global__ void smem_padded_3d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int kstart,
unsigned int kend)
{
extern __shared__ float smem[];
unsigned int i, j, k, idx, sidx, smem_p_x, smem_p_y;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
k = threadIdx.z + blockIdx.z*blockDim.z;
idx = i + j*NX + k*NX*NY;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
smem_p_y = blockDim.y+2*RADIUS;
sidx = (threadIdx.x + RADIUS)
+ (threadIdx.y + RADIUS)*smem_p_x
+ (threadIdx.z + RADIUS)*smem_p_x*smem_p_y;
if (check_domain_border_3d(i, j, k, kstart, kend))
prefetch_3d(smem, d_u1, 0, i, j, k, idx, sidx, kstart, kend);
this_thread_block().sync();
if (check_stencil_border_3d(i, j, k, kstart, kend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
__global__ void smem_padded_unroll_3d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int kstart,
unsigned int kend)
{
extern __shared__ float smem[];
unsigned int i, j, k, si, sj, sk, i_off, si_off, u, idx, sidx, smem_p_x, smem_p_y;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
k = threadIdx.z + blockIdx.z*blockDim.z;
si = threadIdx.x + RADIUS;
sj = threadIdx.y + RADIUS;
sk = threadIdx.z + RADIUS;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
smem_p_y = blockDim.y+2*RADIUS;
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
si_off = si+u*blockDim.x;
idx = i_off+j*NX+k*NX*NY;
sidx = si_off + sj*smem_p_x + sk*smem_p_x*smem_p_y;
if (check_domain_border_3d(i_off, j, k, kstart, kend))
prefetch_3d(smem, d_u1, u, i_off, j, k, idx, sidx, kstart, kend);
}
this_thread_block().sync();
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
idx = i_off+j*NX+k*NX*NY;
si_off = si+u*blockDim.x;
sidx = si_off + sj*smem_p_x + sk*smem_p_x*smem_p_y;
if (check_stencil_border_3d(i_off, j, k, kstart, kend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
}
__global__ void smem_padded_2d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int jstart,
unsigned int jend)
{
extern __shared__ float smem[];
unsigned int i, j, idx, sidx, smem_p_x;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
idx = i + j*NX;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
sidx = (threadIdx.x + RADIUS)
+ (threadIdx.y + RADIUS)*smem_p_x;
if (check_domain_border_2d(i, j, jstart, jend))
prefetch_2d(smem, d_u1, 0, i, j, idx, sidx, jstart, jend);
this_thread_block().sync();
if (check_stencil_border_2d(i, j, jstart, jend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
__global__ void smem_padded_unroll_2d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int jstart,
unsigned int jend)
{
extern __shared__ float smem[];
unsigned int i, j, si, sj, i_off, si_off, u, idx, sidx, smem_p_x;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
si = threadIdx.x + RADIUS;
sj = threadIdx.y + RADIUS;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
si_off = si+u*blockDim.x;
idx = i_off+j*NX;
sidx = si_off+sj*smem_p_x;
if (check_domain_border_2d(i_off, j, jstart, jend))
prefetch_2d(smem, d_u1, u, i_off, j, idx, sidx, jstart, jend);
}
this_thread_block().sync();
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
idx = i_off+j*NX;
si_off = si+u*blockDim.x;
sidx = si_off+sj*smem_p_x;
if (check_stencil_border_2d(i_off, j, jstart, jend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
}
| 8a42f854fee71cdd161c84b85b14e1f2ebf1ca0e.cu | #include "../include/constants.h"
#include "cooperative_groups.h"
#include "stencils.cu"
#include "prefetch_smem.cu"
#include "stencil_border_check.cu"
using namespace cooperative_groups;
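// Shared-memory tiled stencil kernels: each block prefetches its tile plus a halo of
// RADIUS cells into dynamic shared memory, synchronizes, and then applies the stencil.
// The *_unroll variants process COARSEN_X elements per thread along x.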
__global__ void smem_padded_3d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int kstart,
unsigned int kend)
{
extern __shared__ float smem[];
unsigned int i, j, k, idx, sidx, smem_p_x, smem_p_y;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
k = threadIdx.z + blockIdx.z*blockDim.z;
idx = i + j*NX + k*NX*NY;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
smem_p_y = blockDim.y+2*RADIUS;
sidx = (threadIdx.x + RADIUS)
+ (threadIdx.y + RADIUS)*smem_p_x
+ (threadIdx.z + RADIUS)*smem_p_x*smem_p_y;
if (check_domain_border_3d(i, j, k, kstart, kend))
prefetch_3d(smem, d_u1, 0, i, j, k, idx, sidx, kstart, kend);
this_thread_block().sync();
if (check_stencil_border_3d(i, j, k, kstart, kend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
__global__ void smem_padded_unroll_3d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int kstart,
unsigned int kend)
{
extern __shared__ float smem[];
unsigned int i, j, k, si, sj, sk, i_off, si_off, u, idx, sidx, smem_p_x, smem_p_y;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
k = threadIdx.z + blockIdx.z*blockDim.z;
si = threadIdx.x + RADIUS;
sj = threadIdx.y + RADIUS;
sk = threadIdx.z + RADIUS;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
smem_p_y = blockDim.y+2*RADIUS;
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
si_off = si+u*blockDim.x;
idx = i_off+j*NX+k*NX*NY;
sidx = si_off + sj*smem_p_x + sk*smem_p_x*smem_p_y;
if (check_domain_border_3d(i_off, j, k, kstart, kend))
prefetch_3d(smem, d_u1, u, i_off, j, k, idx, sidx, kstart, kend);
}
this_thread_block().sync();
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
idx = i_off+j*NX+k*NX*NY;
si_off = si+u*blockDim.x;
sidx = si_off + sj*smem_p_x + sk*smem_p_x*smem_p_y;
if (check_stencil_border_3d(i_off, j, k, kstart, kend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
}
__global__ void smem_padded_2d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int jstart,
unsigned int jend)
{
extern __shared__ float smem[];
unsigned int i, j, idx, sidx, smem_p_x;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
idx = i + j*NX;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
sidx = (threadIdx.x + RADIUS)
+ (threadIdx.y + RADIUS)*smem_p_x;
if (check_domain_border_2d(i, j, jstart, jend))
prefetch_2d(smem, d_u1, 0, i, j, idx, sidx, jstart, jend);
this_thread_block().sync();
if (check_stencil_border_2d(i, j, jstart, jend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
__global__ void smem_padded_unroll_2d(float* __restrict__ d_u1,
float* __restrict__ d_u2,
unsigned int jstart,
unsigned int jend)
{
extern __shared__ float smem[];
unsigned int i, j, si, sj, i_off, si_off, u, idx, sidx, smem_p_x;
i = threadIdx.x + blockIdx.x*blockDim.x*COARSEN_X;
j = threadIdx.y + blockIdx.y*blockDim.y;
si = threadIdx.x + RADIUS;
sj = threadIdx.y + RADIUS;
smem_p_x = blockDim.x*COARSEN_X+2*RADIUS;
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
si_off = si+u*blockDim.x;
idx = i_off+j*NX;
sidx = si_off+sj*smem_p_x;
if (check_domain_border_2d(i_off, j, jstart, jend))
prefetch_2d(smem, d_u1, u, i_off, j, idx, sidx, jstart, jend);
}
this_thread_block().sync();
#pragma unroll
for (u=0; u<COARSEN_X; u++) {
i_off = i+u*blockDim.x;
idx = i_off+j*NX;
si_off = si+u*blockDim.x;
sidx = si_off+sj*smem_p_x;
if (check_stencil_border_2d(i_off, j, jstart, jend))
smem_padded_stencil(smem, d_u2, idx, sidx);
}
}
|
bb9df2fc6d86265f6e2b22c6097a4b4867a0a99d.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <xgboost/data.h>
#include <xgboost/c_api.h>
#include "test_hist_util.h"
#include "../helpers.h"
#include "../data/test_array_interface.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/common/math.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../include/xgboost/logging.h"
namespace xgboost {
namespace common {
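// Build reference cuts on the host by materializing the adapter into a SimpleDMatrix
// and running the CPU sketching path; used to cross-check the device results.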
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins, common::OmpGetNumThreads(0));
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins, common::OmpGetNumThreads(0));
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(hipGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
TestCategoricalSketch(1000, 256, 32, true,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
}
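// Mix one categorical and one numerical column: the resulting cuts should hold
// 64 numeric cut values plus one value per category.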
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, float(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = ::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns =5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat =
GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
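// Sketch directly from a device adapter (no DMatrix) into a SketchContainer and
// return the merged cuts.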
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, int num_columns, int num_rows,
DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = - 1.0;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts);
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, adapter.NumColumns(),
adapter.NumRows(), dmat.get());
TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
Context ctx;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
if (with_group) {
common::HistogramCuts weighted;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(kGroups);
// Generate different weight.
for (size_t i = 0; i < h_weights.size(); ++i) {
// FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
// we use more diverse weights, partially caused by
// https://github.com/dmlc/xgboost/issues/7946
h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
}
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&weighted);
ValidateCuts(weighted, dmat.get(), kBins);
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
| bb9df2fc6d86265f6e2b22c6097a4b4867a0a99d.cu | /*!
* Copyright 2019-2022 by XGBoost Contributors
*/
#include <dmlc/filesystem.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cmath>
#include <thrust/device_vector.h>
#include <xgboost/data.h>
#include <xgboost/c_api.h>
#include "test_hist_util.h"
#include "../helpers.h"
#include "../data/test_array_interface.h"
#include "../../../src/common/device_helpers.cuh"
#include "../../../src/common/hist_util.h"
#include "../../../src/common/hist_util.cuh"
#include "../../../src/data/device_adapter.cuh"
#include "../../../src/common/math.h"
#include "../../../src/data/simple_dmatrix.h"
#include "../../../include/xgboost/logging.h"
namespace xgboost {
namespace common {
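// Build reference cuts on the host by materializing the adapter into a SimpleDMatrix
// and running the CPU sketching path; used to cross-check the device results.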
template <typename AdapterT>
HistogramCuts GetHostCuts(AdapterT *adapter, int num_bins, float missing) {
data::SimpleDMatrix dmat(adapter, missing, 1);
HistogramCuts cuts = SketchOnDMatrix(&dmat, num_bins, common::OmpGetNumThreads(0));
return cuts;
}
TEST(HistUtil, DeviceSketch) {
int num_columns = 1;
int num_bins = 4;
std::vector<float> x = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 7.0f, -1.0f};
int num_rows = x.size();
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
HistogramCuts host_cuts = SketchOnDMatrix(dmat.get(), num_bins, common::OmpGetNumThreads(0));
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, SketchBatchNumElements) {
#if defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
LOG(WARNING) << "Test not runnable with RMM enabled.";
return;
#endif // defined(XGBOOST_USE_RMM) && XGBOOST_USE_RMM == 1
size_t constexpr kCols = 10000;
int device;
dh::safe_cuda(cudaGetDevice(&device));
auto avail = static_cast<size_t>(dh::AvailableMemory(device) * 0.8);
auto per_elem = detail::BytesPerElement(false);
auto avail_elem = avail / per_elem;
size_t rows = avail_elem / kCols * 10;
auto batch = detail::SketchBatchNumElements(0, rows, kCols, rows * kCols, device, 256, false);
ASSERT_EQ(batch, avail_elem);
}
TEST(HistUtil, DeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, DeviceSketchWeightsMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto device_cuts = DeviceSketch(0, dmat.get(), num_bins);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
TEST(HistUtil, DeviceSketchDeterminism) {
int num_rows = 500;
int num_columns = 5;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto reference_sketch = DeviceSketch(0, dmat.get(), num_bins);
size_t constexpr kRounds{ 100 };
for (size_t r = 0; r < kRounds; ++r) {
auto new_sketch = DeviceSketch(0, dmat.get(), num_bins);
ASSERT_EQ(reference_sketch.Values(), new_sketch.Values());
ASSERT_EQ(reference_sketch.MinValues(), new_sketch.MinValues());
}
}
TEST(HistUtil, DeviceSketchCategoricalAsNumeric) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchCategoricalFeatures) {
TestCategoricalSketch(1000, 256, 32, false,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
TestCategoricalSketch(1000, 256, 32, true,
[](DMatrix *p_fmat, int32_t num_bins) {
return DeviceSketch(0, p_fmat, num_bins);
});
}
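// Mix one categorical and one numerical column: the resulting cuts should hold
// 64 numeric cut values plus one value per category.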
void TestMixedSketch() {
size_t n_samples = 1000, n_features = 2, n_categories = 3;
std::vector<float> data(n_samples * n_features);
SimpleLCG gen;
SimpleRealUniformDistribution<float> cat_d{0.0f, float(n_categories)};
SimpleRealUniformDistribution<float> num_d{0.0f, 3.0f};
for (size_t i = 0; i < n_samples * n_features; ++i) {
if (i % 2 == 0) {
data[i] = std::floor(cat_d(&gen));
} else {
data[i] = num_d(&gen);
}
}
auto m = GetDMatrixFromData(data, n_samples, n_features);
m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
m->Info().feature_types.HostVector().push_back(FeatureType::kNumerical);
auto cuts = DeviceSketch(0, m.get(), 64);
ASSERT_EQ(cuts.Values().size(), 64 + n_categories);
}
TEST(HistUtil, DeviceSketchMixedFeatures) {
TestMixedSketch();
}
TEST(HistUtil, DeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto weighted_dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto& h_weights = weighted_dmat->Info().weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
auto wcuts = DeviceSketch(0, weighted_dmat.get(), num_bins);
ASSERT_EQ(cuts.MinValues(), wcuts.MinValues());
ASSERT_EQ(cuts.Ptrs(), wcuts.Ptrs());
ASSERT_EQ(cuts.Values(), wcuts.Values());
ValidateCuts(cuts, dmat.get(), num_bins);
ValidateCuts(wcuts, weighted_dmat.get(), num_bins);
}
}
}
TEST(HistUtil, DeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, batch_size);
ValidateCuts(cuts, dmat.get(), num_bins);
}
num_rows = 1000;
size_t batches = 16;
auto x = GenerateRandom(num_rows * batches, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows * batches, num_columns);
auto cuts_with_batches = DeviceSketch(0, dmat.get(), num_bins, num_rows);
auto cuts = DeviceSketch(0, dmat.get(), num_bins, 0);
auto const& cut_values_batched = cuts_with_batches.Values();
auto const& cut_values = cuts.Values();
CHECK_EQ(cut_values.size(), cut_values_batched.size());
for (size_t i = 0; i < cut_values.size(); ++i) {
ASSERT_NEAR(cut_values_batched[i], cut_values[i], 1e5);
}
}
TEST(HistUtil, DeviceSketchMultipleColumnsExternal) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns =5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
dmlc::TemporaryDirectory temp;
auto dmat =
GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
// See https://github.com/dmlc/xgboost/issues/5866.
TEST(HistUtil, DeviceSketchExternalMemoryWithWeights) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
dmlc::TemporaryDirectory temp;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetExternalMemoryDMatrixFromData(x, num_rows, num_columns, 100, temp);
dmat->Info().weights_.HostVector() = GenerateRandomWeights(num_rows);
for (auto num_bins : bin_sizes) {
auto cuts = DeviceSketch(0, dmat.get(), num_bins);
ValidateCuts(cuts, dmat.get(), num_bins);
}
}
}
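// Sketch directly from a device adapter (no DMatrix) into a SketchContainer and
// return the merged cuts.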
template <typename Adapter>
auto MakeUnweightedCutsForTest(Adapter adapter, int32_t num_bins, float missing, size_t batch_size = 0) {
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, adapter.NumColumns(), adapter.NumRows(), 0);
MetaInfo info;
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&batched_cuts);
return batched_cuts;
}
template <typename Adapter>
void ValidateBatchedCuts(Adapter adapter, int num_bins, int num_columns, int num_rows,
DMatrix* dmat, size_t batch_size = 0) {
common::HistogramCuts batched_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ValidateCuts(batched_cuts, dmat, num_bins);
}
TEST(HistUtil, AdapterDeviceSketch) {
int rows = 5;
int cols = 1;
int num_bins = 4;
float missing = - 1.0;
thrust::device_vector< float> data(rows*cols);
auto json_array_interface = Generate2dArrayInterface(rows, cols, "<f4", &data);
data = std::vector<float >{ 1.0,2.0,3.0,4.0,5.0 };
std::string str;
Json::Dump(json_array_interface, &str);
data::CupyAdapter adapter(str);
auto device_cuts = MakeUnweightedCutsForTest(adapter, num_bins, missing);
auto host_cuts = GetHostCuts(&adapter, num_bins, missing);
EXPECT_EQ(device_cuts.Values(), host_cuts.Values());
EXPECT_EQ(device_cuts.Ptrs(), host_cuts.Ptrs());
EXPECT_EQ(device_cuts.MinValues(), host_cuts.MinValues());
}
TEST(HistUtil, AdapterDeviceSketchMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
auto cuts = MakeUnweightedCutsForTest(adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
}
TEST(HistUtil, AdapterSketchSlidingWindowMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, false);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 0.95);
ConsoleLogger::Configure({{"verbosity", "0"}});
}
TEST(HistUtil, AdapterSketchSlidingWindowWeightedMemory) {
int num_columns = 100;
int num_rows = 1000;
int num_bins = 256;
auto x = GenerateRandom(num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
MetaInfo info;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(num_rows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
dh::GlobalMemoryLogger().Clear();
ConsoleLogger::Configure({{"verbosity", "3"}});
common::HistogramCuts batched_cuts;
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, num_bins, num_columns, num_rows, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
ConsoleLogger::Configure({{"verbosity", "0"}});
size_t bytes_required = detail::RequiredMemory(
num_rows, num_columns, num_rows * num_columns, num_bins, true);
EXPECT_LE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required * 1.05);
EXPECT_GE(dh::GlobalMemoryLogger().PeakMemory(), bytes_required);
}
void TestCategoricalSketchAdapter(size_t n, size_t num_categories,
int32_t num_bins, bool weighted) {
auto h_x = GenerateRandomCategoricalSingleColumn(n, num_categories);
thrust::device_vector<float> x(h_x);
auto adapter = AdapterFromData(x, n, 1);
MetaInfo info;
info.num_row_ = n;
info.num_col_ = 1;
info.feature_types.HostVector().push_back(FeatureType::kCategorical);
if (weighted) {
std::vector<float> weights(n, 0);
SimpleLCG lcg;
SimpleRealUniformDistribution<float> dist(0, 1);
for (auto& v : weights) {
v = dist(&lcg);
}
info.weights_.HostVector() = weights;
}
ASSERT_EQ(info.feature_types.Size(), 1);
SketchContainer container(info.feature_types, num_bins, 1, n, 0);
AdapterDeviceSketch(adapter.Value(), num_bins, info,
std::numeric_limits<float>::quiet_NaN(), &container);
HistogramCuts cuts;
container.MakeCuts(&cuts);
thrust::sort(x.begin(), x.end());
auto n_uniques = thrust::unique(x.begin(), x.end()) - x.begin();
ASSERT_NE(n_uniques, x.size());
ASSERT_EQ(cuts.TotalBins(), n_uniques);
ASSERT_EQ(n_uniques, num_categories);
auto& values = cuts.cut_values_.HostVector();
ASSERT_TRUE(std::is_sorted(values.cbegin(), values.cend()));
auto is_unique = (std::unique(values.begin(), values.end()) - values.begin()) == n_uniques;
ASSERT_TRUE(is_unique);
x.resize(n_uniques);
h_x.resize(n_uniques);
thrust::copy(x.begin(), x.end(), h_x.begin());
for (decltype(n_uniques) i = 0; i < n_uniques; ++i) {
ASSERT_EQ(h_x[i], values[i]);
}
}
TEST(HistUtil, AdapterDeviceSketchCategorical) {
int categorical_sizes[] = {2, 6, 8, 12};
int num_bins = 256;
int sizes[] = {25, 100, 1000};
for (auto n : sizes) {
for (auto num_categories : categorical_sizes) {
auto x = GenerateRandomCategoricalSingleColumn(n, num_categories);
auto dmat = GetDMatrixFromData(x, n, 1);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, n, 1);
ValidateBatchedCuts(adapter, num_bins, adapter.NumColumns(),
adapter.NumRows(), dmat.get());
TestCategoricalSketchAdapter(n, num_categories, num_bins, true);
TestCategoricalSketchAdapter(n, num_categories, num_bins, false);
}
}
}
TEST(HistUtil, AdapterDeviceSketchMultipleColumns) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
for (auto num_bins : bin_sizes) {
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, AdapterDeviceSketchBatches) {
int num_bins = 256;
int num_rows = 5000;
int batch_sizes[] = {0, 100, 1500, 6000};
int num_columns = 5;
for (auto batch_size : batch_sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get(), batch_size);
}
}
// Check sketching from adapter or DMatrix results in the same answer
// Consistency here is useful for testing and user experience
TEST(HistUtil, SketchingEquivalent) {
int bin_sizes[] = {2, 16, 256, 512};
int sizes[] = {100, 1000, 1500};
int num_columns = 5;
for (auto num_rows : sizes) {
auto x = GenerateRandom(num_rows, num_columns);
auto dmat = GetDMatrixFromData(x, num_rows, num_columns);
for (auto num_bins : bin_sizes) {
auto dmat_cuts = DeviceSketch(0, dmat.get(), num_bins);
auto x_device = thrust::device_vector<float>(x);
auto adapter = AdapterFromData(x_device, num_rows, num_columns);
common::HistogramCuts adapter_cuts = MakeUnweightedCutsForTest(
adapter, num_bins, std::numeric_limits<float>::quiet_NaN());
EXPECT_EQ(dmat_cuts.Values(), adapter_cuts.Values());
EXPECT_EQ(dmat_cuts.Ptrs(), adapter_cuts.Ptrs());
EXPECT_EQ(dmat_cuts.MinValues(), adapter_cuts.MinValues());
ValidateBatchedCuts(adapter, num_bins, num_columns, num_rows, dmat.get());
}
}
}
TEST(HistUtil, DeviceSketchFromGroupWeights) {
size_t constexpr kRows = 3000, kCols = 200, kBins = 256;
size_t constexpr kGroups = 10;
auto m = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
auto& h_weights = m->Info().weights_.HostVector();
h_weights.resize(kRows);
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
m->SetInfo("group", groups.data(), DataType::kUInt32, kGroups);
HistogramCuts weighted_cuts = DeviceSketch(0, m.get(), kBins, 0);
h_weights.clear();
HistogramCuts cuts = DeviceSketch(0, m.get(), kBins, 0);
ASSERT_EQ(cuts.Values().size(), weighted_cuts.Values().size());
ASSERT_EQ(cuts.MinValues().size(), weighted_cuts.MinValues().size());
ASSERT_EQ(cuts.Ptrs().size(), weighted_cuts.Ptrs().size());
for (size_t i = 0; i < cuts.Values().size(); ++i) {
EXPECT_EQ(cuts.Values()[i], weighted_cuts.Values()[i]) << "i:"<< i;
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], weighted_cuts.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), weighted_cuts.Ptrs().at(i));
}
ValidateCuts(weighted_cuts, m.get(), kBins);
}
void TestAdapterSketchFromWeights(bool with_group) {
size_t constexpr kRows = 300, kCols = 20, kBins = 256;
size_t constexpr kGroups = 10;
HostDeviceVector<float> storage;
std::string m =
RandomDataGenerator{kRows, kCols, 0}.Device(0).GenerateArrayInterface(
&storage);
MetaInfo info;
Context ctx;
auto& h_weights = info.weights_.HostVector();
if (with_group) {
h_weights.resize(kGroups);
} else {
h_weights.resize(kRows);
}
std::fill(h_weights.begin(), h_weights.end(), 1.0f);
std::vector<bst_group_t> groups(kGroups);
if (with_group) {
for (size_t i = 0; i < kGroups; ++i) {
groups[i] = kRows / kGroups;
}
info.SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
info.weights_.SetDevice(0);
info.num_row_ = kRows;
info.num_col_ = kCols;
data::CupyAdapter adapter(m);
auto const& batch = adapter.Value();
HostDeviceVector<FeatureType> ft;
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
common::HistogramCuts cuts;
sketch_container.MakeCuts(&cuts);
auto dmat = GetDMatrixFromData(storage.HostVector(), kRows, kCols);
if (with_group) {
dmat->Info().SetInfo(ctx, "group", groups.data(), DataType::kUInt32, kGroups);
}
dmat->Info().SetInfo(ctx, "weight", h_weights.data(), DataType::kFloat32, h_weights.size());
dmat->Info().num_col_ = kCols;
dmat->Info().num_row_ = kRows;
ASSERT_EQ(cuts.Ptrs().size(), kCols + 1);
ValidateCuts(cuts, dmat.get(), kBins);
if (with_group) {
dmat->Info().weights_ = decltype(dmat->Info().weights_)(); // remove weight
HistogramCuts non_weighted = DeviceSketch(0, dmat.get(), kBins, 0);
for (size_t i = 0; i < cuts.Values().size(); ++i) {
ASSERT_EQ(cuts.Values()[i], non_weighted.Values()[i]);
}
for (size_t i = 0; i < cuts.MinValues().size(); ++i) {
ASSERT_EQ(cuts.MinValues()[i], non_weighted.MinValues()[i]);
}
for (size_t i = 0; i < cuts.Ptrs().size(); ++i) {
ASSERT_EQ(cuts.Ptrs().at(i), non_weighted.Ptrs().at(i));
}
}
if (with_group) {
common::HistogramCuts weighted;
auto& h_weights = info.weights_.HostVector();
h_weights.resize(kGroups);
// Generate different weight.
for (size_t i = 0; i < h_weights.size(); ++i) {
// FIXME(jiamingy): Some entries generated GPU test cannot pass the validate cuts if
// we use more diverse weights, partially caused by
// https://github.com/dmlc/xgboost/issues/7946
h_weights[i] = (i % 2 == 0 ? 1 : 2) / static_cast<float>(kGroups);
}
SketchContainer sketch_container(ft, kBins, kCols, kRows, 0);
AdapterDeviceSketch(adapter.Value(), kBins, info, std::numeric_limits<float>::quiet_NaN(),
&sketch_container);
sketch_container.MakeCuts(&weighted);
ValidateCuts(weighted, dmat.get(), kBins);
}
}
TEST(HistUtil, AdapterSketchFromWeights) {
TestAdapterSketchFromWeights(false);
TestAdapterSketchFromWeights(true);
}
} // namespace common
} // namespace xgboost
|
454c60df5911016741eb93a1b0868d868005d6e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CUDA implementation of the NFFT.
-----------
Accelerating the Non-equispaced Fast Fourier Transform on Commodity Graphics Hardware.
	T.S. Sørensen, T. Schaeffter, K.Ø. Noe, M.S. Hansen.
IEEE Transactions on Medical Imaging 2008; 27(4):538-547.
Real-time Reconstruction of Sensitivity Encoded Radial Magnetic Resonance Imaging Using a Graphics Processing Unit.
	T.S. Sørensen, D. Atkinson, T. Schaeffter, M.S. Hansen.
IEEE Transactions on Medical Imaging 2009; 28(12): 1974-1985.
*/
// Includes - Thrust
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/extrema.h>
// Includes - Gadgetron
#include "cuNFFT.h"
#include "cuNDFFT.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_utils.h"
#include "vector_td_utilities.h"
#include "vector_td_io.h"
#include "cudaDeviceManager.h"
#include "check_CUDA.h"
// Includes - CUDA
#include <hip/device_functions.h>
#include <math_constants.h>
#include <hipfft.h>
// Includes - stdlibs
#include <stdio.h>
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <cmath>
#include <sstream>
#include <stdexcept>
//using namespace std;
using std::vector;
using namespace thrust;
using namespace Gadgetron;
// Kernel configuration
#define NFFT_MAX_COILS_COMPUTE_1x 8
#define NFFT_MAX_COILS_COMPUTE_2x 16
#define NFFT_THREADS_PER_KERNEL 192
// Reference to shared memory
extern __shared__ char _shared_mem[];
// Includes containing the NFFT convolution implementation
#include "KaiserBessel_kernel.cu"
#include "NFFT_C2NC_conv_kernel.cu"
#include "NFFT_NC2C_conv_kernel.cu"
#include "NFFT_NC2C_atomic_conv_kernel.cu"
#include "NFFT_preprocess_kernel.hip"
// Default template arguments requires c++-0x ?
typedef float dummy;
// The declaration of atomic/non-atomic NC2C convolution
// We would love to hide this inside the class, but the compiler core dumps on us when we try...
//
template<class REAL, unsigned int D, bool ATOMICS> struct _convolve_NFFT_NC2C{
static bool apply( cuNFFT_plan<REAL,D,ATOMICS> *plan,
cuNDArray<complext<REAL> > *in,
cuNDArray<complext<REAL> > *out,
bool accumulate );
};
// Common multi-device handling: prepare
//
template<class I1, class I2, class I3>
static bool prepare( int device, int *old_device,
cuNDArray<I1> *in1, cuNDArray<I1> **in1_int,
cuNDArray<I2> *in2 = 0x0, cuNDArray<I2> **in2_int = 0x0,
cuNDArray<I3> *in3 = 0x0, cuNDArray<I3> **in3_int = 0x0 )
{
// Get current Cuda device
if( hipGetDevice(old_device) != hipSuccess ) {
throw cuda_error("Error: cuNFFT : unable to get device no");
}
if( device != *old_device && hipSetDevice(device) != hipSuccess) {
throw cuda_error("Error : cuNFFT : unable to set device no");
}
// Transfer arrays to compute device if necessary
if( in1 ){
if( device != in1->get_device() )
*in1_int = new cuNDArray<I1>(*in1); // device transfer
else
*in1_int = in1;
}
if( in2 ){
if( device != in2->get_device() )
*in2_int = new cuNDArray<I2>(*in2); // device transfer
else
*in2_int = in2;
}
if( in3 ){
if( device != in3->get_device() )
*in3_int = new cuNDArray<I3>(*in3); // device transfer
else
*in3_int = in3;
}
return true;
}
// Common multi-device handling: restore
//
template<class I1, class I2, class I3>
static bool restore( int old_device, cuNDArray<I1> *out,
cuNDArray<I1> *in1, cuNDArray<I1> *in1_int,
cuNDArray<I2> *in2 = 0x0, cuNDArray<I2> *in2_int = 0x0,
cuNDArray<I3> *in3 = 0x0, cuNDArray<I3> *in3_int = 0x0 )
{
if( in1 && out && out->get_device() != in1_int->get_device() ){
*out = *in1_int; // device transfer by assignment
}
// Check if the internal arrays need deletion (they do only if they were created in ::prepare())
//
if( in1 && in1->get_device() != in1_int->get_device() ){
delete in1_int;
}
if( in2 && in2->get_device() != in2_int->get_device() ){
delete in2_int;
}
if( in3 && in3->get_device() != in3_int->get_device() ){
delete in3_int;
}
// Get current Cuda device
int device;
if( hipGetDevice(&device) != hipSuccess ) {
throw cuda_error("Error: cuNFFT : unable to get device no");
}
// Restore old device
if( device != old_device && hipSetDevice(old_device) != hipSuccess) {
throw cuda_error("Error: cuNFFT : unable to restore device no");
}
return true;
}
//
// Public class methods
//
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::cuNFFT_plan()
{
// Minimal initialization
barebones();
}
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::cuNFFT_plan( typename uint64d<D>::Type matrix_size, typename uint64d<D>::Type matrix_size_os, REAL W, int device )
{
// Minimal initialization
barebones();
// Setup plan
setup( matrix_size, matrix_size_os, W, device );
}
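// Example (sketch): constructing a 2D single-precision plan. The 128/256 matrix sizes,
// W=5.5 and device 0 are illustrative values only; matrix sizes must be multiples of
// the device warp size (checked in setup() below).
//
//   std::vector<size_t> ms(2,128), ms_os(2,256);
//   cuNFFT_plan<float,2,false> plan( from_std_vector<size_t,2>(ms),
//                                    from_std_vector<size_t,2>(ms_os), 5.5f, 0 );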
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::~cuNFFT_plan()
{
wipe(NFFT_WIPE_ALL);
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::setup( typename uint64d<D>::Type matrix_size, typename uint64d<D>::Type matrix_size_os, REAL W, int _device )
{
// Free memory
wipe(NFFT_WIPE_ALL);
//
// Check if the device is valid
//
if( _device<0 ){
if( hipGetDevice( &device ) != hipSuccess ){
throw cuda_error("Error: cuNFFT_plan::setup: unable to determine device properties.");
}
}
else
device = _device;
// The convolution does not work properly for very small convolution kernel widths
// (experimentally observed limit)
if( W < REAL(1.8) ) {
throw std::runtime_error("Error: the convolution kernel width for the cuNFFT plan is too small.");
}
typename uint64d<D>::Type vec_warp_size( (size_t)(cudaDeviceManager::Instance()->warp_size(device)) );
//
// Check input against certain requirements
//
if( sum(matrix_size%vec_warp_size) || sum(matrix_size_os%vec_warp_size) ){
//GDEBUG_STREAM("Matrix size: " << matrix_size << std::endl);
//GDEBUG_STREAM("Matrix size os: " << matrix_size_os << std::endl);
//GDEBUG_STREAM("Warp size: " << vec_warp_size << std::endl);
throw std::runtime_error("Error: Illegal matrix size for the cuNFFT plan (not a multiple of the warp size)");
}
//
// Setup private variables
//
this->matrix_size = matrix_size;
this->matrix_size_os = matrix_size_os;
REAL W_half = REAL(0.5)*W;
vector_td<REAL,D> W_vec(W_half);
matrix_size_wrap = vector_td<size_t,D>( ceil(W_vec) );
matrix_size_wrap<<=1;
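// The wrap zone adds 2*ceil(W/2) cells per dimension (e.g. W=5.5 gives a wrap of 6 cells),
// so a convolution kernel centered at any sample position fits inside the padded
// (matrix_size_os+matrix_size_wrap) grid.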
alpha = vector_td<REAL,D>(matrix_size_os) / vector_td<REAL,D>(matrix_size);
typename reald<REAL,D>::Type ones(REAL(1));
if( weak_less( alpha, ones ) ){
throw std::runtime_error("Error: cuNFFT : Illegal oversampling ratio suggested");
}
this->W = W;
// Compute Kaiser-Bessel beta
compute_beta();
int device_no_old;
if (hipGetDevice(&device_no_old) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to get device no");
}
if( device != device_no_old && hipSetDevice(device) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to set device");
}
initialized = true;
if( device != device_no_old && hipSetDevice(device_no_old) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to restore device");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::preprocess( cuNDArray<typename reald<REAL,D>::Type> *trajectory, NFFT_prep_mode mode )
{
if( !trajectory || trajectory->get_number_of_elements()==0 ){
throw std::runtime_error("Error: cuNFFT_plan::preprocess: invalid trajectory");
}
if( !initialized ){
throw std::runtime_error("Error: cuNFFT_plan::preprocess: cuNFFT_plan::setup must be invoked prior to preprocessing.");
}
wipe(NFFT_WIPE_PREPROCESSING);
cuNDArray<typename reald<REAL,D>::Type> *trajectory_int;
int old_device;
if( !prepare<typename reald<REAL,D>::Type,dummy,dummy>(device, &old_device, trajectory, &trajectory_int ) ){
throw cuda_error("Error: cuNFFT_plan::preprocess: device preparation error.");
}
number_of_samples = trajectory_int->get_size(0);
number_of_frames = trajectory_int->get_number_of_elements()/number_of_samples;
// Make sure that the trajectory values are within range [-1/2;1/2]
thrust::pair< thrust::device_ptr<REAL>, thrust::device_ptr<REAL> > mm_pair =
thrust::minmax_element( device_pointer_cast<REAL>((REAL*)trajectory_int->get_data_ptr()),
device_pointer_cast<REAL>(((REAL*)trajectory_int->get_data_ptr())+trajectory_int->get_number_of_elements()*D ));
if( *mm_pair.first < REAL(-0.5) || *mm_pair.second > REAL(0.5) ){
std::stringstream ss;
ss << "Error: cuNFFT::preprocess : trajectory [" << *mm_pair.first << "; " << *mm_pair.second << "] out of range [-1/2;1/2]";
throw std::runtime_error(ss.str());
}
// Make Thrust device vector of trajectory and samples
device_vector< vector_td<REAL,D> > trajectory_positions_in
( device_pointer_cast< vector_td<REAL,D> >(trajectory_int->get_data_ptr()),
device_pointer_cast< vector_td<REAL,D> >(trajectory_int->get_data_ptr()+trajectory_int->get_number_of_elements() ));
trajectory_positions = new device_vector< vector_td<REAL,D> >( trajectory_int->get_number_of_elements() );
CHECK_FOR_CUDA_ERROR();
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
vector_td<REAL,D> matrix_size_os_plus_wrap_real = vector_td<REAL,D>( (matrix_size_os+matrix_size_wrap)>>1 );
// convert input trajectory in [-1/2;1/2] to [0;matrix_size_os]
thrust::transform( trajectory_positions_in.begin(), trajectory_positions_in.end(), trajectory_positions->begin(),
trajectory_scale<REAL,D>(matrix_size_os_real, matrix_size_os_plus_wrap_real) );
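// After scaling, each sample lies in the padded grid [0; matrix_size_os+matrix_size_wrap),
// offset by half the wrap zone, so the NC2C convolution can scatter a full kernel width
// without bounds checks; the wrap zone is folded back onto the image in image_wrap().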
CHECK_FOR_CUDA_ERROR();
if( !( mode == NFFT_PREP_C2NC || ATOMICS )){
// allocate storage for and compute temporary prefix-sum variable (#cells influenced per sample)
device_vector<unsigned int> c_p_s(trajectory_int->get_number_of_elements());
device_vector<unsigned int> c_p_s_ps(trajectory_int->get_number_of_elements());
CHECK_FOR_CUDA_ERROR();
REAL half_W = REAL(0.5)*W;
thrust::plus<unsigned int> binary_op;
thrust::transform(trajectory_positions->begin(), trajectory_positions->end(), c_p_s.begin(), compute_num_cells_per_sample<REAL,D>(half_W));
inclusive_scan( c_p_s.begin(), c_p_s.end(), c_p_s_ps.begin(), binary_op ); // prefix sum
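// After the inclusive scan, c_p_s_ps[i] holds the total number of (cell,sample) pairs
// produced by samples 0..i; its last element is therefore the overall pair count used
// to size the tuple vectors below.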
// Build the vector of (grid_idx, sample_idx) tuples. Actually kept in two separate vectors.
unsigned int num_pairs = c_p_s_ps.back();
c_p_s.clear();
thrust::device_vector<unsigned int> *tuples_first = new device_vector<unsigned int>(num_pairs);
tuples_last = new device_vector<unsigned int>(num_pairs);
CHECK_FOR_CUDA_ERROR();
// Fill tuple vector
write_pairs<REAL,D>( vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples, number_of_frames, W,
raw_pointer_cast(&(*trajectory_positions)[0]), raw_pointer_cast(&c_p_s_ps[0]),
raw_pointer_cast(&(*tuples_first)[0]), raw_pointer_cast(&(*tuples_last)[0]) );
c_p_s_ps.clear();
// Sort by grid indices
sort_by_key( tuples_first->begin(), tuples_first->end(), tuples_last->begin() );
// each bucket_begin[i] indexes the first element of bucket i's list of points
// each bucket_end[i] indexes one past the last element of bucket i's list of points
bucket_begin = new device_vector<unsigned int>(number_of_frames*prod(matrix_size_os+matrix_size_wrap));
bucket_end = new device_vector<unsigned int>(number_of_frames*prod(matrix_size_os+matrix_size_wrap));
CHECK_FOR_CUDA_ERROR();
// find the beginning of each bucket's list of points
counting_iterator<unsigned int> search_begin(0);
lower_bound(tuples_first->begin(), tuples_first->end(), search_begin, search_begin + number_of_frames*prod(matrix_size_os+matrix_size_wrap), bucket_begin->begin() );
// find the end of each bucket's list of points
upper_bound(tuples_first->begin(), tuples_first->end(), search_begin, search_begin + number_of_frames*prod(matrix_size_os+matrix_size_wrap), bucket_end->begin() );
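// Small worked example (illustrative): if the sorted grid indices in tuples_first are
// [0,0,2,2,2,5], then lower_bound/upper_bound yield bucket 0 = [0;2), bucket 2 = [2;5),
// bucket 5 = [5;6), and every other bucket is empty (begin == end).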
delete tuples_first;
}
preprocessed_C2NC = true;
if( mode != NFFT_PREP_C2NC )
preprocessed_NC2C = true;
if( !restore<typename reald<REAL,D>::Type,dummy,dummy>(old_device, trajectory, trajectory, trajectory_int) ){
throw cuda_error("Error: cuNFFT_plan::preprocess: unable to restore compute device.");
}
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, NFFT_comp_mode mode )
{
// Validity checks
unsigned char components;
if( mode == NFFT_FORWARDS_C2NC )
components = _NFFT_CONV_C2NC + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_FORWARDS_NC2C )
components = _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_BACKWARDS_NC2C )
components = _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_BACKWARDS_C2NC )
components = _NFFT_CONV_C2NC + _NFFT_FFT + _NFFT_DEAPODIZATION;
else{
throw std::runtime_error("Error: cuNFFT_plan::compute: unknown mode");
}
{
cuNDArray<complext<REAL> > *samples, *image;
if( mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ){
image = in; samples = out;
} else{
image = out; samples = in;
}
check_consistency( samples, image, dcw, components );
}
cuNDArray<complext<REAL> > *in_int = 0x0, *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
if( !prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::compute: device preparation error.");
}
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>
( (mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ) ? *in->get_dimensions() : *out->get_dimensions() );
bool oversampled_image = (image_dims==matrix_size_os);
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
{
cuNDArray<complext<REAL> > *image = ((mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ) ? in : out );
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
vec_dims.push_back(image->get_size(d));
}
cuNDArray<complext<REAL> > *working_image = 0x0, *working_samples = 0x0;
switch(mode){
case NFFT_FORWARDS_C2NC:
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFT_C2NC( working_image, out_int );
if( dcw_int )
*out_int *= *dcw_int;
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
break;
case NFFT_FORWARDS_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
}
else{
working_image = out_int;
}
compute_NFFT_NC2C( working_samples, working_image );
if( !oversampled_image ){
crop<complext<REAL>, D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
}
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
case NFFT_BACKWARDS_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
}
else{
working_image = out_int;
}
compute_NFFTH_NC2C( working_samples, working_image );
if( !oversampled_image ){
crop<complext<REAL> ,D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
}
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
case NFFT_BACKWARDS_C2NC:
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFTH_C2NC( working_image, out_int );
if( dcw_int )
*out_int *= *dcw_int;
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
break;
};
if( !restore<complext<REAL> ,complext<REAL> ,REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::compute: unable to restore compute device.");
}
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::mult_MH_M( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, std::vector<size_t> halfway_dims )
{
// Validity checks
unsigned char components = _NFFT_CONV_C2NC + _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
if( in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error("Error: cuNFFT_plan::mult_MH_M: in/out image sizes mismatch");
}
cuNDArray<complext<REAL> > *working_samples = new cuNDArray<complext<REAL> >(&halfway_dims);
check_consistency( working_samples, in, dcw, components );
cuNDArray<complext<REAL> > *in_int = 0x0;
cuNDArray<complext<REAL> > *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
if( !prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::mult_MH_M: device preparation error.");
}
cuNDArray<complext<REAL> > *working_image = 0x0;
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>(*in->get_dimensions());
bool oversampled_image = (image_dims==matrix_size_os);
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ )
vec_dims.push_back(in->get_size(d));
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFT_C2NC( working_image, working_samples );
// Density compensation
if( dcw ){
*working_samples *= *dcw_int;
*working_samples *= *dcw_int;
}
compute_NFFTH_NC2C( working_samples, working_image );
delete working_samples;
working_samples = 0x0;
if( !oversampled_image ){
crop<complext<REAL>, D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
delete working_image; working_image = 0x0;
}
restore<complext<REAL> ,complext<REAL> ,REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int );
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, NFFT_conv_mode mode, bool accumulate )
{
unsigned char components;
if( mode == NFFT_CONV_C2NC )
components = _NFFT_CONV_C2NC;
else
components = _NFFT_CONV_NC2C;
{
cuNDArray<complext<REAL> > *samples, *image;
if( mode == NFFT_CONV_C2NC ){
image = in; samples = out;
} else{
image = out; samples = in;
}
check_consistency( samples, image, dcw, components );
}
cuNDArray<complext<REAL> > *in_int = 0x0, *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int );
cuNDArray<complext<REAL> > *working_samples = 0x0;
typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>
(*(((mode == NFFT_CONV_C2NC) ? in : out )->get_dimensions()));
bool oversampled_image = (image_dims==matrix_size_os);
if( !oversampled_image ){
throw std::runtime_error("Error: cuNFFT_plan::convolve: ERROR: oversampled image not provided as input.");
}
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
{
cuNDArray<complext<REAL> > *image = ((mode == NFFT_CONV_C2NC) ? in : out );
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
vec_dims.push_back(image->get_size(d));
}
switch(mode){
case NFFT_CONV_C2NC:
convolve_NFFT_C2NC( in_int, out_int, accumulate );
if( dcw_int ) *out_int *= *dcw_int;
break;
case NFFT_CONV_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
_convolve_NFFT_NC2C<REAL,D,ATOMICS>::apply( this, working_samples, out_int, accumulate );
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
default:
throw std::runtime_error( "Error: cuNFFT_plan::convolve: unknown mode.");
}
restore<complext<REAL>, complext<REAL>, REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::fft(cuNDArray<complext<REAL> > *data, NFFT_fft_mode mode, bool do_scale )
{
cuNDArray<complext<REAL> > *data_int = 0x0;
int old_device;
prepare<complext<REAL>,dummy,dummy>( device, &old_device, data, &data_int );
typename uint64d<D>::Type _dims_to_transform = counting_vec<size_t,D>();
vector<size_t> dims_to_transform = to_std_vector( _dims_to_transform );
if( mode == NFFT_FORWARDS ){
cuNDFFT<REAL>::instance()->fft( data_int, &dims_to_transform, do_scale );
}
else{
cuNDFFT<REAL>::instance()->ifft( data_int, &dims_to_transform, do_scale );
}
restore<complext<REAL> ,dummy,dummy>(old_device, data, data, data_int);
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::deapodize( cuNDArray<complext<REAL> > *image, bool fourier_domain)
{
unsigned char components;
components = _NFFT_FFT;
check_consistency( 0x0, image, 0x0, components );
cuNDArray<complext<REAL> > *image_int = 0x0;
int old_device;
prepare<complext<REAL>,dummy,dummy>(device, &old_device, image, &image_int );
typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>(*image->get_dimensions());
bool oversampled_image = (image_dims==matrix_size_os);
if( !oversampled_image ){
throw std::runtime_error( "Error: cuNFFT_plan::deapodize: ERROR: oversampled image not provided as input.");
}
if (fourier_domain){
if (!deapodization_filterFFT)
deapodization_filterFFT = compute_deapodization_filter(true);
*image_int *= *deapodization_filterFFT;
} else {
if (!deapodization_filter)
deapodization_filter = compute_deapodization_filter(false);
*image_int *= *deapodization_filter;
}
restore<complext<REAL> ,dummy,dummy>(old_device, image, image, image_int);
}
//
// Private class methods
//
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::check_consistency( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image,
cuNDArray<REAL> *weights, unsigned char components )
{
if( !initialized ){
throw std::runtime_error( "Error: cuNFFT_plan: Unable to proceed without setup.");
}
if( (components & _NFFT_CONV_C2NC ) && !preprocessed_C2NC ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to compute NFFT before preprocessing.");
}
if( (components & _NFFT_CONV_NC2C ) && !(preprocessed_NC2C || (preprocessed_C2NC && ATOMICS ) ) ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to compute NFFT before preprocessing.");
}
if( ((components & _NFFT_CONV_C2NC ) || (components & _NFFT_CONV_NC2C )) && !(image && samples) ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to process 0x0 input/output.");
}
if( ((components & _NFFT_FFT) || (components & _NFFT_DEAPODIZATION )) && !image ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to process 0x0 input.");
}
if( image->get_number_of_dimensions() < D ){
throw std::runtime_error("Error: cuNFFT_plan: Number of image dimensions mismatch the plan.");
}
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>( *image->get_dimensions() );
bool oversampled_image = (image_dims==matrix_size_os);
if( !((oversampled_image) ? (image_dims == matrix_size_os) : (image_dims == matrix_size) )){
throw std::runtime_error("Error: cuNFFT_plan: Image dimensions mismatch.");
}
if( (components & _NFFT_CONV_C2NC ) || (components & _NFFT_CONV_NC2C )){
if( (samples->get_number_of_elements() == 0) || (samples->get_number_of_elements() % (number_of_frames*number_of_samples)) ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: The number of samples is not a multiple of #samples/frame x #frames as requested through preprocessing");
}
unsigned int num_batches_in_samples_array = samples->get_number_of_elements()/(number_of_frames*number_of_samples);
unsigned int num_batches_in_image_array = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ ){
num_batches_in_image_array *= image->get_size(d);
}
num_batches_in_image_array /= number_of_frames;
if( num_batches_in_samples_array != num_batches_in_image_array ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\nLeading to %d batches in the samples array.\nThe number of batches in the image array is %d.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames, num_batches_in_samples_array, num_batches_in_image_array ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: Number of batches mismatch between samples and image arrays");
}
}
if( components & _NFFT_CONV_NC2C ){
if( weights ){
if( weights->get_number_of_elements() == 0 ||
!( weights->get_number_of_elements() == number_of_samples ||
weights->get_number_of_elements() == number_of_frames*number_of_samples) ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\n#weights: %ld.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames, weights->get_number_of_elements() ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: The number of weights should match #samples/frame x #frames as requested through preprocessing");
}
}
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::barebones()
{
// These are the fundamental booleans checked before accessing the various member pointers
initialized = preprocessed_C2NC = preprocessed_NC2C = false;
// Clear matrix sizes
clear(matrix_size);
clear(matrix_size_os);
// Clear pointers
trajectory_positions = 0x0;
tuples_last = bucket_begin = bucket_end = 0x0;
// and specify the device
if (hipGetDevice(&device) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::barebones:: unable to get device no");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::wipe( NFFT_wipe_mode mode )
{
// Get current Cuda device
int old_device;
if( hipGetDevice(&old_device) != hipSuccess ) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to get device no");
}
if( device != old_device && hipSetDevice(device) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to set device no");
}
if( mode==NFFT_WIPE_ALL && initialized ){
deapodization_filter.reset();
initialized = false;
}
if( preprocessed_NC2C ){
if( tuples_last ) delete tuples_last;
if( bucket_begin ) delete bucket_begin;
if( bucket_end ) delete bucket_end;
}
if( preprocessed_C2NC || preprocessed_NC2C ){
delete trajectory_positions;
preprocessed_C2NC = preprocessed_NC2C = false;
}
if( device != old_device && hipSetDevice(old_device) != hipSuccess) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to restore device no");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_beta()
{
// Compute the Kaiser-Bessel beta parameter according to the formula provided in
// Beatty et al. IEEE TMI 2005;24(6):799-808.
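// i.e. beta = pi * sqrt( (W/alpha)^2 * (alpha-0.5)^2 - 0.8 ) per dimension;
// e.g. (illustrative) W = 5.5 and alpha = 2 give beta ~= 12.65.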
for( unsigned int d=0; d<D; d++ )
beta[d] = (M_PI*std::sqrt((W*W)/(alpha[d]*alpha[d])*(alpha[d]-REAL(0.5))*(alpha[d]-REAL(0.5))-REAL(0.8)));
}
//
// Grid fictitious trajectory with a single sample at the origin
//
template<class REAL, unsigned int D> __global__ void
compute_deapodization_filter_kernel( typename uintd<D>::Type matrix_size_os, typename reald<REAL,D>::Type matrix_size_os_real,
REAL W, REAL half_W, REAL one_over_W,
typename reald<REAL,D>::Type beta, complext<REAL> * __restrict__ image_os )
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int num_elements = prod(matrix_size_os);
if( idx <num_elements ){
// Compute weight from Kaiser-Bessel filter
const typename uintd<D>::Type cell_pos = idx_to_co<D>(idx, matrix_size_os);
// Sample position ("origin")
const vector_td<REAL,D> sample_pos = REAL(0.5)*matrix_size_os_real;
// Calculate the distance between the cell and the sample
vector_td<REAL,D> cell_pos_real = vector_td<REAL,D>(cell_pos);
const typename reald<REAL,D>::Type delta = abs(sample_pos-cell_pos_real);
// Compute convolution weight.
REAL weight;
REAL zero = REAL(0);
vector_td<REAL,D> half_W_vec( half_W );
if( weak_greater( delta, half_W_vec ) )
weight = zero;
else{
weight = KaiserBessel<REAL>( delta, matrix_size_os_real, one_over_W, beta );
//if( !isfinite(weight) )
//weight = zero;
}
// Output weight
complext<REAL> result;
result._real = weight;
result._imag = zero;
image_os[idx] = result;
}
}
//
// Function to calculate the deapodization filter
//
template<class REAL, unsigned int D, bool ATOMICS> boost::shared_ptr<cuNDArray<complext<REAL> > >
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_deapodization_filter( bool FFTed)
{
std::vector<size_t> tmp_vec_os = to_std_vector(matrix_size_os);
boost::shared_ptr< cuNDArray<complext<REAL> > > filter( new cuNDArray<complext<REAL> >(tmp_vec_os));
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>(matrix_size_os);
// Find dimensions of grid/blocks.
dim3 dimBlock( 256 );
dim3 dimGrid( (prod(matrix_size_os)+dimBlock.x-1)/dimBlock.x );
// Invoke kernel
hipLaunchKernelGGL(( compute_deapodization_filter_kernel<REAL,D>), dim3(dimGrid), dim3(dimBlock), 0, 0,
vector_td<unsigned int,D>(matrix_size_os), matrix_size_os_real, W, REAL(0.5)*W, REAL(1)/W, beta, filter->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
// FFT
if (FFTed)
fft( filter.get(), NFFT_FORWARDS, false );
else
fft( filter.get(), NFFT_BACKWARDS, false );
// Reciprocal
reciprocal_inplace(filter.get());
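// The filter now holds the reciprocal of the gridded (and optionally FFT'ed) kernel
// response, so the element-wise multiplication in deapodize() cancels the apodization
// introduced by the Kaiser-Bessel convolution.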
return filter;
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFT_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples )
{
// private method - no consistency check. We trust in ourselves.
// Deapodization
deapodize( image );
// FFT
fft( image, NFFT_FORWARDS );
// Convolution
convolve( image, samples, 0x0, NFFT_CONV_C2NC );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFTH_NC2C( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image )
{
// private method - no consistency check. We trust in ourselves.
// Convolution
convolve( samples, image, 0x0, NFFT_CONV_NC2C );
// FFT
fft( image, NFFT_BACKWARDS );
// Deapodization
deapodize( image );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFTH_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples )
{
// private method - no consistency check. We trust in ourselves.
// Deapodization
deapodize( image, true );
// FFT
fft( image, NFFT_BACKWARDS );
// Convolution
convolve( image, samples, 0x0, NFFT_CONV_C2NC );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFT_NC2C( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image )
{
// private method - no consistency check. We trust in ourselves.
// Convolution
convolve( samples, image, 0x0, NFFT_CONV_NC2C );
// FFT
fft( image, NFFT_FORWARDS );
// Deapodization
deapodize( image, true );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve_NFFT_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples, bool accumulate )
{
// private method - no consistency check. We trust in ourselves.
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
/*
Setup grid and threads
*/
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
max_coils = NFFT_MAX_COILS_COMPUTE_1x;
}
else{
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
}
// We can (only) convolve max_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
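// Batching example (illustrative): num_batches = 20 coils with max_coils = 16 gives
// num_repetitions = 2, processing 16 coils in the first kernel launch and a tail of 4
// coils in the last one.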
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (number_of_samples+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<REAL,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<REAL,D> );
unsigned int double_warp_size_power=0;
unsigned int __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
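// double_warp_size_power = log2(2*warpSize), e.g. 6 for a warp size of 32; the
// convolution kernels rely on it for their power-of-two modulus tricks.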
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
/*
Invoke kernel
*/
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
hipLaunchKernelGGL(( NFFT_convolve_kernel<REAL,D>)
, dim3(dimGrid), dim3(dimBlock), ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread), 0,
alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
image->get_data_ptr()+repetition*prod(matrix_size_os)*number_of_frames*domain_size_coils,
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
double_warp_size_power, REAL(0.5)*W, REAL(1)/(W), accumulate, matrix_size_os_real );
CHECK_FOR_CUDA_ERROR();
}
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve_NFFT_NC2C( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples, bool accumulate )
{
_convolve_NFFT_NC2C<REAL,D,ATOMICS>::apply( this, image, samples, accumulate );
}
template<unsigned int D> struct
_convolve_NFFT_NC2C<float,D,true>{ // True: use atomic operations variant
static bool apply( cuNFFT_plan<float,D,true> *plan,
cuNDArray<complext<float> > *samples,
cuNDArray<complext<float> > *image,
bool accumulate )
{
//
// Bring in some variables from the plan
unsigned int device = plan->device;
unsigned int number_of_frames = plan->number_of_frames;
unsigned int number_of_samples = plan->number_of_samples;
typename uint64d<D>::Type matrix_size_os = plan->matrix_size_os;
typename uint64d<D>::Type matrix_size_wrap = plan->matrix_size_wrap;
typename reald<float,D>::Type alpha = plan->alpha;
typename reald<float,D>::Type beta = plan->beta;
float W = plan->W;
thrust::device_vector< typename reald<float,D>::Type > *trajectory_positions = plan->trajectory_positions;
//
// Atomic operations are only supported in compute model 2.0 and up
//
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
throw cuda_error("Error: Atomic NC2C NFFT only supported on device with compute model 2.0 or higher");
}
// Check if warp_size is a power of two. We do some modulus tricks in the kernels that depend on this...
if( !((cudaDeviceManager::Instance()->warp_size(device) & (cudaDeviceManager::Instance()->warp_size(device)-1)) == 0 ) ){
throw cuda_error("cuNFFT: unsupported hardware (warpSize is not a power of two)");
}
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
//
// Setup grid and threads
//
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
// We can (only) convolve domain_size_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (number_of_samples+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<float,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<float,D> );
unsigned int double_warp_size_power=0, __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
vector_td<float,D> matrix_size_os_real = vector_td<float,D>( matrix_size_os );
if( !accumulate ){
clear(image);
}
//
// Invoke kernel
//
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
hipLaunchKernelGGL(( NFFT_H_atomic_convolve_kernel<float,D>)
, dim3(dimGrid), dim3(dimBlock), ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread), 0,
alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
image->get_data_ptr()+repetition*prod(matrix_size_os)*number_of_frames*domain_size_coils,
double_warp_size_power, float(0.5)*W, float(1)/(W), matrix_size_os_real );
}
CHECK_FOR_CUDA_ERROR();
return true;
}
};
template<unsigned int D> struct
_convolve_NFFT_NC2C<double,D,true>{ // True: use atomic operations variant
// Atomics don't exist for doubles, so this gives a compile error if you actually try to use it.
};
template<class REAL, unsigned int D> struct
_convolve_NFFT_NC2C<REAL,D,false>{ // False: use non-atomic operations variant
static void apply( cuNFFT_plan<REAL,D,false> *plan,
cuNDArray<complext<REAL> > *samples,
cuNDArray<complext<REAL> > *image,
bool accumulate )
{
// Bring in some variables from the plan
unsigned int device = plan->device;
unsigned int number_of_frames = plan->number_of_frames;
unsigned int number_of_samples = plan->number_of_samples;
typename uint64d<D>::Type matrix_size_os = plan->matrix_size_os;
typename uint64d<D>::Type matrix_size_wrap = plan->matrix_size_wrap;
typename reald<REAL,D>::Type alpha = plan->alpha;
typename reald<REAL,D>::Type beta = plan->beta;
REAL W = plan->W;
thrust::device_vector< typename reald<REAL,D>::Type > *trajectory_positions = plan->trajectory_positions;
thrust::device_vector<unsigned int> *tuples_last = plan->tuples_last;
thrust::device_vector<unsigned int> *bucket_begin = plan->bucket_begin;
thrust::device_vector<unsigned int> *bucket_end = plan->bucket_end;
// private method - no consistency check. We trust in ourselves.
// Check if warp_size is a power of two. We do some modulus tricks in the kernels that depend on this...
if( !((cudaDeviceManager::Instance()->warp_size(device) & (cudaDeviceManager::Instance()->warp_size(device)-1)) == 0 ) ){
throw cuda_error("cuNFFT: unsupported hardware (warpSize is not a power of two)");
}
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
//
// Setup grid and threads
//
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
max_coils = NFFT_MAX_COILS_COMPUTE_1x;
}
else{
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
}
// We can (only) convolve domain_size_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (prod(matrix_size_os+matrix_size_wrap)+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<REAL,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<REAL,D> );
unsigned int double_warp_size_power=0, __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
// Define temporary image that includes a wrapping zone
cuNDArray<complext<REAL> > _tmp;
vector<size_t> vec_dims = to_std_vector(matrix_size_os+matrix_size_wrap);
if( number_of_frames > 1 )
vec_dims.push_back(number_of_frames);
if( num_batches > 1 )
vec_dims.push_back(num_batches);
_tmp.create(&vec_dims);
//
// Invoke kernel
//
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
hipLaunchKernelGGL(( NFFT_H_convolve_kernel<REAL,D>)
, dim3(dimGrid), dim3(dimBlock), ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread), 0,
alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os+matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
_tmp.get_data_ptr()+repetition*prod(matrix_size_os+matrix_size_wrap)*number_of_frames*domain_size_coils,
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
raw_pointer_cast(&(*tuples_last)[0]), raw_pointer_cast(&(*bucket_begin)[0]), raw_pointer_cast(&(*bucket_end)[0]),
double_warp_size_power, REAL(0.5)*W, REAL(1)/(W), matrix_size_os_real );
}
CHECK_FOR_CUDA_ERROR();
plan->image_wrap( &_tmp, image, accumulate );
};
};
// Image wrap kernels
template<class REAL, unsigned int D> __global__ void
image_wrap_kernel( typename uintd<D>::Type matrix_size_os, typename uintd<D>::Type matrix_size_wrap, bool accumulate,
const complext<REAL> * __restrict__ in, complext<REAL> * __restrict__ out )
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int num_elements_per_image_src = prod(matrix_size_os+matrix_size_wrap);
const unsigned int image_offset_src = blockIdx.y*num_elements_per_image_src;
const typename uintd<D>::Type co = idx_to_co<D>(idx, matrix_size_os);
const typename uintd<D>::Type half_wrap = matrix_size_wrap>>1;
// Make "boolean" vectors denoting whether wrapping needs to be performed in a given direction (forwards/backwards)
vector_td<bool,D> B_l = vector_less( co, half_wrap );
vector_td<bool,D> B_r = vector_greater_equal( co, matrix_size_os-half_wrap );
complext<REAL> result = in[co_to_idx<D>(co+half_wrap, matrix_size_os+matrix_size_wrap) + image_offset_src];
if( sum(B_l+B_r) > 0 ){
// Fold back the wrapping zone onto the image ("periodically")
//
// There are 2^D-1 ways to pick combinations of dimensions in D-dimensional space, e.g.
//
// { x, y, xy } in 2D
// { x, y, z, xy, xz, yz, xyz } in 3D
//
// Every "letter" in each combination provides two possible wraps (either end of the dimension)
//
// For every 2^D-1 combinations DO
// - find the number of dimensions, d, in the combination
// - create 2^(d) stride vectors and test for wrapping using the 'B'-vectors above.
// - accumulate the contributions
//
// The following code represents dimensions as bits in a char.
//
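// Example in 2D (illustrative): a cell within half_wrap of both low edges has
// B_l = (true,true), so it accumulates three wrapped contributions, one per
// combination: {x} with stride (1,0), {y} with stride (0,1) and {xy} with stride (1,1).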
for( unsigned char combination = 1; combination < (1<<D); combination++ ){
// Find d
unsigned char d = 0;
for( unsigned char i=0; i<D; i++ )
d += ((combination & (1<<i)) > 0 );
// Create stride vector for each wrapping test
for( unsigned char s = 0; s < (1<<d); s++ ){
// Target for stride
typename intd<D>::Type stride;
char wrap_requests = 0;
char skipped_dims = 0;
// Fill dimensions of the stride
for( unsigned char i=1; i<D+1; i++ ){
// Is the stride dimension present in the current combination?
if( i & combination ){
// A zero bit in s indicates "check for left wrap" and a one bit is interpreted as "check for right wrap"
// ("left/right" for the individual dimension meaning wrapping on either side of the dimension).
if( i & (s<<(skipped_dims)) ){
if( B_r.vec[i-1] ){ // Wrapping required
stride[i-1] = -1;
wrap_requests++;
}
else
stride[i-1] = 0;
}
else{
if( B_l.vec[i-1] ){ // Wrapping required
stride[i-1] = 1;
wrap_requests++;
}
else
stride[i-1] = 0;
}
}
else{
// Do not test for wrapping in dimension 'i-1' (for this combination)
stride[i-1] = 0;
skipped_dims++;
}
}
// Now it is time to do the actual wrapping (if needed)
if( wrap_requests == d ){
typename intd<D>::Type src_co_int = vector_td<int,D>(co+half_wrap);
typename intd<D>::Type matrix_size_os_int = vector_td<int,D>(matrix_size_os);
typename intd<D>::Type co_offset_int = src_co_int + component_wise_mul<int,D>(stride,matrix_size_os_int);
typename uintd<D>::Type co_offset = vector_td<unsigned int,D>(co_offset_int);
result += in[co_to_idx<D>(co_offset, matrix_size_os+matrix_size_wrap) + image_offset_src];
break; // only one stride per combination can contribute (e.g. one edge, one corner)
}
}
}
}
// Output
const unsigned int image_offset_tgt = blockIdx.y*prod(matrix_size_os);
if( accumulate ) result += out[idx+image_offset_tgt];
out[idx+image_offset_tgt] = result;
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::image_wrap( cuNDArray<complext<REAL> > *source, cuNDArray<complext<REAL> > *target, bool accumulate )
{
unsigned int num_batches = 1;
for( unsigned int d=D; d<source->get_number_of_dimensions(); d++ )
num_batches *= source->get_size(d);
num_batches /= number_of_frames;
// Set dimensions of grid/blocks.
unsigned int bdim = 256;
dim3 dimBlock( bdim );
dim3 dimGrid( prod(matrix_size_os)/bdim, number_of_frames*num_batches );
// Safety check
if( (prod(matrix_size_os)%bdim) != 0 ) {
std::stringstream ss;
ss << "Error: cuNFFT : the number of oversampled image elements must be a multiplum of the block size: " << bdim;
throw std::runtime_error(ss.str());
}
// Invoke kernel
hipLaunchKernelGGL(( image_wrap_kernel<REAL,D>), dim3(dimGrid), dim3(dimBlock), 0, 0,
vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), accumulate, source->get_data_ptr(), target->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
}
//
// Template instantion
//
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 1, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 1, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 1, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 2, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 2, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 2, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 3, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 3, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 3, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 4, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 4, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 4, false >;
| 454c60df5911016741eb93a1b0868d868005d6e6.cu | /*
CUDA implementation of the NFFT.
-----------
Accelerating the Non-equispaced Fast Fourier Transform on Commodity Graphics Hardware.
T.S. Sørensen, T. Schaeffter, K.Ø. Noe, M.S. Hansen.
IEEE Transactions on Medical Imaging 2008; 27(4):538-547.
Real-time Reconstruction of Sensitivity Encoded Radial Magnetic Resonance Imaging Using a Graphics Processing Unit.
T.S. Sørensen, D. Atkinson, T. Schaeffter, M.S. Hansen.
IEEE Transactions on Medical Imaging 2009; 28(12): 1974-1985.
*/
// Includes - Thrust
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/extrema.h>
// Includes - Gadgetron
#include "cuNFFT.h"
#include "cuNDFFT.h"
#include "cuNDArray_operators.h"
#include "cuNDArray_elemwise.h"
#include "cuNDArray_utils.h"
#include "vector_td_utilities.h"
#include "vector_td_io.h"
#include "cudaDeviceManager.h"
#include "check_CUDA.h"
// Includes - CUDA
#include <device_functions.h>
#include <math_constants.h>
#include <cufft.h>
// Includes - stdlibs
#include <stdio.h>
#include <assert.h>
#include <limits.h>
#include <math.h>
#include <cmath>
#include <sstream>
#include <stdexcept>
//using namespace std;
using std::vector;
using namespace thrust;
using namespace Gadgetron;
// Kernel configuration
#define NFFT_MAX_COILS_COMPUTE_1x 8
#define NFFT_MAX_COILS_COMPUTE_2x 16
#define NFFT_THREADS_PER_KERNEL 192
// Reference to shared memory
extern __shared__ char _shared_mem[];
// Includes containing the NFFT convolution implementation
#include "KaiserBessel_kernel.cu"
#include "NFFT_C2NC_conv_kernel.cu"
#include "NFFT_NC2C_conv_kernel.cu"
#include "NFFT_NC2C_atomic_conv_kernel.cu"
#include "NFFT_preprocess_kernel.cu"
// Default template arguments requires c++-0x ?
typedef float dummy;
// The declaration of atomic/non-atomic NC2C convolution
// We would love to hide this inside the class, but the compiler core dumps on us when we try...
//
template<class REAL, unsigned int D, bool ATOMICS> struct _convolve_NFFT_NC2C{
static bool apply( cuNFFT_plan<REAL,D,ATOMICS> *plan,
cuNDArray<complext<REAL> > *in,
cuNDArray<complext<REAL> > *out,
bool accumulate );
};
// Common multi-device handling: prepare
//
template<class I1, class I2, class I3>
static bool prepare( int device, int *old_device,
cuNDArray<I1> *in1, cuNDArray<I1> **in1_int,
cuNDArray<I2> *in2 = 0x0, cuNDArray<I2> **in2_int = 0x0,
cuNDArray<I3> *in3 = 0x0, cuNDArray<I3> **in3_int = 0x0 )
{
// Get current Cuda device
if( cudaGetDevice(old_device) != cudaSuccess ) {
throw cuda_error("Error: cuNFFT : unable to get device no");
}
if( device != *old_device && cudaSetDevice(device) != cudaSuccess) {
throw cuda_error("Error : cuNFFT : unable to set device no");
}
// Transfer arrays to compute device if necessary
if( in1 ){
if( device != in1->get_device() )
*in1_int = new cuNDArray<I1>(*in1); // device transfer
else
*in1_int = in1;
}
if( in2 ){
if( device != in2->get_device() )
*in2_int = new cuNDArray<I2>(*in2); // device transfer
else
*in2_int = in2;
}
if( in3 ){
if( device != in3->get_device() )
*in3_int = new cuNDArray<I3>(*in3); // device transfer
else
*in3_int = in3;
}
return true;
}
// Common multi-device handling: restore
//
template<class I1, class I2, class I3>
static bool restore( int old_device, cuNDArray<I1> *out,
cuNDArray<I1> *in1, cuNDArray<I1> *in1_int,
cuNDArray<I2> *in2 = 0x0, cuNDArray<I2> *in2_int = 0x0,
cuNDArray<I3> *in3 = 0x0, cuNDArray<I3> *in3_int = 0x0 )
{
if( in1 && out && out->get_device() != in1_int->get_device() ){
*out = *in1_int; // device transfer by assignment
}
// Check if the internal arrays need deletion (they do only if they were created in ::prepare())
//
if( in1 && in1->get_device() != in1_int->get_device() ){
delete in1_int;
}
if( in2 && in2->get_device() != in2_int->get_device() ){
delete in2_int;
}
if( in3 && in3->get_device() != in3_int->get_device() ){
delete in3_int;
}
// Get current Cuda device
int device;
if( cudaGetDevice(&device) != cudaSuccess ) {
throw cuda_error("Error: cuNFFT : unable to get device no");
}
// Restore old device
if( device != old_device && cudaSetDevice(old_device) != cudaSuccess) {
throw cuda_error("Error: cuNFFT : unable to restore device no");
}
return true;
}
//
// Public class methods
//
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::cuNFFT_plan()
{
// Minimal initialization
barebones();
}
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::cuNFFT_plan( typename uint64d<D>::Type matrix_size, typename uint64d<D>::Type matrix_size_os, REAL W, int device )
{
// Minimal initialization
barebones();
// Setup plan
setup( matrix_size, matrix_size_os, W, device );
}
template<class REAL, unsigned int D, bool ATOMICS>
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::~cuNFFT_plan()
{
wipe(NFFT_WIPE_ALL);
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::setup( typename uint64d<D>::Type matrix_size, typename uint64d<D>::Type matrix_size_os, REAL W, int _device )
{
// Free memory
wipe(NFFT_WIPE_ALL);
//
// Check if the device is valid
//
if( _device<0 ){
if( cudaGetDevice( &device ) != cudaSuccess ){
throw cuda_error("Error: cuNFFT_plan::setup: unable to determine device properties.");
}
}
else
device = _device;
// The convolution does not work properly for very small convolution kernel widths
// (experimentally observed limit)
if( W < REAL(1.8) ) {
throw std::runtime_error("Error: the convolution kernel width for the cuNFFT plan is too small.");
}
typename uint64d<D>::Type vec_warp_size( (size_t)(cudaDeviceManager::Instance()->warp_size(device)) );
//
// Check input against certain requirements
//
if( sum(matrix_size%vec_warp_size) || sum(matrix_size_os%vec_warp_size) ){
//GDEBUG_STREAM("Matrix size: " << matrix_size << std::endl);
//GDEBUG_STREAM("Matrix size os: " << matrix_size_os << std::endl);
//GDEBUG_STREAM("Warp size: " << vec_warp_size << std::endl);
throw std::runtime_error("Error: Illegal matrix size for the cuNFFT plan (not a multiple of the warp size)");
}
//
// Setup private variables
//
this->matrix_size = matrix_size;
this->matrix_size_os = matrix_size_os;
REAL W_half = REAL(0.5)*W;
vector_td<REAL,D> W_vec(W_half);
matrix_size_wrap = vector_td<size_t,D>( ceil(W_vec) );
matrix_size_wrap<<=1;
alpha = vector_td<REAL,D>(matrix_size_os) / vector_td<REAL,D>(matrix_size);
typename reald<REAL,D>::Type ones(REAL(1));
if( weak_less( alpha, ones ) ){
throw std::runtime_error("Error: cuNFFT : Illegal oversampling ratio suggested");
}
this->W = W;
// Compute Kaiser-Bessel beta
compute_beta();
int device_no_old;
if (cudaGetDevice(&device_no_old) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to get device no");
}
if( device != device_no_old && cudaSetDevice(device) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to set device");
}
initialized = true;
if( device != device_no_old && cudaSetDevice(device_no_old) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::setup: unable to restore device");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::preprocess( cuNDArray<typename reald<REAL,D>::Type> *trajectory, NFFT_prep_mode mode )
{
if( !trajectory || trajectory->get_number_of_elements()==0 ){
throw std::runtime_error("Error: cuNFFT_plan::preprocess: invalid trajectory");
}
if( !initialized ){
throw std::runtime_error("Error: cuNFFT_plan::preprocess: cuNFFT_plan::setup must be invoked prior to preprocessing.");
}
wipe(NFFT_WIPE_PREPROCESSING);
cuNDArray<typename reald<REAL,D>::Type> *trajectory_int;
int old_device;
if( !prepare<typename reald<REAL,D>::Type,dummy,dummy>(device, &old_device, trajectory, &trajectory_int ) ){
throw cuda_error("Error: cuNFFT_plan::preprocess: device preparation error.");
}
number_of_samples = trajectory_int->get_size(0);
number_of_frames = trajectory_int->get_number_of_elements()/number_of_samples;
// Make sure that the trajectory values are within range [-1/2;1/2]
thrust::pair< thrust::device_ptr<REAL>, thrust::device_ptr<REAL> > mm_pair =
thrust::minmax_element( device_pointer_cast<REAL>((REAL*)trajectory_int->get_data_ptr()),
device_pointer_cast<REAL>(((REAL*)trajectory_int->get_data_ptr())+trajectory_int->get_number_of_elements()*D ));
if( *mm_pair.first < REAL(-0.5) || *mm_pair.second > REAL(0.5) ){
std::stringstream ss;
ss << "Error: cuNFFT::preprocess : trajectory [" << *mm_pair.first << "; " << *mm_pair.second << "] out of range [-1/2;1/2]";
throw std::runtime_error(ss.str());
}
// Make Thrust device vector of trajectory and samples
device_vector< vector_td<REAL,D> > trajectory_positions_in
( device_pointer_cast< vector_td<REAL,D> >(trajectory_int->get_data_ptr()),
device_pointer_cast< vector_td<REAL,D> >(trajectory_int->get_data_ptr()+trajectory_int->get_number_of_elements() ));
trajectory_positions = new device_vector< vector_td<REAL,D> >( trajectory_int->get_number_of_elements() );
CHECK_FOR_CUDA_ERROR();
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
vector_td<REAL,D> matrix_size_os_plus_wrap_real = vector_td<REAL,D>( (matrix_size_os+matrix_size_wrap)>>1 );
// convert input trajectory in [-1/2;1/2] to [0;matrix_size_os]
thrust::transform( trajectory_positions_in.begin(), trajectory_positions_in.end(), trajectory_positions->begin(),
trajectory_scale<REAL,D>(matrix_size_os_real, matrix_size_os_plus_wrap_real) );
CHECK_FOR_CUDA_ERROR();
if( !( mode == NFFT_PREP_C2NC || ATOMICS )){
// allocate storage for and compute temporary prefix-sum variable (#cells influenced per sample)
device_vector<unsigned int> c_p_s(trajectory_int->get_number_of_elements());
device_vector<unsigned int> c_p_s_ps(trajectory_int->get_number_of_elements());
CHECK_FOR_CUDA_ERROR();
REAL half_W = REAL(0.5)*W;
thrust::plus<unsigned int> binary_op;
thrust::transform(trajectory_positions->begin(), trajectory_positions->end(), c_p_s.begin(), compute_num_cells_per_sample<REAL,D>(half_W));
inclusive_scan( c_p_s.begin(), c_p_s.end(), c_p_s_ps.begin(), binary_op ); // prefix sum
// Build the vector of (grid_idx, sample_idx) tuples. Actually kept in two separate vectors.
unsigned int num_pairs = c_p_s_ps.back();
c_p_s.clear();
thrust::device_vector<unsigned int> *tuples_first = new device_vector<unsigned int>(num_pairs);
tuples_last = new device_vector<unsigned int>(num_pairs);
CHECK_FOR_CUDA_ERROR();
// Fill tuple vector
write_pairs<REAL,D>( vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples, number_of_frames, W,
raw_pointer_cast(&(*trajectory_positions)[0]), raw_pointer_cast(&c_p_s_ps[0]),
raw_pointer_cast(&(*tuples_first)[0]), raw_pointer_cast(&(*tuples_last)[0]) );
c_p_s_ps.clear();
// Sort by grid indices
sort_by_key( tuples_first->begin(), tuples_first->end(), tuples_last->begin() );
// each bucket_begin[i] indexes the first element of bucket i's list of points
// each bucket_end[i] indexes one past the last element of bucket i's list of points
bucket_begin = new device_vector<unsigned int>(number_of_frames*prod(matrix_size_os+matrix_size_wrap));
bucket_end = new device_vector<unsigned int>(number_of_frames*prod(matrix_size_os+matrix_size_wrap));
CHECK_FOR_CUDA_ERROR();
// find the beginning of each bucket's list of points
counting_iterator<unsigned int> search_begin(0);
lower_bound(tuples_first->begin(), tuples_first->end(), search_begin, search_begin + number_of_frames*prod(matrix_size_os+matrix_size_wrap), bucket_begin->begin() );
// find the end of each bucket's list of points
upper_bound(tuples_first->begin(), tuples_first->end(), search_begin, search_begin + number_of_frames*prod(matrix_size_os+matrix_size_wrap), bucket_end->begin() );
delete tuples_first;
}
preprocessed_C2NC = true;
if( mode != NFFT_PREP_C2NC )
preprocessed_NC2C = true;
if( !restore<typename reald<REAL,D>::Type,dummy,dummy>(old_device, trajectory, trajectory, trajectory_int) ){
throw cuda_error("Error: cuNFFT_plan::preprocess: unable to restore compute device.");
}
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, NFFT_comp_mode mode )
{
// Validity checks
unsigned char components;
if( mode == NFFT_FORWARDS_C2NC )
components = _NFFT_CONV_C2NC + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_FORWARDS_NC2C )
components = _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_BACKWARDS_NC2C )
components = _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
else if( mode == NFFT_BACKWARDS_C2NC )
components = _NFFT_CONV_C2NC + _NFFT_FFT + _NFFT_DEAPODIZATION;
else{
throw std::runtime_error("Error: cuNFFT_plan::compute: unknown mode");
}
{
cuNDArray<complext<REAL> > *samples, *image;
if( mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ){
image = in; samples = out;
} else{
image = out; samples = in;
}
check_consistency( samples, image, dcw, components );
}
cuNDArray<complext<REAL> > *in_int = 0x0, *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
if( !prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::compute: device preparation error.");
}
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>
( (mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ) ? *in->get_dimensions() : *out->get_dimensions() );
bool oversampled_image = (image_dims==matrix_size_os);
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
{
cuNDArray<complext<REAL> > *image = ((mode == NFFT_FORWARDS_C2NC || mode == NFFT_BACKWARDS_C2NC ) ? in : out );
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
vec_dims.push_back(image->get_size(d));
}
cuNDArray<complext<REAL> > *working_image = 0x0, *working_samples = 0x0;
switch(mode){
case NFFT_FORWARDS_C2NC:
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFT_C2NC( working_image, out_int );
if( dcw_int )
*out_int *= *dcw_int;
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
break;
case NFFT_FORWARDS_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
}
else{
working_image = out_int;
}
compute_NFFT_NC2C( working_samples, working_image );
if( !oversampled_image ){
crop<complext<REAL>, D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
}
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
case NFFT_BACKWARDS_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
}
else{
working_image = out_int;
}
compute_NFFTH_NC2C( working_samples, working_image );
if( !oversampled_image ){
crop<complext<REAL> ,D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
}
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
case NFFT_BACKWARDS_C2NC:
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFTH_C2NC( working_image, out_int );
if( dcw_int )
*out_int *= *dcw_int;
if( !oversampled_image ){
delete working_image; working_image = 0x0;
}
break;
};
if( !restore<complext<REAL> ,complext<REAL> ,REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::compute: unable to restore compute device.");
}
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::mult_MH_M( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, std::vector<size_t> halfway_dims )
{
// Validity checks
unsigned char components = _NFFT_CONV_C2NC + _NFFT_CONV_NC2C + _NFFT_FFT + _NFFT_DEAPODIZATION;
if( in->get_number_of_elements() != out->get_number_of_elements() ){
throw std::runtime_error("Error: cuNFFT_plan::mult_MH_M: in/out image sizes mismatch");
}
cuNDArray<complext<REAL> > *working_samples = new cuNDArray<complext<REAL> >(&halfway_dims);
check_consistency( working_samples, in, dcw, components );
cuNDArray<complext<REAL> > *in_int = 0x0;
cuNDArray<complext<REAL> > *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
if( !prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int ) ){
throw cuda_error("Error: cuNFFT_plan::mult_MH_M: device preparation error.");
}
cuNDArray<complext<REAL> > *working_image = 0x0;
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>(*in->get_dimensions());
bool oversampled_image = (image_dims==matrix_size_os);
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ )
vec_dims.push_back(in->get_size(d));
if( !oversampled_image ){
working_image = new cuNDArray<complext<REAL> >(&vec_dims);
pad<complext<REAL>, D>( in_int, working_image );
}
else{
working_image = in_int;
}
compute_NFFT_C2NC( working_image, working_samples );
// Density compensation
if( dcw ){
*working_samples *= *dcw_int;
*working_samples *= *dcw_int;
}
compute_NFFTH_NC2C( working_samples, working_image );
delete working_samples;
working_samples = 0x0;
if( !oversampled_image ){
crop<complext<REAL>, D>( (matrix_size_os-matrix_size)>>1, working_image, out_int );
delete working_image; working_image = 0x0;
}
restore<complext<REAL> ,complext<REAL> ,REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int );
CHECK_FOR_CUDA_ERROR();
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve( cuNDArray<complext<REAL> > *in, cuNDArray<complext<REAL> > *out,
cuNDArray<REAL> *dcw, NFFT_conv_mode mode, bool accumulate )
{
unsigned char components;
if( mode == NFFT_CONV_C2NC )
components = _NFFT_CONV_C2NC;
else
components = _NFFT_CONV_NC2C;
{
cuNDArray<complext<REAL> > *samples, *image;
if( mode == NFFT_CONV_C2NC ){
image = in; samples = out;
} else{
image = out; samples = in;
}
check_consistency( samples, image, dcw, components );
}
cuNDArray<complext<REAL> > *in_int = 0x0, *out_int = 0x0;
cuNDArray<REAL> *dcw_int = 0x0;
int old_device;
prepare<complext<REAL>, complext<REAL>, REAL>
(device, &old_device, in, &in_int, out, &out_int, dcw, &dcw_int );
cuNDArray<complext<REAL> > *working_samples = 0x0;
typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>
(*(((mode == NFFT_CONV_C2NC) ? in : out )->get_dimensions()));
bool oversampled_image = (image_dims==matrix_size_os);
if( !oversampled_image ){
throw std::runtime_error("Error: cuNFFT_plan::convolve: ERROR: oversampled image not provided as input.");
}
vector<size_t> vec_dims = to_std_vector(matrix_size_os);
{
cuNDArray<complext<REAL> > *image = ((mode == NFFT_CONV_C2NC) ? in : out );
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
vec_dims.push_back(image->get_size(d));
}
switch(mode){
case NFFT_CONV_C2NC:
convolve_NFFT_C2NC( in_int, out_int, accumulate );
if( dcw_int ) *out_int *= *dcw_int;
break;
case NFFT_CONV_NC2C:
// Density compensation
if( dcw_int ){
working_samples = new cuNDArray<complext<REAL> >(*in_int);
*working_samples *= *dcw_int;
}
else{
working_samples = in_int;
}
_convolve_NFFT_NC2C<REAL,D,ATOMICS>::apply( this, working_samples, out_int, accumulate );
if( dcw_int ){
delete working_samples; working_samples = 0x0;
}
break;
default:
throw std::runtime_error( "Error: cuNFFT_plan::convolve: unknown mode.");
}
restore<complext<REAL>, complext<REAL>, REAL>
(old_device, out, out, out_int, in, in_int, dcw, dcw_int );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::fft(cuNDArray<complext<REAL> > *data, NFFT_fft_mode mode, bool do_scale )
{
cuNDArray<complext<REAL> > *data_int = 0x0;
int old_device;
prepare<complext<REAL>,dummy,dummy>( device, &old_device, data, &data_int );
typename uint64d<D>::Type _dims_to_transform = counting_vec<size_t,D>();
vector<size_t> dims_to_transform = to_std_vector( _dims_to_transform );
if( mode == NFFT_FORWARDS ){
cuNDFFT<REAL>::instance()->fft( data_int, &dims_to_transform, do_scale );
}
else{
cuNDFFT<REAL>::instance()->ifft( data_int, &dims_to_transform, do_scale );
}
restore<complext<REAL> ,dummy,dummy>(old_device, data, data, data_int);
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::deapodize( cuNDArray<complext<REAL> > *image, bool fourier_domain)
{
unsigned char components;
components = _NFFT_FFT;
check_consistency( 0x0, image, 0x0, components );
cuNDArray<complext<REAL> > *image_int = 0x0;
int old_device;
prepare<complext<REAL>,dummy,dummy>(device, &old_device, image, &image_int );
typename uint64d<D>::Type image_dims = from_std_vector<size_t, D>(*image->get_dimensions());
bool oversampled_image = (image_dims==matrix_size_os);
if( !oversampled_image ){
throw std::runtime_error( "Error: cuNFFT_plan::deapodize: ERROR: oversampled image not provided as input.");
}
if (fourier_domain){
if (!deapodization_filterFFT)
deapodization_filterFFT = compute_deapodization_filter(true);
*image_int *= *deapodization_filterFFT;
} else {
if (!deapodization_filter)
deapodization_filter = compute_deapodization_filter(false);
*image_int *= *deapodization_filter;
}
restore<complext<REAL> ,dummy,dummy>(old_device, image, image, image_int);
}
//
// Private class methods
//
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::check_consistency( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image,
cuNDArray<REAL> *weights, unsigned char components )
{
if( !initialized ){
throw std::runtime_error( "Error: cuNFFT_plan: Unable to proceed without setup.");
}
if( (components & _NFFT_CONV_C2NC ) && !preprocessed_C2NC ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to compute NFFT before preprocessing.");
}
if( (components & _NFFT_CONV_NC2C ) && !(preprocessed_NC2C || (preprocessed_C2NC && ATOMICS ) ) ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to compute NFFT before preprocessing.");
}
if( ((components & _NFFT_CONV_C2NC ) || (components & _NFFT_CONV_NC2C )) && !(image && samples) ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to process 0x0 input/output.");
}
if( ((components & _NFFT_FFT) || (components & _NFFT_DEAPODIZATION )) && !image ){
throw std::runtime_error("Error: cuNFFT_plan: Unable to process 0x0 input.");
}
if( image->get_number_of_dimensions() < D ){
throw std::runtime_error("Error: cuNFFT_plan: Number of image dimensions mismatch the plan.");
}
typename uint64d<D>::Type image_dims = from_std_vector<size_t,D>( *image->get_dimensions() );
bool oversampled_image = (image_dims==matrix_size_os);
if( !((oversampled_image) ? (image_dims == matrix_size_os) : (image_dims == matrix_size) )){
throw std::runtime_error("Error: cuNFFT_plan: Image dimensions mismatch.");
}
if( (components & _NFFT_CONV_C2NC ) || (components & _NFFT_CONV_NC2C )){
if( (samples->get_number_of_elements() == 0) || (samples->get_number_of_elements() % (number_of_frames*number_of_samples)) ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: The number of samples is not a multiple of #samples/frame x #frames as requested through preprocessing");
}
unsigned int num_batches_in_samples_array = samples->get_number_of_elements()/(number_of_frames*number_of_samples);
unsigned int num_batches_in_image_array = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ ){
num_batches_in_image_array *= image->get_size(d);
}
num_batches_in_image_array /= number_of_frames;
if( num_batches_in_samples_array != num_batches_in_image_array ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\nLeading to %d batches in the samples array.\nThe number of batches in the image array is %d.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames, num_batches_in_samples_array, num_batches_in_image_array ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: Number of batches mismatch between samples and image arrays");
}
}
if( components & _NFFT_CONV_NC2C ){
if( weights ){
if( weights->get_number_of_elements() == 0 ||
!( weights->get_number_of_elements() == number_of_samples ||
weights->get_number_of_elements() == number_of_frames*number_of_samples) ){
printf("\ncuNFFT::check_consistency() failed:\n#elements in the samples array: %ld.\n#samples from preprocessing: %d.\n#frames from preprocessing: %d.\n#weights: %ld.\n",samples->get_number_of_elements(), number_of_samples, number_of_frames, weights->get_number_of_elements() ); fflush(stdout);
throw std::runtime_error("Error: cuNFFT_plan: The number of weights should match #samples/frame x #frames as requested through preprocessing");
}
}
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::barebones()
{
// These are the fundamental booleans checked before accessing the various member pointers
initialized = preprocessed_C2NC = preprocessed_NC2C = false;
// Clear matrix sizes
clear(matrix_size);
clear(matrix_size_os);
// Clear pointers
trajectory_positions = 0x0;
tuples_last = bucket_begin = bucket_end = 0x0;
// and specify the device
if (cudaGetDevice(&device) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::barebones:: unable to get device no");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::wipe( NFFT_wipe_mode mode )
{
// Get current Cuda device
int old_device;
if( cudaGetDevice(&old_device) != cudaSuccess ) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to get device no");
}
if( device != old_device && cudaSetDevice(device) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to set device no");
}
if( mode==NFFT_WIPE_ALL && initialized ){
deapodization_filter.reset();
initialized = false;
}
if( preprocessed_NC2C ){
if( tuples_last ) delete tuples_last;
if( bucket_begin ) delete bucket_begin;
if( bucket_end ) delete bucket_end;
}
if( preprocessed_C2NC || preprocessed_NC2C ){
delete trajectory_positions;
preprocessed_C2NC = preprocessed_NC2C = false;
}
if( device != old_device && cudaSetDevice(old_device) != cudaSuccess) {
throw cuda_error("Error: cuNFFT_plan::wipe: unable to restore device no");
}
}
template<class REAL, unsigned int D, bool ATOMICS>
void Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_beta()
{
// Compute the Kaiser-Bessel beta parameter according to the formula provided in
// Beatty et al., IEEE TMI 2005;24(6):799-808.
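// In scalar form this is, per dimension d:
//   beta[d] = pi * sqrt( (W/alpha[d])^2 * (alpha[d] - 0.5)^2 - 0.8 )
// e.g. (illustrative numbers only) alpha[d] = 2.0 and W = 5.5 give
//   pi * sqrt( 7.5625 * 2.25 - 0.8 ) = pi * sqrt(16.2156) ~ 12.65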
for( unsigned int d=0; d<D; d++ )
beta[d] = (M_PI*std::sqrt((W*W)/(alpha[d]*alpha[d])*(alpha[d]-REAL(0.5))*(alpha[d]-REAL(0.5))-REAL(0.8)));
}
//
// Grid fictitious trajectory with a single sample at the origin
//
template<class REAL, unsigned int D> __global__ void
compute_deapodization_filter_kernel( typename uintd<D>::Type matrix_size_os, typename reald<REAL,D>::Type matrix_size_os_real,
REAL W, REAL half_W, REAL one_over_W,
typename reald<REAL,D>::Type beta, complext<REAL> * __restrict__ image_os )
{
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int num_elements = prod(matrix_size_os);
if( idx <num_elements ){
// Compute weight from Kaiser-Bessel filter
const typename uintd<D>::Type cell_pos = idx_to_co<D>(idx, matrix_size_os);
// Sample position ("origin")
const vector_td<REAL,D> sample_pos = REAL(0.5)*matrix_size_os_real;
// Calculate the distance between the cell and the sample
vector_td<REAL,D> cell_pos_real = vector_td<REAL,D>(cell_pos);
const typename reald<REAL,D>::Type delta = abs(sample_pos-cell_pos_real);
// Compute convolution weight.
REAL weight;
REAL zero = REAL(0);
vector_td<REAL,D> half_W_vec( half_W );
if( weak_greater( delta, half_W_vec ) )
weight = zero;
else{
weight = KaiserBessel<REAL>( delta, matrix_size_os_real, one_over_W, beta );
//if( !isfinite(weight) )
//weight = zero;
}
// Output weight
complext<REAL> result;
result._real = weight;
result._imag = zero;
image_os[idx] = result;
}
}
//
// Function to calculate the deapodization filter
//
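// The filter is obtained by evaluating the Kaiser-Bessel convolution kernel for a
// single sample at the centre of the oversampled grid, transforming it with the
// plan's FFT, and taking the reciprocal; multiplying an image by the result
// compensates (deapodizes) for the roll-off introduced by convolution-based gridding.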
template<class REAL, unsigned int D, bool ATOMICS> boost::shared_ptr<cuNDArray<complext<REAL> > >
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_deapodization_filter( bool FFTed)
{
std::vector<size_t> tmp_vec_os = to_std_vector(matrix_size_os);
boost::shared_ptr< cuNDArray<complext<REAL> > > filter( new cuNDArray<complext<REAL> >(tmp_vec_os));
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>(matrix_size_os);
// Find dimensions of grid/blocks.
dim3 dimBlock( 256 );
dim3 dimGrid( (prod(matrix_size_os)+dimBlock.x-1)/dimBlock.x );
// Invoke kernel
compute_deapodization_filter_kernel<REAL,D><<<dimGrid, dimBlock>>>
( vector_td<unsigned int,D>(matrix_size_os), matrix_size_os_real, W, REAL(0.5)*W, REAL(1)/W, beta, filter->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
// FFT
if (FFTed)
fft( filter.get(), NFFT_FORWARDS, false );
else
fft( filter.get(), NFFT_BACKWARDS, false );
// Reciprocal
reciprocal_inplace(filter.get());
return filter;
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFT_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples )
{
// private method - no consistency check. We trust in ourselves.
// Deapodization
deapodize( image );
// FFT
fft( image, NFFT_FORWARDS );
// Convolution
convolve( image, samples, 0x0, NFFT_CONV_C2NC );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFTH_NC2C( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image )
{
// private method - no consistency check. We trust in ourselves.
// Convolution
convolve( samples, image, 0x0, NFFT_CONV_NC2C );
// FFT
fft( image, NFFT_BACKWARDS );
// Deapodization
deapodize( image );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFTH_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples )
{
// private method - no consistency check. We trust in ourselves.
// Deapodization
deapodize( image, true );
// FFT
fft( image, NFFT_BACKWARDS );
// Convolution
convolve( image, samples, 0x0, NFFT_CONV_C2NC );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::compute_NFFT_NC2C( cuNDArray<complext<REAL> > *samples, cuNDArray<complext<REAL> > *image )
{
// private method - no consistency check. We trust in ourselves.
// Convolution
convolve( samples, image, 0x0, NFFT_CONV_NC2C );
// FFT
fft( image, NFFT_FORWARDS );
// Deapodization
deapodize( image, true );
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve_NFFT_C2NC( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples, bool accumulate )
{
// private method - no consistency check. We trust in ourselves.
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
/*
Setup grid and threads
*/
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
max_coils = NFFT_MAX_COILS_COMPUTE_1x;
}
else{
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
}
// We can (only) convolve max_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (number_of_samples+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<REAL,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<REAL,D> );
unsigned int double_warp_size_power=0;
unsigned int __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
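// The loop above computes double_warp_size_power = log2(2*warpSize), e.g. a warp size
// of 32 gives __tmp = 64 and a power of 6. The convolution kernels rely on the warp
// size being a power of two (see the explicit checks in the NC2C variants below) so
// that this exponent can be used for shift/mask style indexing.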
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
/*
Invoke kernel
*/
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
NFFT_convolve_kernel<REAL,D>
<<<dimGrid, dimBlock, ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread)>>>
( alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
image->get_data_ptr()+repetition*prod(matrix_size_os)*number_of_frames*domain_size_coils,
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
double_warp_size_power, REAL(0.5)*W, REAL(1)/(W), accumulate, matrix_size_os_real );
CHECK_FOR_CUDA_ERROR();
}
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::convolve_NFFT_NC2C( cuNDArray<complext<REAL> > *image, cuNDArray<complext<REAL> > *samples, bool accumulate )
{
_convolve_NFFT_NC2C<REAL,D,ATOMICS>::apply( this, image, samples, accumulate );
}
template<unsigned int D> struct
_convolve_NFFT_NC2C<float,D,true>{ // True: use atomic operations variant
static bool apply( cuNFFT_plan<float,D,true> *plan,
cuNDArray<complext<float> > *samples,
cuNDArray<complext<float> > *image,
bool accumulate )
{
//
// Bring in some variables from the plan
unsigned int device = plan->device;
unsigned int number_of_frames = plan->number_of_frames;
unsigned int number_of_samples = plan->number_of_samples;
typename uint64d<D>::Type matrix_size_os = plan->matrix_size_os;
typename uint64d<D>::Type matrix_size_wrap = plan->matrix_size_wrap;
typename reald<float,D>::Type alpha = plan->alpha;
typename reald<float,D>::Type beta = plan->beta;
float W = plan->W;
thrust::device_vector< typename reald<float,D>::Type > *trajectory_positions = plan->trajectory_positions;
//
// Atomic operations are only supported on devices of compute capability 2.0 and up
//
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
throw cuda_error("Error: Atomic NC2C NFFT only supported on device with compute model 2.0 or higher");
}
// Check if warp_size is a power of two. We do some modulus tricks in the kernels that depend on this...
if( !((cudaDeviceManager::Instance()->warp_size(device) & (cudaDeviceManager::Instance()->warp_size(device)-1)) == 0 ) ){
throw cuda_error("cuNFFT: unsupported hardware (warpSize is not a power of two)");
}
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
//
// Setup grid and threads
//
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
// We can (only) convolve domain_size_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (number_of_samples+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<float,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<float,D> );
unsigned int double_warp_size_power=0, __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
vector_td<float,D> matrix_size_os_real = vector_td<float,D>( matrix_size_os );
if( !accumulate ){
clear(image);
}
//
// Invoke kernel
//
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
NFFT_H_atomic_convolve_kernel<float,D>
<<<dimGrid, dimBlock, ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread)>>>
( alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
image->get_data_ptr()+repetition*prod(matrix_size_os)*number_of_frames*domain_size_coils,
double_warp_size_power, float(0.5)*W, float(1)/(W), matrix_size_os_real );
}
CHECK_FOR_CUDA_ERROR();
return true;
}
};
template<unsigned int D> struct
_convolve_NFFT_NC2C<double,D,true>{ // True: use atomic operations variant
// Atomics don't exist for doubles, so this gives a compile error if you actually try to use it.
};
template<class REAL, unsigned int D> struct
_convolve_NFFT_NC2C<REAL,D,false>{ // False: use non-atomic operations variant
static void apply( cuNFFT_plan<REAL,D,false> *plan,
cuNDArray<complext<REAL> > *samples,
cuNDArray<complext<REAL> > *image,
bool accumulate )
{
// Bring in some variables from the plan
unsigned int device = plan->device;
unsigned int number_of_frames = plan->number_of_frames;
unsigned int number_of_samples = plan->number_of_samples;
typename uint64d<D>::Type matrix_size_os = plan->matrix_size_os;
typename uint64d<D>::Type matrix_size_wrap = plan->matrix_size_wrap;
typename reald<REAL,D>::Type alpha = plan->alpha;
typename reald<REAL,D>::Type beta = plan->beta;
REAL W = plan->W;
thrust::device_vector< typename reald<REAL,D>::Type > *trajectory_positions = plan->trajectory_positions;
thrust::device_vector<unsigned int> *tuples_last = plan->tuples_last;
thrust::device_vector<unsigned int> *bucket_begin = plan->bucket_begin;
thrust::device_vector<unsigned int> *bucket_end = plan->bucket_end;
// private method - no consistency check. We trust in ourselves.
// Check if warp_size is a power of two. We do some modulus tricks in the kernels that depend on this...
if( !((cudaDeviceManager::Instance()->warp_size(device) & (cudaDeviceManager::Instance()->warp_size(device)-1)) == 0 ) ){
throw cuda_error("cuNFFT: unsupported hardware (warpSize is not a power of two)");
}
unsigned int num_batches = 1;
for( unsigned int d=D; d<image->get_number_of_dimensions(); d++ )
num_batches *= image->get_size(d);
num_batches /= number_of_frames;
//
// Setup grid and threads
//
size_t threads_per_block;
unsigned int max_coils;
threads_per_block = NFFT_THREADS_PER_KERNEL;
if( cudaDeviceManager::Instance()->major_version(device) == 1 ){
max_coils = NFFT_MAX_COILS_COMPUTE_1x;
}
else{
max_coils = NFFT_MAX_COILS_COMPUTE_2x;
}
// We can (only) convolve domain_size_coils batches per run due to shared memory issues.
unsigned int domain_size_coils_desired = num_batches;
unsigned int num_repetitions = domain_size_coils_desired/max_coils +
( ((domain_size_coils_desired%max_coils)==0) ? 0 : 1 );
unsigned int domain_size_coils = (num_repetitions==1) ? domain_size_coils_desired : max_coils;
unsigned int domain_size_coils_tail = (num_repetitions==1) ? domain_size_coils_desired : domain_size_coils_desired - (num_repetitions-1)*domain_size_coils;
// Block and Grid dimensions
dim3 dimBlock( (unsigned int)threads_per_block );
dim3 dimGrid( (prod(matrix_size_os+matrix_size_wrap)+dimBlock.x-1)/dimBlock.x, number_of_frames );
// Calculate how much shared memory to use per thread
size_t bytes_per_thread = domain_size_coils * sizeof( vector_td<REAL,D> );
size_t bytes_per_thread_tail = domain_size_coils_tail * sizeof( vector_td<REAL,D> );
unsigned int double_warp_size_power=0, __tmp = cudaDeviceManager::Instance()->warp_size(device)<<1;
while(__tmp!=1){
__tmp>>=1;
double_warp_size_power++;
}
vector_td<REAL,D> matrix_size_os_real = vector_td<REAL,D>( matrix_size_os );
// Define temporary image that includes a wrapping zone
cuNDArray<complext<REAL> > _tmp;
vector<size_t> vec_dims = to_std_vector(matrix_size_os+matrix_size_wrap);
if( number_of_frames > 1 )
vec_dims.push_back(number_of_frames);
if( num_batches > 1 )
vec_dims.push_back(num_batches);
_tmp.create(&vec_dims);
//
// Invoke kernel
//
for( unsigned int repetition = 0; repetition<num_repetitions; repetition++ ){
NFFT_H_convolve_kernel<REAL,D>
<<<dimGrid, dimBlock, ((repetition==num_repetitions-1) ? dimBlock.x*bytes_per_thread_tail : dimBlock.x*bytes_per_thread)>>>
( alpha, beta, W, vector_td<unsigned int,D>(matrix_size_os+matrix_size_wrap), number_of_samples,
(repetition==num_repetitions-1) ? domain_size_coils_tail : domain_size_coils,
raw_pointer_cast(&(*trajectory_positions)[0]),
_tmp.get_data_ptr()+repetition*prod(matrix_size_os+matrix_size_wrap)*number_of_frames*domain_size_coils,
samples->get_data_ptr()+repetition*number_of_samples*number_of_frames*domain_size_coils,
raw_pointer_cast(&(*tuples_last)[0]), raw_pointer_cast(&(*bucket_begin)[0]), raw_pointer_cast(&(*bucket_end)[0]),
double_warp_size_power, REAL(0.5)*W, REAL(1)/(W), matrix_size_os_real );
}
CHECK_FOR_CUDA_ERROR();
plan->image_wrap( &_tmp, image, accumulate );
};
};
// Image wrap kernels
template<class REAL, unsigned int D> __global__ void
image_wrap_kernel( typename uintd<D>::Type matrix_size_os, typename uintd<D>::Type matrix_size_wrap, bool accumulate,
const complext<REAL> * __restrict__ in, complext<REAL> * __restrict__ out )
{
unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
const unsigned int num_elements_per_image_src = prod(matrix_size_os+matrix_size_wrap);
const unsigned int image_offset_src = blockIdx.y*num_elements_per_image_src;
const typename uintd<D>::Type co = idx_to_co<D>(idx, matrix_size_os);
const typename uintd<D>::Type half_wrap = matrix_size_wrap>>1;
// Make "boolean" vectors denoting whether wrapping needs to be performed in a given direction (forwards/backwards)
vector_td<bool,D> B_l = vector_less( co, half_wrap );
vector_td<bool,D> B_r = vector_greater_equal( co, matrix_size_os-half_wrap );
complext<REAL> result = in[co_to_idx<D>(co+half_wrap, matrix_size_os+matrix_size_wrap) + image_offset_src];
if( sum(B_l+B_r) > 0 ){
// Fold back the wrapping zone onto the image ("periodically")
//
// There are 2^D-1 ways to pick combinations of dimensions in D-dimensional space, e.g.
//
// { x, y, xy } in 2D
// { x, y, z, xy, xz, yz, xyz } in 3D
//
// Every "letter" in each combination provides two possible wraps (either end of the dimension)
//
// For each of the 2^D-1 combinations DO
// - find the number of dimensions, d, in the combination
// - create 2^(d) stride vectors and test for wrapping using the 'B'-vectors above.
// - accumulate the contributions
//
// The following code represents dimensions as bits in a char.
//
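// Worked 2D example (D=2): the combinations are x (binary 01), y (10) and xy (11).
// For combination x, d = 1 and s runs over {0,1}, i.e. test a left wrap (B_l) and a
// right wrap (B_r) along x only. For combination xy, d = 2 and s runs over
// {00,01,10,11}, i.e. the four corners; a stride only contributes when all of its
// per-dimension wrap tests succeed (wrap_requests == d).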
for( unsigned char combination = 1; combination < (1<<D); combination++ ){
// Find d
unsigned char d = 0;
for( unsigned char i=0; i<D; i++ )
d += ((combination & (1<<i)) > 0 );
// Create stride vector for each wrapping test
for( unsigned char s = 0; s < (1<<d); s++ ){
// Target for stride
typename intd<D>::Type stride;
char wrap_requests = 0;
char skipped_dims = 0;
// Fill dimensions of the stride
for( unsigned char i=1; i<D+1; i++ ){
// Is the stride dimension present in the current combination?
if( i & combination ){
// A zero bit in s indicates "check for left wrap" and a one bit is interpreted as "check for right wrap"
// ("left/right" for the individual dimension meaning wrapping on either side of the dimension).
if( i & (s<<(skipped_dims)) ){
if( B_r.vec[i-1] ){ // Wrapping required
stride[i-1] = -1;
wrap_requests++;
}
else
stride[i-1] = 0;
}
else{
if( B_l.vec[i-1] ){ // Wrapping required
stride[i-1] = 1;
wrap_requests++;
}
else
stride[i-1] = 0;
}
}
else{
// Do not test for wrapping in dimension 'i-1' (for this combination)
stride[i-1] = 0;
skipped_dims++;
}
}
// Now it is time to do the actual wrapping (if needed)
if( wrap_requests == d ){
typename intd<D>::Type src_co_int = vector_td<int,D>(co+half_wrap);
typename intd<D>::Type matrix_size_os_int = vector_td<int,D>(matrix_size_os);
typename intd<D>::Type co_offset_int = src_co_int + component_wise_mul<int,D>(stride,matrix_size_os_int);
typename uintd<D>::Type co_offset = vector_td<unsigned int,D>(co_offset_int);
result += in[co_to_idx<D>(co_offset, matrix_size_os+matrix_size_wrap) + image_offset_src];
break; // only one stride per combination can contribute (e.g. one edge, one corner)
}
}
}
}
// Output
const unsigned int image_offset_tgt = blockIdx.y*prod(matrix_size_os);
if( accumulate ) result += out[idx+image_offset_tgt];
out[idx+image_offset_tgt] = result;
}
template<class REAL, unsigned int D, bool ATOMICS> void
Gadgetron::cuNFFT_plan<REAL,D,ATOMICS>::image_wrap( cuNDArray<complext<REAL> > *source, cuNDArray<complext<REAL> > *target, bool accumulate )
{
unsigned int num_batches = 1;
for( unsigned int d=D; d<source->get_number_of_dimensions(); d++ )
num_batches *= source->get_size(d);
num_batches /= number_of_frames;
// Set dimensions of grid/blocks.
unsigned int bdim = 256;
dim3 dimBlock( bdim );
dim3 dimGrid( prod(matrix_size_os)/bdim, number_of_frames*num_batches );
// Safety check
if( (prod(matrix_size_os)%bdim) != 0 ) {
std::stringstream ss;
ss << "Error: cuNFFT : the number of oversampled image elements must be a multiplum of the block size: " << bdim;
throw std::runtime_error(ss.str());
}
// Invoke kernel
image_wrap_kernel<REAL,D><<<dimGrid, dimBlock>>>
( vector_td<unsigned int,D>(matrix_size_os), vector_td<unsigned int,D>(matrix_size_wrap), accumulate, source->get_data_ptr(), target->get_data_ptr() );
CHECK_FOR_CUDA_ERROR();
}
//
// Template instantion
//
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 1, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 1, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 1, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 2, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 2, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 2, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 3, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 3, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 3, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 4, true >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< float, 4, false >;
template class EXPORTGPUNFFT Gadgetron::cuNFFT_plan< double, 4, false >;
|
e4a0f0c22f2547c4f10c362537d2a4e7a348acfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__
void adagradfloat(float *weights, //weights input and output
float *gsum, //storage
float *dw, //input and will have to be set to zero
// float *loss1, //output
// float *loss2, //output
float rate, //input
// float decay1,//input
// float decay2,//input
// int batch,
float eps){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
gsum[cell]= gsum[cell]+(dw[cell]*dw[cell]);
weights[cell]= -(rate*dw[cell])/(sqrtf(gsum[cell])+eps);
}
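// Hypothetical host-side launch of the kernel above (sketch only; the buffer names,
// block size and hyperparameters are assumptions, not part of this file):
//   int threads = 64;
//   hipLaunchKernelGGL(adagradfloat, dim3(numWeights / threads), dim3(threads), 0, 0,
//                      d_weights, d_gsum, d_dw, 0.001f, 1e-8f);
// Note that cell = blockIdx.x*blockDim.x + threadIdx.x is used without a bounds check,
// so the weight count must be an exact multiple of the block size.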
extern "C" __global__
void adamfloat(
float *w,
float *gsum,
float *xsum,
float *dw,
float beta1,
float beta2,
float eps,
float counter
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
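// The statements below are the standard Adam moment updates with bias correction:
//   m = beta1*m + (1-beta1)*g,     m_hat = m / (1 - beta1^t)
//   v = beta2*v + (1-beta2)*g^2,   v_hat = v / (1 - beta2^t)
//   w += -step * m_hat / (sqrt(v_hat) + eps)
// with g = dw[cell], m = gsum[cell], v = xsum[cell], t = counter. As written, eps is
// reused as the step size in the numerator as well as the denominator offset.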
gsum[cell]=beta1*gsum[cell] +(1.0-beta1)*dw[cell];
float gsumt = 0;
gsumt = gsum[cell]/(1.0 - powf(beta1,counter));
xsum[cell]= (beta2*xsum[cell])+((1.0 -beta2)*dw[cell]*dw[cell]);
float xsumt =0;
xsumt = xsum[cell]/(1.0 - powf(beta2,counter));
w[cell]+= -(eps*gsumt)/(sqrtf(xsumt)+eps);
}
extern "C" __global__
void adadeltafloat(
float *weights, //weights input and output
float *gsum, //storage
float *xsum, //storage
float *dw, //input and will have to be set to zero
// float *loss1, //output
// float *loss2, //output
const float rate, //input
// const float decay1,//input
// const float decay2,//input from cpu
// const int batch, //input from cpu
const float eps){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
/*
if weights[cell]<0.0{
decay1=-decay1;
}*/
//decay2=weights[cell]*decay2;
//dw[cell]=(dw[cell]/(float)batch)+decay+decay2;
gsum[cell]= gsum[cell]+(dw[cell]*dw[cell]);
weights[cell]= -(rate*dw[cell])/(sqrtf(gsum[cell])+eps);
dw[cell]=0.0;
}
extern "C" __global__
void l1regularizationfloat(
float *dw, //input and output
float *w, //input
int values, //number of values
float *l1, //output
float *l2, //output
float batch, // should be an int but just send it as a float
float decay1,
const float decay2
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
float decay = decay1;
if (dw[cell]<0){
decay=-decay;
}
atomicAdd(l1,w[cell]*decay);
dw[cell]= (dw[cell]/batch) +decay;
}
extern "C" __global__
void l2regularizationfloat(
float *dw, //input and output
float *w //input
//int values, //number of values
float *l1, //output
float *l2, //output
const float batch, // should be an int but just send it as a float
const float decay1,
const float decay2,
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
atomicAdd(l2,(w[cell]*w[cell]*decay2)/2.0);
dw[cell]= (dw[cell]/batch) + w[cell]*decay2;
}
extern "C" __global__
void l1l2regularizationfloat(
float *dw, //input and output
float *w, //input needs to be an array
// int values, //number of values
float *l1, //output set to zero
float *l2, //output set to zero
const float batch, // should be an int but just send it as a float
const float decay1, //input
const float decay2 //input
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
float decay = decay1;
if (dw[cell]<0){
decay=-decay;
}
atomicAdd(l1,w[cell]*decay);
atomicAdd(l2,(w[cell]*w[cell]*decay2)/2.0);
dw[cell]= (dw[cell]/batch) + (w[cell]*decay2) + decay; // use the sign-adjusted L1 term, as in l1regularizationfloat above
}
| e4a0f0c22f2547c4f10c362537d2a4e7a348acfe.cu | extern "C" __global__
void adagradfloat(float *weights, //weights input and output
float *gsum, //storage
float *dw, //input and will have to be set to zero
// float *loss1, //output
// float *loss2, //output
float rate, //input
// float decay1,//input
// float decay2,//input
// int batch,
float eps){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
gsum[cell]= gsum[cell]+(dw[cell]*dw[cell]);
weights[cell]= -(rate*dw[cell])/(sqrtf(gsum[cell])+eps);
}
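// Hypothetical host-side launch of the kernel above (sketch only; the buffer names,
// block size and hyperparameters are assumptions, not part of this file):
//   int threads = 64;
//   adagradfloat<<<numWeights / threads, threads>>>(d_weights, d_gsum, d_dw, 0.001f, 1e-8f);
// Note that cell = blockIdx.x*blockDim.x + threadIdx.x is used without a bounds check,
// so the weight count must be an exact multiple of the block size.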
extern "C" __global__
void adamfloat(
float *w,
float *gsum,
float *xsum,
float *dw,
float beta1,
float beta2,
float eps,
float counter
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
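// The statements below are the standard Adam moment updates with bias correction:
//   m = beta1*m + (1-beta1)*g,     m_hat = m / (1 - beta1^t)
//   v = beta2*v + (1-beta2)*g^2,   v_hat = v / (1 - beta2^t)
//   w += -step * m_hat / (sqrt(v_hat) + eps)
// with g = dw[cell], m = gsum[cell], v = xsum[cell], t = counter. As written, eps is
// reused as the step size in the numerator as well as the denominator offset.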
gsum[cell]=beta1*gsum[cell] +(1.0-beta1)*dw[cell];
float gsumt = 0;
gsumt = gsum[cell]/(1.0 - powf(beta1,counter));
xsum[cell]= (beta2*xsum[cell])+((1.0 -beta2)*dw[cell]*dw[cell]);
float xsumt =0;
xsumt = xsum[cell]/(1.0 - powf(beta2,counter));
w[cell]+= -(eps*gsumt)/(sqrtf(xsumt)+eps);
}
extern "C" __global__
void adadeltafloat(
float *weights, //weights input and output
float *gsum, //storage
float *xsum, //storage
float *dw, //input and will have to be set to zero
// float *loss1, //output
// float *loss2, //output
const float rate, //input
// const float decay1,//input
// const float decay2,//input from cpu
// const int batch, //input from cpu
const float eps){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x +index;
/*
if weights[cell]<0.0{
decay1=-decay1;
}*/
//decay2=weights[cell]*decay2;
//dw[cell]=(dw[cell]/(float)batch)+decay+decay2;
gsum[cell]= gsum[cell]+(dw[cell]*dw[cell]);
weights[cell]= -(rate*dw[cell])/(sqrtf(gsum[cell])+eps);
dw[cell]=0.0;
}
extern "C" __global__
void l1regularizationfloat(
float *dw, //input and output
float *w, //input
int values, //number of values
float *l1, //output
float *l2, //output
float batch, // should be an int but just send it as a float
float decay1,
const float decay2
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
float decay = decay1;
if (dw[cell]<0){
decay=-decay;
}
atomicAdd(l1,w[cell]*decay);
dw[cell]= (dw[cell]/batch) +decay;
}
extern "C" __global__
void l2regularizationfloat(
float *dw, //input and output
float *w, //input
//int values, //number of values
float *l1, //output
float *l2, //output
const float batch, // should be an int but just send it as a float
const float decay1,
const float decay2
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
atomicAdd(l2,(w[cell]*w[cell]*decay2)/2.0);
dw[cell]= (dw[cell]/batch) + w[cell]*decay2;
}
extern "C" __global__
void l1l2regularizationfloat(
float *dw, //input and output
float *w, //input needs to be an array
// int values, //number of values
float *l1, //output set to zero
float *l2, //output set to zero
const float batch, // should be an int but just send it as a float
const float decay1, //input
const float decay2 //input
){
int section = blockIdx.x;
int index = threadIdx.x;
int cell = section*blockDim.x+index;
float decay = decay1;
if (dw[cell]<0){
decay=-decay;
}
atomicAdd(l1,w[cell]*decay);
atomicAdd(l2,(w[cell]*w[cell]*decay2)/2.0);
dw[cell]= (dw[cell]/batch) + (w[cell]*decay2) + decay; // use the sign-adjusted L1 term, as in l1regularizationfloat above
}
|
2ea1d581daee6123b6348d8093142a7d3e149f9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//__global__ void kernel( void ) {
// does nothing
//}
int main(int argc, char** argv) {
// default the loop count to equal 1
int loopCount = 1;
// take in a command line arg to set the loop count
if(argc > 1){
loopCount = atoi(argv[1]);
}
// declare the device pointer
int *dev_a;
// allocation size in bytes for the cuda malloc (1 byte here)
int size = 1;
// malloc on the device
// loop over the loop count, allocating on the device each iteration
for(int i = 0; i < loopCount; i++){
hipMalloc((void **)&dev_a, size);
//hipFree(dev_a);
}
return 0;
}
| 2ea1d581daee6123b6348d8093142a7d3e149f9f.cu | #include <stdio.h>
//__global__ void kernel( void ) {
// does nothing
//}
int main(int argc, char** argv) {
// default the loop count to equal 1
int loopCount = 1;
// take in a command line arg to set the loop count
if(argc > 1){
loopCount = atoi(argv[1]);
}
// declare the device pointer
int *dev_a;
// allocation size in bytes for the cuda malloc (1 byte here)
int size = 1;
// malloc on the device
// loop over the loop count, allocating on the device each iteration
for(int i = 0; i < loopCount; i++){
cudaMalloc((void **)&dev_a, size);
//cudaFree(dev_a);
}
return 0;
}
|
0e465b36088a0176b1741959aa6dd2917246513e.hip | // !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
extern "C"
{
__global__ void FullyConnectedUpdateMovingAveragesKernel(
float *weightsGradPtr,
float *biasGradPtr,
float *weightsGradCurvePtr,
float *biasGradCurvePtr,
float *avgWeightGradPtr,
float *avgBiasGradPtr,
float *avgWeightGradVarPtr,
float *avgBiasGradVarPtr,
float *avgWeightGradCurvePtr,
float *avgBiasGradCurvePtr,
float *avgWeightGradCurveVarPtr,
float *avgBiasGradCurveVarPtr,
float *weightMemorySizePtr,
float *biasMemorySizePtr,
float *dropoutMaskPtr,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
int i;
int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
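// Each running statistic below is an exponential moving average with smoothing
// factor 1/memorySize: avg_new = (1 - 1/N)*avg_old + (1/N)*sample. The *Var
// buffers accumulate the running mean of the squared quantity in the same way.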
if (j < thisLayerSize)
{
if (!dropoutMaskPtr[j])
{
int index = j;
for (i = 0; i < prevLayerSize; i++)
{
// update moving averages according to memory size
avgWeightGradPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradPtr[index];
avgWeightGradVarPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradVarPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradPtr[index] * weightsGradPtr[index];
avgWeightGradCurvePtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradCurvePtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradCurvePtr[index];
avgWeightGradCurveVarPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradCurveVarPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradCurvePtr[index] * weightsGradCurvePtr[index];
index += thisLayerSize;
}
// update moving averages according to memory size
avgBiasGradPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradPtr[j];
avgBiasGradVarPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradVarPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradPtr[j] * biasGradPtr[j];
avgBiasGradCurvePtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradCurvePtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradCurvePtr[j];
avgBiasGradCurveVarPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradCurveVarPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradCurvePtr[j] * biasGradCurvePtr[j];
}
}
}
} | 0e465b36088a0176b1741959aa6dd2917246513e.cu | //Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
extern "C"
{
__global__ void FullyConnectedUpdateMovingAveragesKernel(
float *weightsGradPtr,
float *biasGradPtr,
float *weightsGradCurvePtr,
float *biasGradCurvePtr,
float *avgWeightGradPtr,
float *avgBiasGradPtr,
float *avgWeightGradVarPtr,
float *avgBiasGradVarPtr,
float *avgWeightGradCurvePtr,
float *avgBiasGradCurvePtr,
float *avgWeightGradCurveVarPtr,
float *avgBiasGradCurveVarPtr,
float *weightMemorySizePtr,
float *biasMemorySizePtr,
float *dropoutMaskPtr,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
int i;
int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
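// Each running statistic below is an exponential moving average with smoothing
// factor 1/memorySize: avg_new = (1 - 1/N)*avg_old + (1/N)*sample. The *Var
// buffers accumulate the running mean of the squared quantity in the same way.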
if (j < thisLayerSize)
{
if (!dropoutMaskPtr[j])
{
int index = j;
for (i = 0; i < prevLayerSize; i++)
{
// update moving averages according to memory size
avgWeightGradPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradPtr[index];
avgWeightGradVarPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradVarPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradPtr[index] * weightsGradPtr[index];
avgWeightGradCurvePtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradCurvePtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradCurvePtr[index];
avgWeightGradCurveVarPtr[index] = (1.0f - 1.0f / weightMemorySizePtr[index]) * avgWeightGradCurveVarPtr[index] + (1.0f / weightMemorySizePtr[index]) * weightsGradCurvePtr[index] * weightsGradCurvePtr[index];
index += thisLayerSize;
}
// update moving averages according to memory size
avgBiasGradPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradPtr[j];
avgBiasGradVarPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradVarPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradPtr[j] * biasGradPtr[j];
avgBiasGradCurvePtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradCurvePtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradCurvePtr[j];
avgBiasGradCurveVarPtr[j] = (1.0f - 1.0f / biasMemorySizePtr[j]) * avgBiasGradCurveVarPtr[j] + (1.0f / biasMemorySizePtr[j]) * biasGradCurvePtr[j] * biasGradCurvePtr[j];
}
}
}
} |
e3729cc3c9c7f80c35ee21866ff7da9089fea0dd.hip | // !!! This is a file automatically generated by hipify!!!
//#include <opencv2/gpu/gpu.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/version.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
#if CV_VERSION_EPOCH == 2
#define OPENCV2
#include <opencv2/gpu/gpu.hpp>
namespace GPU = cv::gpu;
#elif CV_VERSION_MAJOR == 4
#define OPENCV4
#include <opencv2/core/cuda.hpp>
namespace GPU = cv::cuda;
#endif
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/imgproc.hpp>
#include <iostream>
#define THREAD_X 32
#define THREAD_Y 32
#define WRAP_SIZE 32
#define MAX_WRAP_NUM 32
//using namespace cv;
//using namespace cv;
int KERNEL_SIZE;
__global__ void conv(int* dev){
int pixel_i=blockDim.x*blockIdx.x+threadIdx.x;
int pixel_j=blockDim.y*blockIdx.y+threadIdx.y;
dev[pixel_i]=1;
// printf("idx%d %d,%d\n",pixel_i,pixel_j,dev[pixel_i]);
}
__global__ void convolution(GPU::PtrStepSz<float> src,GPU::PtrStepSz<double> guass_kernel,GPU::PtrStepSz<float> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){
__shared__ int share_mem[WRAP_SIZE][MAX_WRAP_NUM];
int pixel_i=blockDim.x*blockIdx.x+threadIdx.x;
int pixel_j=blockDim.y*blockIdx.y+threadIdx.y;
//need to do bound check
//printf("pixel %d %d block dim %d %d\n",pixel_i,pixel_j,blockDim.x,blockDim.y);
/*int thread_block_index=pixel_i+pixel_j*;
int share_i=thread_block_index%WRAP_NUM;
int share_j=thread_block_index/WRAP_NUM;*/
double sum=0;
//share_mem[share_i][share_j]=src(pixel_i,pixel_j);
//share_mem[threadIdx.x][threadIdx.y]=src(pixel_i,pixel_j).x;
//__syncthreads();
//printf("%d %d %d\n",pixel_i,pixel_j,share_mem[pixel_i][pixel_j]);
if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){
int start_i=pixel_i-kernel_radius,start_j=pixel_j-kernel_radius;
for(int i=0;i<kernel_size;i++){
for(int j=0;j<kernel_size;j++){
int index_i=start_i+i,index_j=start_j+j;
//sum+=share_mem[][index_j]*guass_kernel(i,j).x;
sum+=src(index_j,index_i)*(float)guass_kernel(i,j);
}
}
dst(pixel_j-kernel_radius,pixel_i-kernel_radius)=sum;//sum;
}
return ;
}
void guassain_conv(const Mat *src,Mat *dst,double sigma){
// int depth = CV_MAT_DEPTH(src.type());
KERNEL_SIZE = cvRound(sigma* 4 * 2 + 1)|1;
int kernel_radius=KERNEL_SIZE/2;
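// KERNEL_SIZE is roughly 8*sigma + 1, rounded and forced odd by the |1 so the kernel
// has a centre pixel; e.g. sigma = 1.5 gives cvRound(13)|1 = 13 and kernel_radius = 6.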
int orign_width=src->cols,orign_height=src->rows;
Mat padding_image;
GPU::GpuMat device_image,g_kernel,result;
if(GPU::getCudaEnabledDeviceCount()==0){
std::cout<<"not use GPU module"<<std::endl;
return ;
}
Mat gauss_x=getGaussianKernel(KERNEL_SIZE,sigma),gauss_y=getGaussianKernel(KERNEL_SIZE,sigma); // 1-D Gaussian kernels, combined below into a KERNEL_SIZE x KERNEL_SIZE filter
Mat gauss_kernel=gauss_x*gauss_y.t();
//allocate
/* double* gs_kernel,*dev_kernel;
hipHostMalloc(&gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,hipHostMallocDefault);
for(int i=0;i<KERNEL_SIZE;i++){
double* row=gauss_kernel.ptr<double>(i);
for(int j=0;j<KERNEL_SIZE;j++){
gs_kernel[i*KERNEL_SIZE+j]=row[j];
}
}
hipMalloc(&dev_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE);*/
//allocate
copyMakeBorder(*src,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0);
int grid_num_x=(padding_image.cols+THREAD_X-1)/THREAD_X,grid_num_y=(padding_image.rows+THREAD_Y-1)/THREAD_Y;
result.upload(*dst);
g_kernel.upload(gauss_kernel);
device_image.upload(padding_image);
//hipMemcpy(dev_kernel,gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,hipMemcpyHostToDevice);
dim3 thread_block(THREAD_X,THREAD_Y);
dim3 grid(grid_num_x,grid_num_y);
hipLaunchKernelGGL(( convolution), dim3(grid),dim3(thread_block), 0, 0, device_image,g_kernel,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height);
result.download(*dst);
return ;
}
| e3729cc3c9c7f80c35ee21866ff7da9089fea0dd.cu | //#include <opencv2/gpu/gpu.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/version.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
#if CV_VERSION_EPOCH == 2
#define OPENCV2
#include <opencv2/gpu/gpu.hpp>
namespace GPU = cv::gpu;
#elif CV_VERSION_MAJOR == 4
#define OPENCV4
#include <opencv2/core/cuda.hpp>
namespace GPU = cv::cuda;
#endif
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/imgproc.hpp>
#include <iostream>
#define THREAD_X 32
#define THREAD_Y 32
#define WRAP_SIZE 32
#define MAX_WRAP_NUM 32
//using namespace cv;
//using namespace cv;
int KERNEL_SIZE;
__global__ void conv(int* dev){
int pixel_i=blockDim.x*blockIdx.x+threadIdx.x;
int pixel_j=blockDim.y*blockIdx.y+threadIdx.y;
dev[pixel_i]=1;
// printf("idx%d %d,%d\n",pixel_i,pixel_j,dev[pixel_i]);
}
__global__ void convolution(GPU::PtrStepSz<float> src,GPU::PtrStepSz<double> guass_kernel,GPU::PtrStepSz<float> dst,int kernel_size,int kernel_radius,int orign_width,int orign_height){
__shared__ int share_mem[WRAP_SIZE][MAX_WRAP_NUM];
int pixel_i=blockDim.x*blockIdx.x+threadIdx.x;
int pixel_j=blockDim.y*blockIdx.y+threadIdx.y;
//need to do bound check
//printf("pixel %d %d block dim %d %d\n",pixel_i,pixel_j,blockDim.x,blockDim.y);
/*int thread_block_index=pixel_i+pixel_j*;
int share_i=thread_block_index%WRAP_NUM;
int share_j=thread_block_index/WRAP_NUM;*/
double sum=0;
//share_mem[share_i][share_j]=src(pixel_i,pixel_j);
//share_mem[threadIdx.x][threadIdx.y]=src(pixel_i,pixel_j).x;
//__syncthreads();
//printf("%d %d %d\n",pixel_i,pixel_j,share_mem[pixel_i][pixel_j]);
if(!(pixel_i<kernel_radius || pixel_j<kernel_radius || pixel_i>=orign_width+kernel_radius || pixel_j>=orign_height+kernel_radius)){
int start_i=pixel_i-kernel_radius,start_j=pixel_j-kernel_radius;
for(int i=0;i<kernel_size;i++){
for(int j=0;j<kernel_size;j++){
int index_i=start_i+i,index_j=start_j+j;
//sum+=share_mem[][index_j]*guass_kernel(i,j).x;
sum+=src(index_j,index_i)*(float)guass_kernel(i,j);
}
}
dst(pixel_j-kernel_radius,pixel_i-kernel_radius)=sum;//sum;
}
return ;
}
void guassain_conv(const Mat *src,Mat *dst,double sigma){
// int depth = CV_MAT_DEPTH(src.type());
KERNEL_SIZE = cvRound(sigma* 4 * 2 + 1)|1;
int kernel_radius=KERNEL_SIZE/2;
int orign_width=src->cols,orign_height=src->rows;
Mat padding_image;
GPU::GpuMat device_image,g_kernel,result;
if(GPU::getCudaEnabledDeviceCount()==0){
std::cout<<"not use GPU module"<<std::endl;
return ;
}
Mat gauss_x=getGaussianKernel(KERNEL_SIZE,sigma),gauss_y=getGaussianKernel(KERNEL_SIZE,sigma); // 1-D Gaussian kernels, combined below into a KERNEL_SIZE x KERNEL_SIZE filter
Mat gauss_kernel=gauss_x*gauss_y.t();
//allocate
/* double* gs_kernel,*dev_kernel;
cudaHostAlloc(&gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,cudaHostAllocDefault);
for(int i=0;i<KERNEL_SIZE;i++){
double* row=gauss_kernel.ptr<double>(i);
for(int j=0;j<KERNEL_SIZE;j++){
gs_kernel[i*KERNEL_SIZE+j]=row[j];
}
}
cudaMalloc(&dev_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE);*/
//allocate
copyMakeBorder(*src,padding_image,kernel_radius,kernel_radius,kernel_radius,kernel_radius,BORDER_CONSTANT, 0);
int grid_num_x=(padding_image.cols+THREAD_X-1)/THREAD_X,grid_num_y=(padding_image.rows+THREAD_Y-1)/THREAD_Y;
result.upload(*dst);
g_kernel.upload(gauss_kernel);
device_image.upload(padding_image);
//cudaMemcpy(dev_kernel,gs_kernel,sizeof(double)*KERNEL_SIZE*KERNEL_SIZE,cudaMemcpyHostToDevice);
dim3 thread_block(THREAD_X,THREAD_Y);
dim3 grid(grid_num_x,grid_num_y);
convolution<<<grid,thread_block>>>(device_image,g_kernel,result,KERNEL_SIZE,kernel_radius,orign_width,orign_height);
result.download(*dst);
return ;
}
|
f9144fec69b9fb3bfe7d5ad4ea377954f2fcc92b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = blockDim.x*blockIdx.x+threadIdx.x;
if( i < len ) out[i] = in1[i] + in2[i];
} | f9144fec69b9fb3bfe7d5ad4ea377954f2fcc92b.cu |
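// Hypothetical launch (sketch only; d_in1, d_in2, d_out and len are assumed to be set
// up by the caller). The grid is rounded up so every element is covered; the bounds
// check inside the kernel handles the remainder:
//   hipLaunchKernelGGL(vecAdd, dim3((len + 255) / 256), dim3(256), 0, 0,
//                      d_in1, d_in2, d_out, len);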
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
int i = blockDim.x*blockIdx.x+threadIdx.x;
if( i < len ) out[i] = in1[i] + in2[i];
} |
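// Hypothetical launch (sketch only; d_in1, d_in2, d_out and len are assumed to be set
// up by the caller). The grid is rounded up so every element is covered; the bounds
// check inside the kernel handles the remainder:
//   vecAdd<<<(len + 255) / 256, 256>>>(d_in1, d_in2, d_out, len);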
ff14a99094356ca1c569d92fc547d69e0cd303ba.hip | // !!! This is a file automatically generated by hipify!!!
//
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/resize.h>
#include <oz/generate.h>
#include <oz/gpu_sampler1.h>
#include <oz/tex2d_util.h>
namespace oz {
template <typename T, resize_mode_t mode> struct Tex2D;
template <typename T> struct Tex2D<T,RESIZE_NEAREST> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_( s_.x * (ix + 0.5f), s_.y * (iy + 0.5f) );
}
};
template <typename T> struct Tex2D<T,RESIZE_FAST_BILINEAR> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModeLinear), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_( s_.x * (ix + 0.5f), s_.y * (iy + 0.5f) );
}
};
template <typename T> struct Tex2D<T,RESIZE_BILINEAR> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DBilinear(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_FAST_BICUBIC> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModeLinear), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DFastBicubic(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_BICUBIC> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DBicubic(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_CATROM> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, hipFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DCatRom(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
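    // Editorial note: the RESIZE_FAST_* variants bind the source texture with
    // hipFilterModeLinear and let the texture unit perform the interpolation,
    // whereas RESIZE_BILINEAR, RESIZE_BICUBIC and RESIZE_CATROM sample with
    // hipFilterModePoint and compute the filter weights explicitly via the
    // helpers in tex2d_util.h, trading some speed for exact filter weights.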
template <typename T>
gpu_image resizeT( const gpu_image& src, unsigned w, unsigned h, resize_mode_t mode ) {
float2 s = make_float2((float)src.w() / w, (float)src.h() / h);
switch (mode) {
case RESIZE_NEAREST: return generate(w, h, Tex2D<T,RESIZE_NEAREST>(src, s));
case RESIZE_FAST_BILINEAR: return generate(w, h, Tex2D<T,RESIZE_FAST_BILINEAR>(src, s));
case RESIZE_BILINEAR: return generate(w, h, Tex2D<T,RESIZE_BILINEAR>(src, s));
case RESIZE_FAST_BICUBIC: return generate(w, h, Tex2D<T,RESIZE_FAST_BICUBIC>(src, s));
case RESIZE_BICUBIC: return generate(w, h, Tex2D<T,RESIZE_BICUBIC>(src, s));
case RESIZE_CATROM: return generate(w, h, Tex2D<T,RESIZE_CATROM>(src, s));
}
OZ_INVALID_FORMAT();
}
}
oz::gpu_image oz::resize( const gpu_image& src, unsigned w, unsigned h, resize_mode_t mode ) {
switch (src.format()) {
case FMT_FLOAT: return resizeT<float >(src, w, h, mode);
case FMT_FLOAT2: return resizeT<float2>(src, w, h, mode);
case FMT_FLOAT3: return resizeT<float3>(src, w, h, mode);
case FMT_FLOAT4: return resizeT<float4>(src, w, h, mode);
default:
OZ_INVALID_FORMAT();
}
}
namespace oz {
template <typename T> struct ResizeX2 : public generator<T> {
gpu_sampler<T,0> src_;
ResizeX2( const gpu_image& src) : src_(src) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_(ix/2, iy/2);
}
};
template <typename T> struct ResizeHalf : public generator<T> {
gpu_sampler<T,0> src_;
ResizeHalf( const gpu_image& src) : src_(src) {}
inline __device__ T operator()( int ix, int iy ) const {
T c0 = src_(2*ix, 2*iy);
T c1 = src_(2*ix+1, 2*iy);
T c2 = src_(2*ix, 2*iy+1);
T c3 = src_(2*ix+1, 2*iy+1);
return 0.25f * (c0 + c1 + c2 + c3);
}
};
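    // Editorial note: ResizeX2 replicates each source pixel into a 2x2 block
    // (nearest-neighbour upsampling), while ResizeHalf averages each 2x2 block
    // of source pixels (a box-filter downsample to half resolution).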
}
oz::gpu_image oz::resize_x2( const gpu_image& src ) {
switch (src.format()) {
case FMT_FLOAT: return generate(2*src.w(), 2*src.h(), ResizeX2<float >(src));
case FMT_FLOAT3: return generate(2*src.w(), 2*src.h(), ResizeX2<float3>(src));
default:
OZ_INVALID_FORMAT();
}
}
oz::gpu_image oz::resize_half( const gpu_image& src ) {
switch (src.format()) {
case FMT_FLOAT: return generate(src.w()/2, src.h()/2, ResizeHalf<float >(src));
case FMT_FLOAT3: return generate(src.w()/2, src.h()/2, ResizeHalf<float3>(src));
default:
OZ_INVALID_FORMAT();
}
}
| ff14a99094356ca1c569d92fc547d69e0cd303ba.cu | //
// by Jan Eric Kyprianidis <www.kyprianidis.com>
// Copyright (C) 2010-2012 Computer Graphics Systems Group at the
// Hasso-Plattner-Institut, Potsdam, Germany <www.hpi3d.de>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
#include <oz/resize.h>
#include <oz/generate.h>
#include <oz/gpu_sampler1.h>
#include <oz/tex2d_util.h>
namespace oz {
template <typename T, resize_mode_t mode> struct Tex2D;
template <typename T> struct Tex2D<T,RESIZE_NEAREST> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_( s_.x * (ix + 0.5f), s_.y * (iy + 0.5f) );
}
};
template <typename T> struct Tex2D<T,RESIZE_FAST_BILINEAR> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModeLinear), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_( s_.x * (ix + 0.5f), s_.y * (iy + 0.5f) );
}
};
template <typename T> struct Tex2D<T,RESIZE_BILINEAR> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DBilinear(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_FAST_BICUBIC> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModeLinear), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DFastBicubic(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_BICUBIC> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DBicubic(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T> struct Tex2D<T,RESIZE_CATROM> : public generator<T> {
gpu_sampler<T,0> src_;
float2 s_;
Tex2D( const gpu_image& src, float2 s ) : src_(src, cudaFilterModePoint), s_(s) {}
inline __device__ T operator()( int ix, int iy ) const {
return make_T<T>(tex2DCatRom(src_.texref(), s_.x * (ix + 0.5f), s_.y * (iy + 0.5f)));
}
};
template <typename T>
gpu_image resizeT( const gpu_image& src, unsigned w, unsigned h, resize_mode_t mode ) {
float2 s = make_float2((float)src.w() / w, (float)src.h() / h);
switch (mode) {
case RESIZE_NEAREST: return generate(w, h, Tex2D<T,RESIZE_NEAREST>(src, s));
case RESIZE_FAST_BILINEAR: return generate(w, h, Tex2D<T,RESIZE_FAST_BILINEAR>(src, s));
case RESIZE_BILINEAR: return generate(w, h, Tex2D<T,RESIZE_BILINEAR>(src, s));
case RESIZE_FAST_BICUBIC: return generate(w, h, Tex2D<T,RESIZE_FAST_BICUBIC>(src, s));
case RESIZE_BICUBIC: return generate(w, h, Tex2D<T,RESIZE_BICUBIC>(src, s));
case RESIZE_CATROM: return generate(w, h, Tex2D<T,RESIZE_CATROM>(src, s));
}
OZ_INVALID_FORMAT();
}
}
oz::gpu_image oz::resize( const gpu_image& src, unsigned w, unsigned h, resize_mode_t mode ) {
switch (src.format()) {
case FMT_FLOAT: return resizeT<float >(src, w, h, mode);
case FMT_FLOAT2: return resizeT<float2>(src, w, h, mode);
case FMT_FLOAT3: return resizeT<float3>(src, w, h, mode);
case FMT_FLOAT4: return resizeT<float4>(src, w, h, mode);
default:
OZ_INVALID_FORMAT();
}
}
namespace oz {
template <typename T> struct ResizeX2 : public generator<T> {
gpu_sampler<T,0> src_;
ResizeX2( const gpu_image& src) : src_(src) {}
inline __device__ T operator()( int ix, int iy ) const {
return src_(ix/2, iy/2);
}
};
template <typename T> struct ResizeHalf : public generator<T> {
gpu_sampler<T,0> src_;
ResizeHalf( const gpu_image& src) : src_(src) {}
inline __device__ T operator()( int ix, int iy ) const {
T c0 = src_(2*ix, 2*iy);
T c1 = src_(2*ix+1, 2*iy);
T c2 = src_(2*ix, 2*iy+1);
T c3 = src_(2*ix+1, 2*iy+1);
return 0.25f * (c0 + c1 + c2 + c3);
}
};
}
oz::gpu_image oz::resize_x2( const gpu_image& src ) {
switch (src.format()) {
case FMT_FLOAT: return generate(2*src.w(), 2*src.h(), ResizeX2<float >(src));
case FMT_FLOAT3: return generate(2*src.w(), 2*src.h(), ResizeX2<float3>(src));
default:
OZ_INVALID_FORMAT();
}
}
oz::gpu_image oz::resize_half( const gpu_image& src ) {
switch (src.format()) {
case FMT_FLOAT: return generate(src.w()/2, src.h()/2, ResizeHalf<float >(src));
case FMT_FLOAT3: return generate(src.w()/2, src.h()/2, ResizeHalf<float3>(src));
default:
OZ_INVALID_FORMAT();
}
}
|
14d6141816eba3ef0bb9bfa2badd953ff60c41a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_div.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
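// Editorial note on the sweep below: argv[1] selects how many of the matrix
// sizes above are benchmarked; for each selected size every (BLOCKX, BLOCKY)
// shape in blocks_ is tried, the kernel gets 10 warm-up launches, then 1000
// launches are timed and the total elapsed time in microseconds is printed
// alongside the block shape and matrix dimensions.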
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(double)); // allocate bytes, not element counts
double *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_div, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { // warm-up launches
hipLaunchKernelGGL(vec_div, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { // timed launches
hipLaunchKernelGGL(vec_div, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 14d6141816eba3ef0bb9bfa2badd953ff60c41a2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_div.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
double *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(double)); // allocate bytes, not element counts
double *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(double));
double *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(double));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_div<<<gridBlock,threadBlock>>>(n,result,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_div<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_div<<<gridBlock,threadBlock>>>(n,result,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
88047e3c3bb919411b2b6ff196267b7f64d6250a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <ctime>
#include <sys/time.h>
#include "readWeights30.h"//to read the weights
#include "deviceFunctions30.h"//contains device functions like matmul,add
using namespace std;
inline int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct
{
int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one to run properly
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__global__ void predictKernel(double *X,double *W_i,double *W_f,double *W_c,double *W_o,double *U_i,double *U_f,double *U_c,double *U_o,double *b_i,double *b_f,double *b_c,double *b_o,double *w,double *b,double *result,double *loop_count)//cuda kernel
{
// Get our global thread ID
int tid = blockIdx.x*blockDim.x+threadIdx.x;
//if(tid==31908)
//printf("Done");
loop_count[0]=0;
	double x[30][3];//input to lstm: 30 timestamps, 3 features each
	double *c_t,*h_t,*i_t,*C_t,*f_t,*o_t;
	double H[30][60];//hidden state per timestamp: columns 0-29 forward pass, 30-59 reversed pass
	double input[60],output[12];//input & output of dense layer
	double pd1[12],pd2[12];//probability density for upper and lower window resp.
int i,j;
double sum,res;
if ((tid>29&&tid<1719551-30))
{
//create upper window
#pragma unroll
	for(i=29;i>=0;i--)//i :timestamp from 29 down to 0
{
x[i][0]=*(X+(tid-(29-i))*3+0);
x[i][1]=*(X+(tid-(29-i))*3+1);
x[i][2]=*(X+(tid-(29-i))*3+2);
loop_count[0]++;
}
//prediction for upper window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//backward pass
#pragma unroll
	for(i=29;i>=0;i--)//i :timestamp from 29 down to 0
{
x[29-i][0]=*(X+(tid-(29-i))*3+0);
x[29-i][1]=*(X+(tid-(29-i))*3+1);
x[29-i][2]=*(X+(tid-(29-i))*3+2);
loop_count[0]++;
}
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<60;j++)
{
sum=0;
#pragma unroll
for(i=0;i<30;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(30.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<60;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
for(i=0;i<12;i++)//prob density for upper window
{
pd1[i]=exp(output[i])/sum;
loop_count[0]++;
}
//create lower window
#pragma unroll
	for(i=0;i<30;i++)//i :timestamp from 0-29
{
x[i][0]=*(X+(tid+i)*3+0);
x[i][1]=*(X+(tid+i)*3+1);
x[i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Backward pass
#pragma unroll
	for(i=0;i<30;i++)//i :timestamp from 0-29
{
x[29-i][0]=*(X+(tid+i)*3+0);
x[29-i][1]=*(X+(tid+i)*3+1);
x[29-i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<60;j++)
{
sum=0;
#pragma unroll
for(i=0;i<30;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(30.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<60;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
	for(i=0;i<12;i++)//prob density for lower window
{
pd2[i]=exp(output[i])/sum;
loop_count[0]++;
}
res=0;
#pragma unroll
for(i=0;i<12;i++)
{
res+=(pd1[i]*pd2[i]);
loop_count[0]++;
}
*(result+tid)=res;
}//if tid
}// kernel loop
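// Editorial summary of predictKernel (added for readability; this is an
// interpretation of the code above, not an original author comment):
//  * each thread handles one sample index tid that has at least 30 samples on
//    either side, building an "upper" window (the 30 samples ending at tid) and
//    a "lower" window (the 30 samples starting at tid);
//  * for each window a bidirectional LSTM is evaluated by hand: the forward
//    pass fills H[t][0..29], the pass over the reversed window fills
//    H[t][30..59], the 60 hidden units are mean-pooled over the 30 timestamps,
//    and a dense layer followed by a softmax yields a 12-class probability
//    density (pd1 for the upper window, pd2 for the lower one);
//  * the dot product of pd1 and pd2 is written to result[tid];
//  * loop_count[0] is reset and incremented by every thread without atomics,
//    so it is racy and only serves as a rough operation count for the FLOP
//    estimate printed by main().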
int main()
{
double *X=(double *)malloc(1719551 * 3 * sizeof(double));//dataset
double *W_i=(double *)malloc(30*3*sizeof(double));
double *W_f=(double *)malloc(30*3*sizeof(double));
double *W_c=(double *)malloc(30*3*sizeof(double));
double *W_o=(double *)malloc(30*3*sizeof(double));
double *U_i=(double *)malloc(30*30*sizeof(double));
double *U_f=(double *)malloc(30*30*sizeof(double));
double *U_c=(double *)malloc(30*30*sizeof(double));
double *U_o=(double *)malloc(30*30*sizeof(double));
double *b_i=(double *)malloc(30*sizeof(double));
double *b_f=(double *)malloc(30*sizeof(double));
double *b_c=(double *)malloc(30*sizeof(double));
double *b_o=(double *)malloc(30*sizeof(double));
double *w=(double *)malloc(60*12*sizeof(double));
double *b=(double *)malloc(12*sizeof(double));
double *result=(double *)malloc(1719551*sizeof(double));
double *loop_count=(double *)malloc(1*sizeof(double));
readWeights(X,W_i,W_f,W_c,W_o,U_i,U_f,U_c,U_o,b_i,b_f,b_c,b_o,w,b);//read the weights from file(readWeights.h)
//for(int p=0;p<50;p++)
//printf("%f ",*(b_i+p));
//printf("\n");
double *X_gpu,*W_i_gpu,*W_f_gpu,*W_c_gpu,*W_o_gpu,*U_i_gpu,*U_f_gpu,*U_c_gpu,*U_o_gpu,*b_i_gpu,*b_f_gpu,*b_c_gpu,*b_o_gpu,*w_gpu,*b_gpu,*result_gpu,*loop_count_gpu;//device vector
size_t bytes1=1719551*3*sizeof(double);//size in bytes of the vector to be sent to gpu
size_t bytes2=30*3*sizeof(double);
size_t bytes3=30*30*sizeof(double);
size_t bytes4=30*sizeof(double);
size_t bytes5=60*12*sizeof(double);
size_t bytes6=12*sizeof(double);
size_t bytes7=1719551*sizeof(double);
// Allocate memory for each vector on GPU
hipMalloc(&X_gpu, bytes1);
hipMalloc(&W_i_gpu,bytes2);
hipMalloc(&W_f_gpu,bytes2);
hipMalloc(&W_c_gpu,bytes2);
hipMalloc(&W_o_gpu,bytes2);
hipMalloc(&U_i_gpu,bytes3);
hipMalloc(&U_f_gpu,bytes3);
hipMalloc(&U_c_gpu,bytes3);
hipMalloc(&U_o_gpu,bytes3);
hipMalloc(&b_i_gpu,bytes4);
hipMalloc(&b_f_gpu,bytes4);
hipMalloc(&b_c_gpu,bytes4);
hipMalloc(&b_o_gpu,bytes4);
hipMalloc(&w_gpu,bytes5);
hipMalloc(&b_gpu,bytes6);
hipMalloc(&result_gpu,bytes7);
hipMalloc(&loop_count_gpu,1*sizeof(double));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Copy host vectors to device
hipMemcpy(X_gpu,X,bytes1,hipMemcpyHostToDevice);
hipMemcpy(W_i_gpu,W_i,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_f_gpu,W_f,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_c_gpu,W_c,bytes2,hipMemcpyHostToDevice);
hipMemcpy(W_o_gpu,W_o,bytes2,hipMemcpyHostToDevice);
hipMemcpy(U_i_gpu,U_i,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_f_gpu,U_f,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_c_gpu,U_c,bytes3,hipMemcpyHostToDevice);
hipMemcpy(U_o_gpu,U_o,bytes3,hipMemcpyHostToDevice);
hipMemcpy(b_i_gpu,b_i,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_f_gpu,b_f,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_c_gpu,b_c,bytes4,hipMemcpyHostToDevice);
hipMemcpy(b_o_gpu,b_o,bytes4,hipMemcpyHostToDevice);
hipMemcpy(w_gpu,w,bytes5,hipMemcpyHostToDevice);
hipMemcpy(b_gpu,b,bytes6,hipMemcpyHostToDevice);
hipMemcpy(loop_count_gpu,loop_count,1*sizeof(double),hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)1719551/blockSize);
// Execute the kernel
//Gflops
double fs_t, fe_t, ft_t;
struct timeval t;
int cudaCores, smCount, totalThreads;
	double f_avg = 0; // must be initialised before the += accumulation below
int i=0;
hipSetDevice(i);
// Get device properties
printf("\nCUDA Device #%d\n\n", (i+1));
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
smCount = devProp.multiProcessorCount;
cudaCores = _ConvertSMVer2Cores(devProp.major, devProp.minor);
totalThreads=1719551-60;
gettimeofday(&t, NULL);
fs_t = t.tv_sec+(t.tv_usec/1000000.0);
hipEventRecord(start);
hipLaunchKernelGGL(( predictKernel), dim3(gridSize), dim3(blockSize), 0, 0, X_gpu,W_i_gpu,W_f_gpu,W_c_gpu,W_o_gpu,U_i_gpu,U_f_gpu,U_c_gpu,U_o_gpu,b_i_gpu,b_f_gpu,b_c_gpu,b_o_gpu,w_gpu,b_gpu,result_gpu,loop_count_gpu);
hipEventRecord(stop);
hipDeviceSynchronize();
gettimeofday(&t, NULL);
fe_t = t.tv_sec+(t.tv_usec/1000000.0);
ft_t = fe_t - fs_t;
hipMemcpy(loop_count,loop_count_gpu,sizeof(double),hipMemcpyDeviceToHost);
cout<<loop_count[0]<<' '<<smCount<<' '<<cudaCores<<' '<<totalThreads<<'\n';
f_avg += (loop_count[0]*smCount*cudaCores*totalThreads*10)/(ft_t*1000000000);
hipMemcpy(result,result_gpu,bytes7,hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
cout<<"Time:"<<'\n';
cout<<(float)(milliseconds/1000)<<'\n';
for(int z=31908;z<=31968;z++)
cout<<result[z]<<' ';
printf("Number of FLOPs: %lf G-FLOPs\n", (f_avg));
hipFree(X_gpu);
hipFree(W_i_gpu);
hipFree(W_f_gpu);
hipFree(W_c_gpu);
hipFree(W_o_gpu);
hipFree(U_i_gpu);
hipFree(U_f_gpu);
hipFree(U_c_gpu);
hipFree(U_o_gpu);
hipFree(b_i_gpu);
hipFree(b_f_gpu);
hipFree(b_c_gpu);
hipFree(b_o_gpu);
hipFree(w_gpu);
hipFree(b_gpu);
hipFree(result_gpu);
return 0;
}
| 88047e3c3bb919411b2b6ff196267b7f64d6250a.cu | #include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <unistd.h>
#include <math.h>
#include <stdlib.h>
#include <cuda.h>
#include <ctime>
#include <sys/time.h>
#include "readWeights30.h"//to read the weights
#include "deviceFunctions30.h"//contains device functions like matmul,add
using namespace std;
inline int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct
{
int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one to run properly
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__global__ void predictKernel(double *X,double *W_i,double *W_f,double *W_c,double *W_o,double *U_i,double *U_f,double *U_c,double *U_o,double *b_i,double *b_f,double *b_c,double *b_o,double *w,double *b,double *result,double *loop_count)//cuda kernel
{
// Get our global thread ID
int tid = blockIdx.x*blockDim.x+threadIdx.x;
//if(tid==31908)
//printf("Done");
loop_count[0]=0;
	double x[30][3];//input to lstm: 30 timestamps, 3 features each
	double *c_t,*h_t,*i_t,*C_t,*f_t,*o_t;
	double H[30][60];//hidden state per timestamp: columns 0-29 forward pass, 30-59 reversed pass
	double input[60],output[12];//input & output of dense layer
	double pd1[12],pd2[12];//probability density for upper and lower window resp.
int i,j;
double sum,res;
if ((tid>29&&tid<1719551-30))
{
//create upper window
#pragma unroll
for(i=29;i>=0;i--)//i :timestamp from 0-49
{
x[i][0]=*(X+(tid-(29-i))*3+0);
x[i][1]=*(X+(tid-(29-i))*3+1);
x[i][2]=*(X+(tid-(29-i))*3+2);
loop_count[0]++;
}
//prediction for upper window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//backward pass
#pragma unroll
for(i=29;i>=0;i--)//i :timestamp from 0-49
{
x[29-i][0]=*(X+(tid-(29-i))*3+0);
x[29-i][1]=*(X+(tid-(29-i))*3+1);
x[29-i][2]=*(X+(tid-(29-i))*3+2);
loop_count[0]++;
}
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<60;j++)
{
sum=0;
#pragma unroll
for(i=0;i<30;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(30.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<60;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
for(i=0;i<12;i++)//prob density for upper window
{
pd1[i]=exp(output[i])/sum;
loop_count[0]++;
}
//create lower window
#pragma unroll
for(i=0;i<30;i++)//i :timestamp from 0-49
{
x[i][0]=*(X+(tid+i)*3+0);
x[i][1]=*(X+(tid+i)*3+1);
x[i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Backward pass
#pragma unroll
for(i=0;i<30;i++)//i :timestamp from 0-49
{
x[29-i][0]=*(X+(tid+i)*3+0);
x[29-i][1]=*(X+(tid+i)*3+1);
x[29-i][2]=*(X+(tid+i)*3+2);
loop_count[0]++;
}
//prediction for lower window
#pragma unroll
for(i=0;i<30;i++)//i: timestamp(t)
{
if(i==0)
{
i_t=sigmoid(add(matmul1(W_i,x[i]),b_i));
C_t=tan(add(matmul1(W_c,x[i]),b_c));
f_t=sigmoid(add(matmul1(W_f,x[i]),b_f));
c_t=mult(i_t,C_t);
o_t=sigmoid(add(matmul1(W_o,x[i]),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//if
else
{
i_t=sigmoid(add(add(matmul1(W_i,x[i]),matmul2(U_i,h_t)),b_i));
C_t=tan(add(add(matmul1(W_c,x[i]),matmul2(U_c,h_t)),b_c));
f_t=sigmoid(add(add(matmul1(W_f,x[i]),matmul2(U_f,h_t)),b_f));
c_t=add(mult(i_t,C_t),mult(f_t,c_t));
o_t=sigmoid(add(add(matmul1(W_o,x[i]),matmul2(U_o,h_t)),b_o));
h_t=mult(o_t,tan(c_t));
#pragma unroll
for(j=0;j<30;j++)
{
H[i][30+j]=h_t[j];
loop_count[0]++;
}
}//else
}
//Mean Pooling
#pragma unroll
for(j=0;j<60;j++)
{
sum=0;
#pragma unroll
for(i=0;i<30;i++)
{
sum+=H[i][j];
loop_count[0]++;
}
input[j]=sum/(30.0);
}
//Dense Layer
sum=0;
#pragma unroll
for(i=0;i<12;i++)
{
output[i]=b[i];
#pragma unroll
for(j=0;j<60;j++)
{
output[i]+=(input[j]*(*(w+j*12+i)));
loop_count[0]++;
}
sum+=exp(output[i]);
}
#pragma unroll
	for(i=0;i<12;i++)//prob density for lower window
{
pd2[i]=exp(output[i])/sum;
loop_count[0]++;
}
res=0;
#pragma unroll
for(i=0;i<12;i++)
{
res+=(pd1[i]*pd2[i]);
loop_count[0]++;
}
*(result+tid)=res;
}//if tid
}// kernel loop
int main()
{
double *X=(double *)malloc(1719551 * 3 * sizeof(double));//dataset
double *W_i=(double *)malloc(30*3*sizeof(double));
double *W_f=(double *)malloc(30*3*sizeof(double));
double *W_c=(double *)malloc(30*3*sizeof(double));
double *W_o=(double *)malloc(30*3*sizeof(double));
double *U_i=(double *)malloc(30*30*sizeof(double));
double *U_f=(double *)malloc(30*30*sizeof(double));
double *U_c=(double *)malloc(30*30*sizeof(double));
double *U_o=(double *)malloc(30*30*sizeof(double));
double *b_i=(double *)malloc(30*sizeof(double));
double *b_f=(double *)malloc(30*sizeof(double));
double *b_c=(double *)malloc(30*sizeof(double));
double *b_o=(double *)malloc(30*sizeof(double));
double *w=(double *)malloc(60*12*sizeof(double));
double *b=(double *)malloc(12*sizeof(double));
double *result=(double *)malloc(1719551*sizeof(double));
double *loop_count=(double *)malloc(1*sizeof(double));
readWeights(X,W_i,W_f,W_c,W_o,U_i,U_f,U_c,U_o,b_i,b_f,b_c,b_o,w,b);//read the weights from file(readWeights.h)
//for(int p=0;p<50;p++)
//printf("%f ",*(b_i+p));
//printf("\n");
double *X_gpu,*W_i_gpu,*W_f_gpu,*W_c_gpu,*W_o_gpu,*U_i_gpu,*U_f_gpu,*U_c_gpu,*U_o_gpu,*b_i_gpu,*b_f_gpu,*b_c_gpu,*b_o_gpu,*w_gpu,*b_gpu,*result_gpu,*loop_count_gpu;//device vector
size_t bytes1=1719551*3*sizeof(double);//size in bytes of the vector to be sent to gpu
size_t bytes2=30*3*sizeof(double);
size_t bytes3=30*30*sizeof(double);
size_t bytes4=30*sizeof(double);
size_t bytes5=60*12*sizeof(double);
size_t bytes6=12*sizeof(double);
size_t bytes7=1719551*sizeof(double);
// Allocate memory for each vector on GPU
cudaMalloc(&X_gpu, bytes1);
cudaMalloc(&W_i_gpu,bytes2);
cudaMalloc(&W_f_gpu,bytes2);
cudaMalloc(&W_c_gpu,bytes2);
cudaMalloc(&W_o_gpu,bytes2);
cudaMalloc(&U_i_gpu,bytes3);
cudaMalloc(&U_f_gpu,bytes3);
cudaMalloc(&U_c_gpu,bytes3);
cudaMalloc(&U_o_gpu,bytes3);
cudaMalloc(&b_i_gpu,bytes4);
cudaMalloc(&b_f_gpu,bytes4);
cudaMalloc(&b_c_gpu,bytes4);
cudaMalloc(&b_o_gpu,bytes4);
cudaMalloc(&w_gpu,bytes5);
cudaMalloc(&b_gpu,bytes6);
cudaMalloc(&result_gpu,bytes7);
cudaMalloc(&loop_count_gpu,1*sizeof(double));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Copy host vectors to device
cudaMemcpy(X_gpu,X,bytes1,cudaMemcpyHostToDevice);
cudaMemcpy(W_i_gpu,W_i,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_f_gpu,W_f,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_c_gpu,W_c,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(W_o_gpu,W_o,bytes2,cudaMemcpyHostToDevice);
cudaMemcpy(U_i_gpu,U_i,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_f_gpu,U_f,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_c_gpu,U_c,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(U_o_gpu,U_o,bytes3,cudaMemcpyHostToDevice);
cudaMemcpy(b_i_gpu,b_i,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_f_gpu,b_f,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_c_gpu,b_c,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(b_o_gpu,b_o,bytes4,cudaMemcpyHostToDevice);
cudaMemcpy(w_gpu,w,bytes5,cudaMemcpyHostToDevice);
cudaMemcpy(b_gpu,b,bytes6,cudaMemcpyHostToDevice);
cudaMemcpy(loop_count_gpu,loop_count,1*sizeof(double),cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)1719551/blockSize);
// Execute the kernel
//Gflops
double fs_t, fe_t, ft_t;
struct timeval t;
int cudaCores, smCount, totalThreads;
	double f_avg = 0; // must be initialised before the += accumulation below
int i=0;
cudaSetDevice(i);
// Get device properties
printf("\nCUDA Device #%d\n\n", (i+1));
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
smCount = devProp.multiProcessorCount;
cudaCores = _ConvertSMVer2Cores(devProp.major, devProp.minor);
totalThreads=1719551-60;
gettimeofday(&t, NULL);
fs_t = t.tv_sec+(t.tv_usec/1000000.0);
cudaEventRecord(start);
predictKernel<<<gridSize, blockSize>>>(X_gpu,W_i_gpu,W_f_gpu,W_c_gpu,W_o_gpu,U_i_gpu,U_f_gpu,U_c_gpu,U_o_gpu,b_i_gpu,b_f_gpu,b_c_gpu,b_o_gpu,w_gpu,b_gpu,result_gpu,loop_count_gpu);
cudaEventRecord(stop);
cudaThreadSynchronize();
gettimeofday(&t, NULL);
fe_t = t.tv_sec+(t.tv_usec/1000000.0);
ft_t = fe_t - fs_t;
cudaMemcpy(loop_count,loop_count_gpu,sizeof(double),cudaMemcpyDeviceToHost);
cout<<loop_count[0]<<' '<<smCount<<' '<<cudaCores<<' '<<totalThreads<<'\n';
f_avg += (loop_count[0]*smCount*cudaCores*totalThreads*10)/(ft_t*1000000000);
cudaMemcpy(result,result_gpu,bytes7,cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout<<"Time:"<<'\n';
cout<<(float)(milliseconds/1000)<<'\n';
for(int z=31908;z<=31968;z++)
cout<<result[z]<<' ';
printf("Number of FLOPs: %lf G-FLOPs\n", (f_avg));
cudaFree(X_gpu);
cudaFree(W_i_gpu);
cudaFree(W_f_gpu);
cudaFree(W_c_gpu);
cudaFree(W_o_gpu);
cudaFree(U_i_gpu);
cudaFree(U_f_gpu);
cudaFree(U_c_gpu);
cudaFree(U_o_gpu);
cudaFree(b_i_gpu);
cudaFree(b_f_gpu);
cudaFree(b_c_gpu);
cudaFree(b_o_gpu);
cudaFree(w_gpu);
cudaFree(b_gpu);
cudaFree(result_gpu);
return 0;
}
|
f2945ededddf061ec54cf9e4c882872609007b67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/batch_gather_ops.h"
// Shared batch kernel
#include "caffe2/operators/gather_op.cuh"
namespace caffe2 {
template <>
bool BatchGatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, CUDA));
}
template <>
template <typename TInd>
bool BatchGatherOp<CUDAContext>::DoRunWithType() {
// BatchGather is a special-case of Gather with Axis = 1, wrap = false.
return gather_helper::gather_impl_cuda<TInd>(
this, DATA, INDICES, 0, 1, false);
}
template <typename T_INDEX, typename TData>
__global__ void BatchGatherGradientKernel(
const TData* grad_data,
TData* out,
const T_INDEX* indices,
const int outer_dims_product,
const int N,
const int data_batch_size,
const int gathered_batch_size,
const int block_size,
const int src_indexing_axis_dim,
const bool wrap_indices) {
int begin_idx = blockIdx.x * blockDim.x + threadIdx.x;
int num_items = outer_dims_product * N * block_size;
for (int s = begin_idx; s < num_items; s += blockDim.x * gridDim.x) {
const int k = s % block_size;
const int j = s / block_size % N;
const int i = s / block_size / N;
T_INDEX idx = indices[j];
if (wrap_indices && idx < 0) {
idx = idx + src_indexing_axis_dim;
}
const float* src_offset =
grad_data + i * gathered_batch_size + j * block_size;
float* dst_offset = out + i * data_batch_size + idx * block_size;
atomicAdd(dst_offset + k, src_offset[k]);
}
}
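// Editorial note on the kernel above: each flat work item s is decomposed as
//   k = s % block_size             (offset inside one gathered slice)
//   j = (s / block_size) % N       (which entry of `indices` is being scattered)
//   i = s / (block_size * N)       (outer batch)
// and gradient slice j of batch i is accumulated into row indices[j] of the data
// gradient. atomicAdd is needed because the same index value can occur more than
// once in `indices`, so several threads may target the same output row.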
template <>
bool BatchGatherGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, CUDA));
}
template <>
template <typename TInd>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<float, GenericTensorImplementation>,
TInd>::call(this, OperatorBase::Input<Tensor>(DATA, CUDA));
}
template <>
template <typename TInd, typename TData>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithType2() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto& grad = Input(GRAD);
// ONNX allows negative axis to index from the back, valid range: [-r, r].
int axis = axis_;
if (axis < 0) {
axis = data.dim() + axis;
}
// Outer dimensions of input data and gradient should be the same
// because they are preserved for gathers with axis > 0.
for (int acheck = 0; acheck < axis; acheck++) {
CAFFE_ENFORCE_EQ(
data.size(acheck), grad.size(acheck), "batch sizes should be the same");
}
auto* output = Output(0, data.sizes(), at::dtype<float>());
auto* out_data = output->template mutable_data<float>();
math::Set<float, CUDAContext>(output->numel(), 0, out_data, &context_);
const auto* grad_data = grad.template data<float>();
const TInd* idxs = indices.template data<TInd>();
// Treat all outer dimensions as a unit as they contribute to larger batch.
const int outer_dims_product = grad.size_to_dim(axis);
const int block_size = data.size_from_dim(axis + 1);
const int N = indices.numel();
const auto data_batch_size = data.size_from_dim(axis);
const auto gathered_batch_size = N * block_size;
const int src_indexing_axis_dim = data.dim(axis);
// Assign each thread index its own 'float' in block_size * N (kernel will
// loop if there is more data than fits NUM_BLOCKS * NUM_THREADS limit).
hipLaunchKernelGGL(( BatchGatherGradientKernel),
dim3(::min(outer_dims_product, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(::min(N * block_size, CAFFE_CUDA_NUM_THREADS)),
0,
context_.cuda_stream(),
grad_data,
out_data,
idxs,
outer_dims_product,
N,
data_batch_size,
gathered_batch_size,
block_size,
src_indexing_axis_dim,
false); // TBD: Add proper index wrapping support to Gather gradients.
return true;
}
template <>
template <typename TInd>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithOtherType2() {
CAFFE_THROW(
"BatchGatherGradient is not implemented on tensor of type ",
Input(DATA).meta().name(),
"consider adding it as a type in the DispatchHelper list or implementing"
" a generic version (which won't work for duplicated indices though)");
}
REGISTER_CUDA_OPERATOR(BatchGather, BatchGatherOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(BatchGatherGradient, BatchGatherGradientOp<CUDAContext>);
} // namespace caffe2
| f2945ededddf061ec54cf9e4c882872609007b67.cu | #include <fstream>
#include "caffe2/core/common_gpu.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/batch_gather_ops.h"
// Shared batch kernel
#include "caffe2/operators/gather_op.cuh"
namespace caffe2 {
template <>
bool BatchGatherOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, CUDA));
}
template <>
template <typename TInd>
bool BatchGatherOp<CUDAContext>::DoRunWithType() {
// BatchGather is a special-case of Gather with Axis = 1, wrap = false.
return gather_helper::gather_impl_cuda<TInd>(
this, DATA, INDICES, 0, 1, false);
}
template <typename T_INDEX, typename TData>
__global__ void BatchGatherGradientKernel(
const TData* grad_data,
TData* out,
const T_INDEX* indices,
const int outer_dims_product,
const int N,
const int data_batch_size,
const int gathered_batch_size,
const int block_size,
const int src_indexing_axis_dim,
const bool wrap_indices) {
int begin_idx = blockIdx.x * blockDim.x + threadIdx.x;
int num_items = outer_dims_product * N * block_size;
for (int s = begin_idx; s < num_items; s += blockDim.x * gridDim.x) {
const int k = s % block_size;
const int j = s / block_size % N;
const int i = s / block_size / N;
T_INDEX idx = indices[j];
if (wrap_indices && idx < 0) {
idx = idx + src_indexing_axis_dim;
}
const float* src_offset =
grad_data + i * gathered_batch_size + j * block_size;
float* dst_offset = out + i * data_batch_size + idx * block_size;
atomicAdd(dst_offset + k, src_offset[k]);
}
}
template <>
bool BatchGatherGradientOp<CUDAContext>::RunOnDevice() {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, OperatorBase::Input<Tensor>(INDICES, CUDA));
}
template <>
template <typename TInd>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithType() {
return DispatchHelper<
TensorTypes2<float, GenericTensorImplementation>,
TInd>::call(this, OperatorBase::Input<Tensor>(DATA, CUDA));
}
template <>
template <typename TInd, typename TData>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithType2() {
auto& data = Input(DATA);
auto& indices = Input(INDICES);
auto& grad = Input(GRAD);
// ONNX allows negative axis to index from the back, valid range: [-r, r].
int axis = axis_;
if (axis < 0) {
axis = data.dim() + axis;
}
// Outer dimensions of input data and gradient should be the same
// because they are preserved for gathers with axis > 0.
for (int acheck = 0; acheck < axis; acheck++) {
CAFFE_ENFORCE_EQ(
data.size(acheck), grad.size(acheck), "batch sizes should be the same");
}
auto* output = Output(0, data.sizes(), at::dtype<float>());
auto* out_data = output->template mutable_data<float>();
math::Set<float, CUDAContext>(output->numel(), 0, out_data, &context_);
const auto* grad_data = grad.template data<float>();
const TInd* idxs = indices.template data<TInd>();
// Treat all outer dimensions as a unit as they contribute to larger batch.
const int outer_dims_product = grad.size_to_dim(axis);
const int block_size = data.size_from_dim(axis + 1);
const int N = indices.numel();
const auto data_batch_size = data.size_from_dim(axis);
const auto gathered_batch_size = N * block_size;
const int src_indexing_axis_dim = data.dim(axis);
// Assign each thread index its own 'float' in block_size * N (kernel will
// loop if there is more data than fits NUM_BLOCKS * NUM_THREADS limit).
BatchGatherGradientKernel<<<
std::min(outer_dims_product, CAFFE_MAXIMUM_NUM_BLOCKS),
std::min(N * block_size, CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream()>>>(
grad_data,
out_data,
idxs,
outer_dims_product,
N,
data_batch_size,
gathered_batch_size,
block_size,
src_indexing_axis_dim,
false); // TBD: Add proper index wrapping support to Gather gradients.
return true;
}
template <>
template <typename TInd>
bool BatchGatherGradientOp<CUDAContext>::DoRunWithOtherType2() {
CAFFE_THROW(
"BatchGatherGradient is not implemented on tensor of type ",
Input(DATA).meta().name(),
"consider adding it as a type in the DispatchHelper list or implementing"
" a generic version (which won't work for duplicated indices though)");
}
REGISTER_CUDA_OPERATOR(BatchGather, BatchGatherOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(BatchGatherGradient, BatchGatherGradientOp<CUDAContext>);
} // namespace caffe2
|
a97f9a4a796b9db50a21bfada51081e55c4ce0c6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
#include "timer.c"
/*********************************************************
*
*
* To Compile:
* nvcc -o cuda_linear_regression cuda_linear_regression.cu -lm
*
*
* To Run:
* ./cuda_linear_regression
*
*
*********************************************************/
typedef struct plot_coordinate{
double x; // x co-ordinate
double y; // y co-ordinate
}plot_coordinate;
int no_of_data = 1000;
__device__ int d_n_data =1000;
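// Editorial addition - a hypothetical host-side reference (not part of the
// original program): the closed-form least-squares fit y = m*x + c that the GPU
// reduction over `data` is presumably meant to reproduce.
static void host_least_squares_reference(const plot_coordinate *d, int n,
                                         double *m, double *c)
{
	double sx = 0, sy = 0, sxy = 0, sxx = 0;
	for (int i = 0; i < n; i++) {
		sx  += d[i].x;
		sy  += d[i].y;
		sxy += d[i].x * d[i].y;
		sxx += d[i].x * d[i].x;
	}
	*m = (n * sxy - sx * sy) / (n * sxx - sx * sx);   // slope
	*c = (sy - *m * sx) / n;                          // intercept
}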
plot_coordinate data[] = {
{82.45,155.07},{65.27,121.45},{67.29,117.39},{72.17,120.95},
{69.42,132.58},{76.35,137.91},{79.20,151.69},{71.97,123.08},
{85.03,137.12},{78.83,136.47},{71.34,131.75},{66.14,129.76},
{65.22,111.73},{77.67,137.24},{73.30,105.03},{71.56,120.18},
{66.92,105.91},{69.09,134.67},{54.03,108.08},{61.79,114.62},
{67.52,119.60},{31.12,75.51},{13.49,50.66},{61.43,134.15},
{51.51,107.20},{93.87,149.32},{98.59,167.92},{94.93,146.15},
{32.47,67.59},{36.91,92.19},{45.36,104.11},{42.58,97.37},
{ 2.38,35.79},{52.07,114.35},{40.76,111.33},{35.44,98.07},
{57.03,114.02},{17.15,65.52},{26.63,75.12},{68.64,132.38},
{87.73,137.17},{43.40,106.42},{59.12,103.58},{ 5.83,35.24},
{31.03,79.78},{68.56,127.27},{21.54,60.20},{19.62,67.80},
{61.39,128.09},{45.79,89.44},{16.02,64.22},{19.78,65.61},
{34.76,88.37},{45.97,85.20},{88.74,145.02},{76.48,129.69},
{19.76,56.76},{87.72,157.39},{66.75,118.41},{63.57,121.44},
{29.80,87.78},{32.63,85.94},{75.87,134.69},{ 0.85,40.28},
{94.47,163.58},{72.99,135.55},{64.22,127.04},{ 3.32,40.20},
{ 6.88,42.32},{31.08,75.99},{60.22,120.13},{17.45,60.36},
{57.29,105.03},{49.31,82.69},{11.87,61.21},{81.39,144.96},
{48.71,78.63},{11.23,36.32},{16.35,54.14},{19.70,57.20},
{17.03,63.54},{84.59,154.43},{ 7.41,42.73},{43.82,81.77},
{49.21,107.45},{53.00,95.15},{13.27,45.40},{67.77,128.98},
{93.90,165.29},{93.29,173.90},{40.02,86.36},{22.79,75.44},
{98.39,167.10},{94.95,164.10},{60.08,127.24},{ 6.06,45.37},
{61.35,121.18},{ 4.95,27.34},{23.32,66.61},{32.79,88.38},
{83.20,143.97},{60.59,126.92},{ 7.93,70.52},{94.46,152.96},
{71.82,121.20},{59.27,107.93},{64.93,142.11},{94.58,163.94},
{ 7.21,40.58},{52.74,116.16},{79.42,120.53},{ 7.82,30.57},
{ 9.83,45.29},{58.21,108.86},{48.11,107.39},{88.55,140.09},
{29.26,71.16},{34.96,80.00},{ 1.12,23.12},{55.14,104.13},
{19.82,50.32},{43.38,83.62},{24.62,51.03},{62.84,101.88},
{26.88,60.06},{94.48,144.08},{95.14,157.80},{47.90,85.47},
{90.90,143.82},{39.53,79.74},{80.77,155.44},{ 6.07,17.43},
{56.88,103.70},{43.95,84.71},{16.12,45.98},{ 5.12,44.40},
{81.71,134.56},{24.30,45.54},{83.68,146.90},{17.62,49.28},
{42.10,97.75},{41.25,84.38},{82.68,155.74},{44.56,95.45},
{85.21,142.50},{73.50,125.45},{ 3.45,52.95},{30.65,73.60},
{29.33,76.20},{30.31,85.46},{69.41,135.79},{73.21,133.16},
{40.62,87.68},{26.38,65.16},{ 5.14,59.66},{94.33,160.01},
{ 6.52,52.57},{90.79,146.06},{ 9.78,55.77},{ 4.71,53.43},
{74.01,129.97},{68.72,119.11},{16.35,59.99},{44.08,109.17},
{31.02,63.78},{14.76,33.17},{62.63,126.09},{55.88,96.90},
{57.41,99.30},{83.66,131.04},{86.08,175.22},{81.13,140.01},
{18.25,71.09},{65.68,104.02},{66.08,122.24},{48.81,96.28},
{79.07,132.27},{20.07,67.34},{16.24,48.49},{30.98,85.11},
{ 2.27,45.14},{44.11,76.86},{ 2.49,45.65},{72.96,136.23},
{89.49,156.60},{54.51,105.71},{92.23,153.22},{95.02,160.48},
{73.99,111.16},{52.70,93.18},{90.82,154.82},{53.42,100.57},
{19.77,60.95},{26.30,63.93},{23.07,54.59},{88.86,142.32},
{98.65,175.75},{76.19,130.10},{59.20,111.38},{58.43,121.18},
{33.27,82.74},{74.68,126.95},{88.64,141.44},{81.47,117.66},
{99.22,170.99},{98.17,163.34},{91.54,144.52},{17.22,67.20},
{66.49,115.36},{68.68,128.45},{ 1.35,54.22},{47.22,98.90},
{79.94,147.19},{22.05,76.35},{50.23,102.66},{ 5.97,37.93},
{67.56,98.13},{18.19,52.11},{81.03,149.27},{45.50,98.92},
{50.60,91.80},{73.59,129.07},{88.92,139.84},{92.80,159.34},
{ 6.39,45.68},{64.04,109.08},{57.32,111.22},{36.89,82.67},
{ 2.04,47.08},{ 3.58,43.67},{66.42,131.32},{81.67,145.83},
{ 3.01,28.87},{30.05,69.62},{32.51,91.29},{32.10,56.40},
{74.96,121.89},{66.82,125.73},{72.51,129.45},{ 5.91,48.37},
{37.12,82.47},{ 9.16,48.40},{13.04,46.47},{48.80,95.11},
{58.51,112.16},{44.86,85.77},{56.11,123.07},{82.96,151.82},
{24.90,79.21},{27.30,64.03},{99.30,144.46},{62.24,117.56},
{52.10,91.97},{39.86,79.58},{15.84,72.42},{91.38,151.59},
{39.75,76.49},{49.68,92.98},{53.69,123.67},{76.59,145.25},
{84.40,156.17},{81.04,142.59},{24.22,48.48},{63.39,115.54},
{10.21,40.70},{41.56,62.95},{88.85,137.60},{50.03,118.66},
{48.66,89.36},{57.74,104.91},{74.07,144.74},{77.68,138.69},
{98.53,163.18},{25.40,89.65},{ 4.38,50.45},{59.86,102.93},
{ 2.27,42.85},{81.03,143.24},{20.95,76.89},{52.59,116.92},
{82.19,145.87},{51.90,110.85},{43.83,105.20},{44.13,75.17},
{17.22,61.38},{46.16,92.95},{55.00,117.41},{ 7.73,39.87},
{95.80,164.28},{59.80,104.95},{22.16,52.76},{82.10,141.69},
{94.60,160.59},{18.61,28.99},{ 0.09,47.91},{91.39,158.91},
{65.15,130.03},{ 7.51,53.66},{64.79,130.85},{15.19,69.90},
{44.93,89.05},{18.02,63.77},{18.65,61.04},{66.05,134.15},
{41.95,77.11},{71.75,132.82},{86.89,161.83},{40.11,80.13},
{11.56,54.38},{15.36,72.22},{38.06,89.41},{99.49,182.71},
{11.80,44.98},{32.91,77.44},{92.77,151.86},{16.94,68.22},
{17.24,56.67},{68.12,142.77},{68.15,127.99},{ 3.56,36.04},
{53.17,102.91},{59.10,107.60},{16.95,58.11},{61.04,116.90},
{67.28,132.10},{34.20,67.56},{70.29,130.78},{75.05,117.15},
{96.04,161.15},{16.32,46.04},{ 7.14,43.90},{96.30,167.24},
{99.45,167.72},{15.83,47.52},{74.86,114.53},{37.08,96.05},
{ 6.63,31.29},{76.68,140.83},{38.03,89.69},{35.38,82.67},
{99.18,136.72},{ 1.49,35.32},{40.86,71.52},{36.16,87.19},
{46.66,109.91},{89.29,167.46},{55.40,97.42},{34.92,95.51},
{30.80,86.35},{25.23,63.36},{46.36,86.14},{13.89,65.48},
{55.55,93.72},{25.25,51.43},{82.79,139.96},{52.15,101.20},
{31.66,66.89},{43.96,83.82},{15.40,61.96},{97.62,161.90},
{17.03,44.60},{53.29,93.54},{64.91,130.41},{73.78,142.21},
{59.51,107.07},{87.11,153.09},{86.41,161.30},{17.11,70.42},
{15.93,70.49},{54.23,109.78},{62.93,109.82},{34.17,82.60},
{68.34,146.39},{28.41,64.48},{76.80,129.30},{95.42,151.63},
{64.32,116.92},{93.89,159.68},{74.96,149.71},{14.27,46.96},
{10.64,50.39},{17.18,43.97},{ 2.92,52.04},{96.04,167.13},
{48.51,101.01},{36.54,74.86},{35.91,75.86},{74.21,132.27},
{99.87,149.79},{82.35,148.39},{51.71,103.93},{74.97,133.12},
{94.46,157.28},{34.36,78.95},{40.30,92.46},{99.73,167.41},
{52.16,108.47},{58.01,102.16},{96.05,145.45},{17.18,54.94},
{ 2.62,40.96},{30.13,65.42},{13.35,58.22},{71.31,125.60},
{95.70,158.35},{ 2.73,45.15},{97.83,179.16},{28.52,71.03},
{65.27,103.35},{77.65,126.47},{44.02,99.96},{31.50,71.98},
{30.92,68.42},{ 3.90,33.31},{81.52,133.74},{64.99,132.19},
{ 7.06,55.22},{71.10,128.30},{43.63,88.87},{14.62,60.91},
{57.96,102.69},{22.60,74.92},{71.02,120.52},{72.80,136.35},
{79.02,126.69},{52.49,112.59},{ 0.19,47.94},{47.95,94.10},
{10.43,52.00},{57.04,124.36},{94.75,176.85},{ 6.21,50.17},
{77.08,136.86},{38.25,98.59},{96.31,153.49},{15.63,50.58},
{48.07,96.65},{29.37,91.68},{93.95,162.29},{14.86,64.86},
{55.48,117.13},{39.49,78.66},{17.29,63.56},{21.38,54.13},
{67.63,124.02},{18.74,47.72},{70.95,110.97},{63.18,120.04},
{82.09,145.44},{79.27,140.28},{23.30,75.42},{58.07,128.54},
{ 1.17,38.14},{43.35,85.94},{70.04,125.53},{93.60,159.75},
{ 9.74,42.74},{66.15,119.70},{99.91,153.79},{86.24,170.84},
{70.67,138.70},{49.61,110.31},{17.22,70.28},{46.41,98.86},
{19.76,65.18},{71.78,151.92},{88.22,158.34},{20.27,53.32},
{ 6.66,38.32},{82.44,145.08},{75.28,135.37},{17.33,69.56},
{25.39,90.00},{99.22,175.85},{45.15,86.49},{98.20,166.92},
{68.65,115.71},{91.06,150.84},{88.26,153.55},{ 4.07,47.73},
{35.18,84.76},{ 1.72,49.59},{13.84,69.71},{32.88,64.06},
{28.82,79.54},{14.98,60.96},{91.34,147.91},{94.29,153.25},
{39.27,91.57},{99.21,173.80},{15.22,59.83},{37.42,94.80},
{23.35,49.48},{56.46,91.68},{79.14,148.27},{13.71,62.49},
{45.44,92.67},{27.76,65.51},{72.71,127.57},{79.76,138.44},
{67.54,100.64},{44.33,92.14},{19.99,54.33},{13.21,59.86},
{82.42,137.42},{56.86,101.23},{18.29,44.21},{83.90,126.19},
{54.32,117.82},{11.57,59.56},{40.22,90.54},{ 0.97,24.21},
{13.29,55.09},{61.92,105.11},{19.82,81.97},{57.73,96.16},
{38.86,89.80},{86.58,153.61},{62.66,121.44},{85.51,134.84},
{91.57,158.71},{ 8.84,49.59},{91.57,136.11},{39.01,90.65},
{41.64,88.50},{77.06,146.16},{41.58,96.92},{29.78,72.24},
{ 9.31,63.47},{ 4.12,44.88},{85.92,150.99},{90.09,151.84},
{46.27,95.59},{84.84,134.93},{26.34,57.57},{50.43,96.16},
{ 2.88,25.83},{ 7.11,50.96},{16.51,47.60},{73.89,114.11},
{45.32,88.11},{88.84,132.51},{80.00,123.54},{ 6.47,47.79},
{60.00,106.47},{75.72,146.29},{10.65,62.48},{31.23,73.26},
{77.53,121.10},{40.60,95.22},{48.72,94.30},{50.23,88.26},
{96.85,159.63},{57.33,125.40},{64.74,129.05},{24.94,61.85},
{82.47,147.83},{67.22,124.22},{76.66,131.25},{73.56,151.75},
{19.36,56.66},{83.01,115.34},{41.98,79.77},{27.09,65.30},
{90.54,141.86},{81.78,137.00},{53.45,80.21},{84.43,145.49},
{34.04,84.18},{64.75,142.10},{60.98,106.50},{87.76,147.41},
{77.76,138.39},{80.04,145.45},{26.05,94.32},{97.00,170.04},
{42.05,98.36},{21.13,70.60},{29.70,67.99},{33.38,61.69},
{50.16,89.72},{50.22,100.23},{63.60,120.36},{13.76,54.38},
{53.43,110.84},{71.37,144.37},{ 8.10,56.51},{50.47,119.27},
{50.65,96.47},{10.14,49.66},{ 7.79,74.00},{67.56,119.06},
{58.93,113.17},{24.89,41.82},{52.45,102.32},{32.08,64.43},
{11.02,57.50},{94.14,164.65},{75.71,127.33},{83.84,134.81},
{96.60,168.54},{72.00,135.66},{53.03,105.83},{32.21,58.94},
{31.03,79.56},{83.04,144.26},{78.58,137.20},{87.36,140.76},
{68.41,150.16},{ 8.12,54.89},{63.22,118.29},{27.54,63.52},
{53.60,100.09},{60.42,98.19},{ 6.88,55.69},{26.33,69.75},
{72.19,132.73},{70.87,125.99},{97.80,168.70},{47.03,88.44},
{18.91,84.53},{10.86,56.49},{95.26,166.77},{89.35,160.12},
{ 1.11,29.40},{71.91,124.64},{50.05,92.00},{ 1.88,49.75},
{33.74,75.65},{99.84,164.44},{17.57,53.77},{75.64,137.60},
{ 6.76,38.31},{15.42,54.80},{90.43,151.35},{38.00,86.86},
{54.83,128.48},{ 5.00,48.26},{99.41,165.03},{55.49,136.74},
{17.69,66.98},{78.11,165.26},{74.17,117.71},{52.17,95.12},
{33.65,89.10},{31.03,88.57},{76.86,117.08},{96.81,165.16},
{21.64,75.28},{86.85,145.70},{85.75,158.93},{29.87,74.72},
{11.91,44.00},{23.40,74.94},{88.53,148.97},{70.23,124.86},
{43.71,91.50},{49.77,85.70},{29.28,67.78},{12.04,53.16},
{54.39,92.06},{51.96,85.72},{69.06,128.88},{80.24,150.69},
{26.16,69.57},{60.24,134.05},{ 3.23,34.58},{43.07,111.18},
{ 8.28,46.68},{23.92,56.04},{50.95,80.65},{17.20,40.50},
{55.76,107.63},{ 2.94,55.66},{80.80,152.89},{72.09,129.29},
{23.06,46.95},{54.25,118.47},{74.87,129.45},{18.46,52.04},
{46.08,98.46},{15.14,43.60},{75.59,119.50},{ 8.46,26.29},
{38.03,67.55},{20.59,80.62},{42.95,99.22},{14.76,48.50},
{62.18,107.07},{ 2.41,46.26},{68.55,139.84},{91.19,156.14},
{65.64,153.56},{26.91,67.76},{84.73,141.90},{55.04,114.08},
{53.28,96.66},{72.34,121.86},{35.21,61.10},{25.86,68.32},
{40.80,70.62},{83.16,136.63},{ 1.84,44.66},{98.14,165.56},
{92.78,166.98},{ 4.08,41.70},{ 1.70,32.25},{24.23,63.25},
{72.69,139.53},{11.85,54.34},{17.17,64.66},{34.42,71.95},
{48.25,109.59},{41.39,85.48},{ 3.11,51.08},{98.52,174.32},
{64.12,116.37},{21.65,72.72},{69.95,142.06},{85.71,138.26},
{74.60,133.55},{18.65,49.50},{12.47,43.50},{85.34,142.94},
{54.57,116.95},{37.47,87.34},{81.35,156.19},{90.42,167.55},
{32.62,83.33},{43.90,81.36},{40.76,83.87},{27.46,61.84},
{ 0.71,39.30},{50.49,97.46},{63.21,104.66},{85.29,143.18},
{66.07,118.09},{41.01,62.63},{70.07,107.34},{89.88,146.24},
{24.27,72.41},{11.67,52.46},{ 2.46,45.31},{90.44,152.17},
{30.21,63.25},{19.93,51.17},{54.78,103.51},{81.78,137.70},
{50.42,95.37},{36.57,84.66},{56.07,99.49},{93.33,171.32},
{42.89,81.41},{95.73,146.55},{15.09,48.90},{38.77,77.29},
{25.12,72.50},{51.68,116.94},{73.35,131.87},{86.30,141.22},
{18.64,68.35},{42.82,103.58},{18.05,60.95},{ 0.93,42.06},
{51.92,105.51},{86.17,151.87},{78.51,132.91},{71.60,138.14},
{60.94,107.61},{25.73,73.76},{89.77,146.34},{17.86,66.42},
{17.32,62.95},{17.74,58.61},{17.62,74.78},{29.49,69.46},
{ 6.97,46.16},{66.82,122.03},{65.83,125.74},{81.11,141.75},
{ 3.66,41.01},{47.10,103.63},{30.08,92.55},{13.74,57.80},
{71.11,119.96},{85.53,134.01},{30.06,75.18},{ 6.39,55.28},
{ 4.71,58.24},{90.58,156.30},{33.88,74.17},{30.15,58.67},
{ 3.13,45.77},{48.51,92.11},{32.87,80.67},{23.06,83.17},
{15.07,56.49},{22.75,76.55},{65.04,133.02},{66.48,107.61},
{10.28,49.68},{59.05,107.49},{19.16,67.00},{60.15,101.76},
{65.10,114.80},{76.70,132.78},{38.18,81.59},{22.45,71.10},
{ 5.95,48.36},{10.36,56.33},{21.70,67.53},{89.43,150.56},
{90.66,145.45},{18.83,66.13},{37.02,81.86},{83.30,136.05},
{49.76,96.94},{ 8.59,42.07},{99.14,165.45},{66.61,140.27},
{59.13,106.74},{13.69,64.66},{ 3.69,37.62},{82.55,152.57},
{16.86,59.16},{45.19,105.01},{93.84,162.69},{21.89,86.05},
{61.30,108.80},{41.07,89.96},{49.43,89.37},{72.23,122.68},
{30.12,62.82},{ 3.66,51.65},{92.08,146.13},{14.08,51.36},
{70.36,109.49},{49.30,95.77},{30.97,86.91},{37.02,86.69},
{87.33,159.73},{ 9.21,50.78},{56.33,97.30},{87.10,151.05},
{96.46,176.35},{32.08,79.44},{39.92,78.08},{34.26,71.62},
{54.20,116.50},{61.93,143.59},{ 0.17,28.98},{20.02,68.47},
{67.10,124.67},{10.50,55.32},{17.92,80.62},{ 1.13,49.11},
{23.42,61.62},{20.61,60.61},{58.59,130.42},{45.68,109.39},
{40.65,89.41},{40.52,96.88},{32.28,98.28},{24.68,70.29},
{97.32,146.42},{ 6.22,68.36},{64.16,112.26},{58.26,100.94},
{52.43,102.08},{35.20,91.98},{99.87,169.63},{ 7.17,41.08},
{92.21,152.49},{89.21,163.34},{94.95,160.36},{ 6.20,52.92},
{24.68,69.97},{88.56,166.68},{24.08,74.85},{20.38,66.00},
{84.57,148.39},{84.11,139.97},{40.21,105.66},{51.88,84.25},
{19.02,75.66},{97.92,164.22},{38.86,100.02},{76.97,131.01},
{85.08,145.73},{55.31,110.56},{58.80,123.03},{30.48,68.51},
{90.37,161.69},{92.93,157.06},{62.33,111.57},{28.72,67.78},
{66.38,117.51},{74.84,125.32},{62.34,127.23},{93.96,149.34},
{70.54,128.38},{78.01,139.64},{47.93,102.30},{61.76,122.96},
{88.68,152.56},{26.34,61.63},{50.17,104.98},{17.34,59.56},
{50.20,99.25},{24.46,71.96},{22.46,44.42},{75.85,118.58},
{22.97,77.21},{85.67,161.32},{32.35,98.54},{15.42,45.56},
{41.59,77.31},{82.11,143.74},{54.00,113.73},{ 3.46,59.65},
{ 1.92,34.47},{32.21,82.73},{39.94,78.28},{25.55,48.17},
{ 7.17,36.43},{ 8.83,24.42},{84.19,130.80},{10.86,54.87},
{44.58,86.79},{30.70,84.62},{ 2.96,44.81},{68.91,124.92},
{ 3.96,46.02},{ 9.65,33.46},{12.03,57.22},{50.41,96.71},
{17.40,61.16},{69.93,128.22},{93.95,147.08},{16.05,60.44},
{31.23,91.22},{51.78,91.57},{77.23,138.76},{14.60,60.31},
{58.51,105.52},{27.08,63.96},{95.07,163.48},{29.52,74.84},
{63.46,117.37},{82.11,139.92},{76.64,137.90},{28.58,74.39},
{19.20,62.95},{60.15,125.63},{99.02,157.54},{73.31,117.87},
{92.20,153.13},{90.70,154.11},{ 5.70,47.08},{60.30,108.19},
{32.09,70.53},{28.52,63.25},{10.76,49.56},{ 2.35,37.68},
{57.60,100.04},{26.49,66.68},{93.57,167.30},{25.95,85.51},
{ 7.44,39.17},{58.98,118.56},{21.96,58.41},{12.65,46.49},
{25.43,61.37},{17.02,49.31},{98.97,176.85},{45.53,83.28},
{65.89,127.86},{49.86,99.94},{16.78,57.64},{95.62,151.48},
{24.37,48.55},{57.74,113.98},{26.07,78.93},{14.95,71.57},
{28.77,66.55},{15.07,43.63},{80.59,137.39},{64.30,128.21},
{81.54,107.43},{86.39,160.85},{87.96,138.03},{35.68,95.12},
{17.28,55.07},{90.78,154.10},{88.52,163.38},{92.19,163.85},
{61.82,119.93},{52.13,107.98},{89.66,142.94},{94.27,166.71}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
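/*
 * Host-side RMS error of the candidate line y = m*x + c over the full data
 * set: sqrt( (1/N) * sum_i ((m*x_i + c) - y_i)^2 ), where N = no_of_data.
 */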
double rms_error_calc(double m, double c) {
int loop; // counter
double mean_value;
double error_sum = 0;
for(loop=0; loop<no_of_data; loop++) {
error_sum += residual_error(data[loop].x, data[loop].y, m, c);
}
mean_value = error_sum / no_of_data;
return sqrt(mean_value);
}
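/*
 * Device-side counterpart: each thread computes the squared residual of one
 * data point and stores it in error_sum_arr; main() launches 100 blocks of
 * 10 threads, i.e. exactly one thread per element of the 1000-point data set.
 */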
__global__ void d_rms_error_calc(double *m, double *c,double *error_sum_arr,plot_coordinate *d_data) {
int loop = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[loop] = d_residual_error(d_data[loop].x,d_data[loop].y, *m, *c);
}
/* main function */
int main(){
int loop;
double m_base = 1.3;
double c_base = 10;
double e_base;
double m[8];
double c[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double m_origin[] = {0,1,1, 1, 0,-1,-1,-1};
double c_origin[] = {1,1,0,-1,-1,-1, 0, 1};
// initializing timer.c components
struct timespec timer_start, timer_stop;
long long int time_taken_for_execution;
// timer starts here
clock_gettime(CLOCK_MONOTONIC, &timer_start);
printf("\n===============================================================================\n");
printf("!! OUTPUT FOR LINEAR REGRESSION !! \n");
printf("===============================================================================\n\n");
hipError_t error;
double *d_m;
double *d_c;
double *d_error_sum_arr;
plot_coordinate *d_data;
e_base= rms_error_calc(m_base,c_base);
error=hipMalloc(&d_m,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_m returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_c,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_c returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"hipMalloc on d_error_sum_arr returned %d %s\n",error, //371
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"hipMalloc on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(loop=0; loop<8; loop++) {
m[loop] = m_base + (m_origin[loop] * step);
c[loop] = c_base + (c_origin[loop] * step);
}
error = hipMemcpy(d_m,m,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_m returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_c,c,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_c returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data,sizeof(data), hipMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"hipMemcpy to d_data returned %d %s\n",error,
hipGetErrorString(error));
}
for(loop=0; loop<8; loop++){
double h_error_sum_arr[1000];
    double error_sum_total = 0;
double error_sum_mean;
    hipLaunchKernelGGL(d_rms_error_calc, dim3(100), dim3(10), 0, 0, &d_m[loop], &d_c[loop], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error =hipMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000), hipMemcpyDeviceToHost);
if(error){
fprintf(stderr,"hipMemcpy to error_sum returned %d %s\n",error,
hipGetErrorString(error));
}
for(int loop2=0; loop2<no_of_data; loop2++){
error_sum_total+= h_error_sum_arr[loop2];
}
error_sum_mean = error_sum_total / no_of_data;
e[loop] =sqrt(error_sum_mean);
    if(e[loop] < best_error){
      best_error = e[loop];
      best_error_i = loop;
    }
}
if(best_error <e_base){
e_base = best_error;
m_base = m[best_error_i];
c_base = c[best_error_i];
}
else {
minimum_found = 1;
}
}
error = hipFree(d_m);
if(error){
fprintf(stderr,"hipFree on d_m returned %d %s\n",error,
hipGetErrorString(error)); //453
exit(1);
}
error = hipFree(d_c);
if(error){
fprintf(stderr,"hipFree on d_c returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr,"hipFree on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr,"hipFree on d_error_sum_arr returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
// to print best result
printf("minimum m,c is %lf,%lf with error %lf", m_base, c_base, e_base);
// timer stops here
clock_gettime(CLOCK_MONOTONIC, &timer_stop);
timer_calc(&timer_start, &timer_stop, &time_taken_for_execution);
// output of time taken for execution is displayed
printf("\n\n===============================================================================\n");
printf("!! TIME TAKEN FOR EXECUTION !! \n");
printf("===============================================================================\n\n");
printf("Nanoseconds: %lld\n", time_taken_for_execution);
printf("Seconds: %0.9lf\n", ((time_taken_for_execution/1.0e9)));
printf("Minutes: %0.4lf\n", ((time_taken_for_execution/1.0e9)/60));
printf("Hours: %0.2lf\n", ((time_taken_for_execution/1.0e9)/3600));
return 0;
};
| a97f9a4a796b9db50a21bfada51081e55c4ce0c6.cu | #include <stdio.h>
#include <math.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
#include "timer.c"
/*********************************************************
*
*
* To Compile:
* nvcc -o cuda_linear_regression cuda_linear_regression.cu -lm
*
*
* To Run:
* ./cuda_linear_regression
*
*
*********************************************************/
typedef struct plot_coordinate{
double x; // x co-ordinate
double y; // y co-ordinate
}plot_coordinate;
int no_of_data = 1000;
__device__ int d_n_data =1000;
plot_coordinate data[] = {
{82.45,155.07},{65.27,121.45},{67.29,117.39},{72.17,120.95},
{69.42,132.58},{76.35,137.91},{79.20,151.69},{71.97,123.08},
{85.03,137.12},{78.83,136.47},{71.34,131.75},{66.14,129.76},
{65.22,111.73},{77.67,137.24},{73.30,105.03},{71.56,120.18},
{66.92,105.91},{69.09,134.67},{54.03,108.08},{61.79,114.62},
{67.52,119.60},{31.12,75.51},{13.49,50.66},{61.43,134.15},
{51.51,107.20},{93.87,149.32},{98.59,167.92},{94.93,146.15},
{32.47,67.59},{36.91,92.19},{45.36,104.11},{42.58,97.37},
{ 2.38,35.79},{52.07,114.35},{40.76,111.33},{35.44,98.07},
{57.03,114.02},{17.15,65.52},{26.63,75.12},{68.64,132.38},
{87.73,137.17},{43.40,106.42},{59.12,103.58},{ 5.83,35.24},
{31.03,79.78},{68.56,127.27},{21.54,60.20},{19.62,67.80},
{61.39,128.09},{45.79,89.44},{16.02,64.22},{19.78,65.61},
{34.76,88.37},{45.97,85.20},{88.74,145.02},{76.48,129.69},
{19.76,56.76},{87.72,157.39},{66.75,118.41},{63.57,121.44},
{29.80,87.78},{32.63,85.94},{75.87,134.69},{ 0.85,40.28},
{94.47,163.58},{72.99,135.55},{64.22,127.04},{ 3.32,40.20},
{ 6.88,42.32},{31.08,75.99},{60.22,120.13},{17.45,60.36},
{57.29,105.03},{49.31,82.69},{11.87,61.21},{81.39,144.96},
{48.71,78.63},{11.23,36.32},{16.35,54.14},{19.70,57.20},
{17.03,63.54},{84.59,154.43},{ 7.41,42.73},{43.82,81.77},
{49.21,107.45},{53.00,95.15},{13.27,45.40},{67.77,128.98},
{93.90,165.29},{93.29,173.90},{40.02,86.36},{22.79,75.44},
{98.39,167.10},{94.95,164.10},{60.08,127.24},{ 6.06,45.37},
{61.35,121.18},{ 4.95,27.34},{23.32,66.61},{32.79,88.38},
{83.20,143.97},{60.59,126.92},{ 7.93,70.52},{94.46,152.96},
{71.82,121.20},{59.27,107.93},{64.93,142.11},{94.58,163.94},
{ 7.21,40.58},{52.74,116.16},{79.42,120.53},{ 7.82,30.57},
{ 9.83,45.29},{58.21,108.86},{48.11,107.39},{88.55,140.09},
{29.26,71.16},{34.96,80.00},{ 1.12,23.12},{55.14,104.13},
{19.82,50.32},{43.38,83.62},{24.62,51.03},{62.84,101.88},
{26.88,60.06},{94.48,144.08},{95.14,157.80},{47.90,85.47},
{90.90,143.82},{39.53,79.74},{80.77,155.44},{ 6.07,17.43},
{56.88,103.70},{43.95,84.71},{16.12,45.98},{ 5.12,44.40},
{81.71,134.56},{24.30,45.54},{83.68,146.90},{17.62,49.28},
{42.10,97.75},{41.25,84.38},{82.68,155.74},{44.56,95.45},
{85.21,142.50},{73.50,125.45},{ 3.45,52.95},{30.65,73.60},
{29.33,76.20},{30.31,85.46},{69.41,135.79},{73.21,133.16},
{40.62,87.68},{26.38,65.16},{ 5.14,59.66},{94.33,160.01},
{ 6.52,52.57},{90.79,146.06},{ 9.78,55.77},{ 4.71,53.43},
{74.01,129.97},{68.72,119.11},{16.35,59.99},{44.08,109.17},
{31.02,63.78},{14.76,33.17},{62.63,126.09},{55.88,96.90},
{57.41,99.30},{83.66,131.04},{86.08,175.22},{81.13,140.01},
{18.25,71.09},{65.68,104.02},{66.08,122.24},{48.81,96.28},
{79.07,132.27},{20.07,67.34},{16.24,48.49},{30.98,85.11},
{ 2.27,45.14},{44.11,76.86},{ 2.49,45.65},{72.96,136.23},
{89.49,156.60},{54.51,105.71},{92.23,153.22},{95.02,160.48},
{73.99,111.16},{52.70,93.18},{90.82,154.82},{53.42,100.57},
{19.77,60.95},{26.30,63.93},{23.07,54.59},{88.86,142.32},
{98.65,175.75},{76.19,130.10},{59.20,111.38},{58.43,121.18},
{33.27,82.74},{74.68,126.95},{88.64,141.44},{81.47,117.66},
{99.22,170.99},{98.17,163.34},{91.54,144.52},{17.22,67.20},
{66.49,115.36},{68.68,128.45},{ 1.35,54.22},{47.22,98.90},
{79.94,147.19},{22.05,76.35},{50.23,102.66},{ 5.97,37.93},
{67.56,98.13},{18.19,52.11},{81.03,149.27},{45.50,98.92},
{50.60,91.80},{73.59,129.07},{88.92,139.84},{92.80,159.34},
{ 6.39,45.68},{64.04,109.08},{57.32,111.22},{36.89,82.67},
{ 2.04,47.08},{ 3.58,43.67},{66.42,131.32},{81.67,145.83},
{ 3.01,28.87},{30.05,69.62},{32.51,91.29},{32.10,56.40},
{74.96,121.89},{66.82,125.73},{72.51,129.45},{ 5.91,48.37},
{37.12,82.47},{ 9.16,48.40},{13.04,46.47},{48.80,95.11},
{58.51,112.16},{44.86,85.77},{56.11,123.07},{82.96,151.82},
{24.90,79.21},{27.30,64.03},{99.30,144.46},{62.24,117.56},
{52.10,91.97},{39.86,79.58},{15.84,72.42},{91.38,151.59},
{39.75,76.49},{49.68,92.98},{53.69,123.67},{76.59,145.25},
{84.40,156.17},{81.04,142.59},{24.22,48.48},{63.39,115.54},
{10.21,40.70},{41.56,62.95},{88.85,137.60},{50.03,118.66},
{48.66,89.36},{57.74,104.91},{74.07,144.74},{77.68,138.69},
{98.53,163.18},{25.40,89.65},{ 4.38,50.45},{59.86,102.93},
{ 2.27,42.85},{81.03,143.24},{20.95,76.89},{52.59,116.92},
{82.19,145.87},{51.90,110.85},{43.83,105.20},{44.13,75.17},
{17.22,61.38},{46.16,92.95},{55.00,117.41},{ 7.73,39.87},
{95.80,164.28},{59.80,104.95},{22.16,52.76},{82.10,141.69},
{94.60,160.59},{18.61,28.99},{ 0.09,47.91},{91.39,158.91},
{65.15,130.03},{ 7.51,53.66},{64.79,130.85},{15.19,69.90},
{44.93,89.05},{18.02,63.77},{18.65,61.04},{66.05,134.15},
{41.95,77.11},{71.75,132.82},{86.89,161.83},{40.11,80.13},
{11.56,54.38},{15.36,72.22},{38.06,89.41},{99.49,182.71},
{11.80,44.98},{32.91,77.44},{92.77,151.86},{16.94,68.22},
{17.24,56.67},{68.12,142.77},{68.15,127.99},{ 3.56,36.04},
{53.17,102.91},{59.10,107.60},{16.95,58.11},{61.04,116.90},
{67.28,132.10},{34.20,67.56},{70.29,130.78},{75.05,117.15},
{96.04,161.15},{16.32,46.04},{ 7.14,43.90},{96.30,167.24},
{99.45,167.72},{15.83,47.52},{74.86,114.53},{37.08,96.05},
{ 6.63,31.29},{76.68,140.83},{38.03,89.69},{35.38,82.67},
{99.18,136.72},{ 1.49,35.32},{40.86,71.52},{36.16,87.19},
{46.66,109.91},{89.29,167.46},{55.40,97.42},{34.92,95.51},
{30.80,86.35},{25.23,63.36},{46.36,86.14},{13.89,65.48},
{55.55,93.72},{25.25,51.43},{82.79,139.96},{52.15,101.20},
{31.66,66.89},{43.96,83.82},{15.40,61.96},{97.62,161.90},
{17.03,44.60},{53.29,93.54},{64.91,130.41},{73.78,142.21},
{59.51,107.07},{87.11,153.09},{86.41,161.30},{17.11,70.42},
{15.93,70.49},{54.23,109.78},{62.93,109.82},{34.17,82.60},
{68.34,146.39},{28.41,64.48},{76.80,129.30},{95.42,151.63},
{64.32,116.92},{93.89,159.68},{74.96,149.71},{14.27,46.96},
{10.64,50.39},{17.18,43.97},{ 2.92,52.04},{96.04,167.13},
{48.51,101.01},{36.54,74.86},{35.91,75.86},{74.21,132.27},
{99.87,149.79},{82.35,148.39},{51.71,103.93},{74.97,133.12},
{94.46,157.28},{34.36,78.95},{40.30,92.46},{99.73,167.41},
{52.16,108.47},{58.01,102.16},{96.05,145.45},{17.18,54.94},
{ 2.62,40.96},{30.13,65.42},{13.35,58.22},{71.31,125.60},
{95.70,158.35},{ 2.73,45.15},{97.83,179.16},{28.52,71.03},
{65.27,103.35},{77.65,126.47},{44.02,99.96},{31.50,71.98},
{30.92,68.42},{ 3.90,33.31},{81.52,133.74},{64.99,132.19},
{ 7.06,55.22},{71.10,128.30},{43.63,88.87},{14.62,60.91},
{57.96,102.69},{22.60,74.92},{71.02,120.52},{72.80,136.35},
{79.02,126.69},{52.49,112.59},{ 0.19,47.94},{47.95,94.10},
{10.43,52.00},{57.04,124.36},{94.75,176.85},{ 6.21,50.17},
{77.08,136.86},{38.25,98.59},{96.31,153.49},{15.63,50.58},
{48.07,96.65},{29.37,91.68},{93.95,162.29},{14.86,64.86},
{55.48,117.13},{39.49,78.66},{17.29,63.56},{21.38,54.13},
{67.63,124.02},{18.74,47.72},{70.95,110.97},{63.18,120.04},
{82.09,145.44},{79.27,140.28},{23.30,75.42},{58.07,128.54},
{ 1.17,38.14},{43.35,85.94},{70.04,125.53},{93.60,159.75},
{ 9.74,42.74},{66.15,119.70},{99.91,153.79},{86.24,170.84},
{70.67,138.70},{49.61,110.31},{17.22,70.28},{46.41,98.86},
{19.76,65.18},{71.78,151.92},{88.22,158.34},{20.27,53.32},
{ 6.66,38.32},{82.44,145.08},{75.28,135.37},{17.33,69.56},
{25.39,90.00},{99.22,175.85},{45.15,86.49},{98.20,166.92},
{68.65,115.71},{91.06,150.84},{88.26,153.55},{ 4.07,47.73},
{35.18,84.76},{ 1.72,49.59},{13.84,69.71},{32.88,64.06},
{28.82,79.54},{14.98,60.96},{91.34,147.91},{94.29,153.25},
{39.27,91.57},{99.21,173.80},{15.22,59.83},{37.42,94.80},
{23.35,49.48},{56.46,91.68},{79.14,148.27},{13.71,62.49},
{45.44,92.67},{27.76,65.51},{72.71,127.57},{79.76,138.44},
{67.54,100.64},{44.33,92.14},{19.99,54.33},{13.21,59.86},
{82.42,137.42},{56.86,101.23},{18.29,44.21},{83.90,126.19},
{54.32,117.82},{11.57,59.56},{40.22,90.54},{ 0.97,24.21},
{13.29,55.09},{61.92,105.11},{19.82,81.97},{57.73,96.16},
{38.86,89.80},{86.58,153.61},{62.66,121.44},{85.51,134.84},
{91.57,158.71},{ 8.84,49.59},{91.57,136.11},{39.01,90.65},
{41.64,88.50},{77.06,146.16},{41.58,96.92},{29.78,72.24},
{ 9.31,63.47},{ 4.12,44.88},{85.92,150.99},{90.09,151.84},
{46.27,95.59},{84.84,134.93},{26.34,57.57},{50.43,96.16},
{ 2.88,25.83},{ 7.11,50.96},{16.51,47.60},{73.89,114.11},
{45.32,88.11},{88.84,132.51},{80.00,123.54},{ 6.47,47.79},
{60.00,106.47},{75.72,146.29},{10.65,62.48},{31.23,73.26},
{77.53,121.10},{40.60,95.22},{48.72,94.30},{50.23,88.26},
{96.85,159.63},{57.33,125.40},{64.74,129.05},{24.94,61.85},
{82.47,147.83},{67.22,124.22},{76.66,131.25},{73.56,151.75},
{19.36,56.66},{83.01,115.34},{41.98,79.77},{27.09,65.30},
{90.54,141.86},{81.78,137.00},{53.45,80.21},{84.43,145.49},
{34.04,84.18},{64.75,142.10},{60.98,106.50},{87.76,147.41},
{77.76,138.39},{80.04,145.45},{26.05,94.32},{97.00,170.04},
{42.05,98.36},{21.13,70.60},{29.70,67.99},{33.38,61.69},
{50.16,89.72},{50.22,100.23},{63.60,120.36},{13.76,54.38},
{53.43,110.84},{71.37,144.37},{ 8.10,56.51},{50.47,119.27},
{50.65,96.47},{10.14,49.66},{ 7.79,74.00},{67.56,119.06},
{58.93,113.17},{24.89,41.82},{52.45,102.32},{32.08,64.43},
{11.02,57.50},{94.14,164.65},{75.71,127.33},{83.84,134.81},
{96.60,168.54},{72.00,135.66},{53.03,105.83},{32.21,58.94},
{31.03,79.56},{83.04,144.26},{78.58,137.20},{87.36,140.76},
{68.41,150.16},{ 8.12,54.89},{63.22,118.29},{27.54,63.52},
{53.60,100.09},{60.42,98.19},{ 6.88,55.69},{26.33,69.75},
{72.19,132.73},{70.87,125.99},{97.80,168.70},{47.03,88.44},
{18.91,84.53},{10.86,56.49},{95.26,166.77},{89.35,160.12},
{ 1.11,29.40},{71.91,124.64},{50.05,92.00},{ 1.88,49.75},
{33.74,75.65},{99.84,164.44},{17.57,53.77},{75.64,137.60},
{ 6.76,38.31},{15.42,54.80},{90.43,151.35},{38.00,86.86},
{54.83,128.48},{ 5.00,48.26},{99.41,165.03},{55.49,136.74},
{17.69,66.98},{78.11,165.26},{74.17,117.71},{52.17,95.12},
{33.65,89.10},{31.03,88.57},{76.86,117.08},{96.81,165.16},
{21.64,75.28},{86.85,145.70},{85.75,158.93},{29.87,74.72},
{11.91,44.00},{23.40,74.94},{88.53,148.97},{70.23,124.86},
{43.71,91.50},{49.77,85.70},{29.28,67.78},{12.04,53.16},
{54.39,92.06},{51.96,85.72},{69.06,128.88},{80.24,150.69},
{26.16,69.57},{60.24,134.05},{ 3.23,34.58},{43.07,111.18},
{ 8.28,46.68},{23.92,56.04},{50.95,80.65},{17.20,40.50},
{55.76,107.63},{ 2.94,55.66},{80.80,152.89},{72.09,129.29},
{23.06,46.95},{54.25,118.47},{74.87,129.45},{18.46,52.04},
{46.08,98.46},{15.14,43.60},{75.59,119.50},{ 8.46,26.29},
{38.03,67.55},{20.59,80.62},{42.95,99.22},{14.76,48.50},
{62.18,107.07},{ 2.41,46.26},{68.55,139.84},{91.19,156.14},
{65.64,153.56},{26.91,67.76},{84.73,141.90},{55.04,114.08},
{53.28,96.66},{72.34,121.86},{35.21,61.10},{25.86,68.32},
{40.80,70.62},{83.16,136.63},{ 1.84,44.66},{98.14,165.56},
{92.78,166.98},{ 4.08,41.70},{ 1.70,32.25},{24.23,63.25},
{72.69,139.53},{11.85,54.34},{17.17,64.66},{34.42,71.95},
{48.25,109.59},{41.39,85.48},{ 3.11,51.08},{98.52,174.32},
{64.12,116.37},{21.65,72.72},{69.95,142.06},{85.71,138.26},
{74.60,133.55},{18.65,49.50},{12.47,43.50},{85.34,142.94},
{54.57,116.95},{37.47,87.34},{81.35,156.19},{90.42,167.55},
{32.62,83.33},{43.90,81.36},{40.76,83.87},{27.46,61.84},
{ 0.71,39.30},{50.49,97.46},{63.21,104.66},{85.29,143.18},
{66.07,118.09},{41.01,62.63},{70.07,107.34},{89.88,146.24},
{24.27,72.41},{11.67,52.46},{ 2.46,45.31},{90.44,152.17},
{30.21,63.25},{19.93,51.17},{54.78,103.51},{81.78,137.70},
{50.42,95.37},{36.57,84.66},{56.07,99.49},{93.33,171.32},
{42.89,81.41},{95.73,146.55},{15.09,48.90},{38.77,77.29},
{25.12,72.50},{51.68,116.94},{73.35,131.87},{86.30,141.22},
{18.64,68.35},{42.82,103.58},{18.05,60.95},{ 0.93,42.06},
{51.92,105.51},{86.17,151.87},{78.51,132.91},{71.60,138.14},
{60.94,107.61},{25.73,73.76},{89.77,146.34},{17.86,66.42},
{17.32,62.95},{17.74,58.61},{17.62,74.78},{29.49,69.46},
{ 6.97,46.16},{66.82,122.03},{65.83,125.74},{81.11,141.75},
{ 3.66,41.01},{47.10,103.63},{30.08,92.55},{13.74,57.80},
{71.11,119.96},{85.53,134.01},{30.06,75.18},{ 6.39,55.28},
{ 4.71,58.24},{90.58,156.30},{33.88,74.17},{30.15,58.67},
{ 3.13,45.77},{48.51,92.11},{32.87,80.67},{23.06,83.17},
{15.07,56.49},{22.75,76.55},{65.04,133.02},{66.48,107.61},
{10.28,49.68},{59.05,107.49},{19.16,67.00},{60.15,101.76},
{65.10,114.80},{76.70,132.78},{38.18,81.59},{22.45,71.10},
{ 5.95,48.36},{10.36,56.33},{21.70,67.53},{89.43,150.56},
{90.66,145.45},{18.83,66.13},{37.02,81.86},{83.30,136.05},
{49.76,96.94},{ 8.59,42.07},{99.14,165.45},{66.61,140.27},
{59.13,106.74},{13.69,64.66},{ 3.69,37.62},{82.55,152.57},
{16.86,59.16},{45.19,105.01},{93.84,162.69},{21.89,86.05},
{61.30,108.80},{41.07,89.96},{49.43,89.37},{72.23,122.68},
{30.12,62.82},{ 3.66,51.65},{92.08,146.13},{14.08,51.36},
{70.36,109.49},{49.30,95.77},{30.97,86.91},{37.02,86.69},
{87.33,159.73},{ 9.21,50.78},{56.33,97.30},{87.10,151.05},
{96.46,176.35},{32.08,79.44},{39.92,78.08},{34.26,71.62},
{54.20,116.50},{61.93,143.59},{ 0.17,28.98},{20.02,68.47},
{67.10,124.67},{10.50,55.32},{17.92,80.62},{ 1.13,49.11},
{23.42,61.62},{20.61,60.61},{58.59,130.42},{45.68,109.39},
{40.65,89.41},{40.52,96.88},{32.28,98.28},{24.68,70.29},
{97.32,146.42},{ 6.22,68.36},{64.16,112.26},{58.26,100.94},
{52.43,102.08},{35.20,91.98},{99.87,169.63},{ 7.17,41.08},
{92.21,152.49},{89.21,163.34},{94.95,160.36},{ 6.20,52.92},
{24.68,69.97},{88.56,166.68},{24.08,74.85},{20.38,66.00},
{84.57,148.39},{84.11,139.97},{40.21,105.66},{51.88,84.25},
{19.02,75.66},{97.92,164.22},{38.86,100.02},{76.97,131.01},
{85.08,145.73},{55.31,110.56},{58.80,123.03},{30.48,68.51},
{90.37,161.69},{92.93,157.06},{62.33,111.57},{28.72,67.78},
{66.38,117.51},{74.84,125.32},{62.34,127.23},{93.96,149.34},
{70.54,128.38},{78.01,139.64},{47.93,102.30},{61.76,122.96},
{88.68,152.56},{26.34,61.63},{50.17,104.98},{17.34,59.56},
{50.20,99.25},{24.46,71.96},{22.46,44.42},{75.85,118.58},
{22.97,77.21},{85.67,161.32},{32.35,98.54},{15.42,45.56},
{41.59,77.31},{82.11,143.74},{54.00,113.73},{ 3.46,59.65},
{ 1.92,34.47},{32.21,82.73},{39.94,78.28},{25.55,48.17},
{ 7.17,36.43},{ 8.83,24.42},{84.19,130.80},{10.86,54.87},
{44.58,86.79},{30.70,84.62},{ 2.96,44.81},{68.91,124.92},
{ 3.96,46.02},{ 9.65,33.46},{12.03,57.22},{50.41,96.71},
{17.40,61.16},{69.93,128.22},{93.95,147.08},{16.05,60.44},
{31.23,91.22},{51.78,91.57},{77.23,138.76},{14.60,60.31},
{58.51,105.52},{27.08,63.96},{95.07,163.48},{29.52,74.84},
{63.46,117.37},{82.11,139.92},{76.64,137.90},{28.58,74.39},
{19.20,62.95},{60.15,125.63},{99.02,157.54},{73.31,117.87},
{92.20,153.13},{90.70,154.11},{ 5.70,47.08},{60.30,108.19},
{32.09,70.53},{28.52,63.25},{10.76,49.56},{ 2.35,37.68},
{57.60,100.04},{26.49,66.68},{93.57,167.30},{25.95,85.51},
{ 7.44,39.17},{58.98,118.56},{21.96,58.41},{12.65,46.49},
{25.43,61.37},{17.02,49.31},{98.97,176.85},{45.53,83.28},
{65.89,127.86},{49.86,99.94},{16.78,57.64},{95.62,151.48},
{24.37,48.55},{57.74,113.98},{26.07,78.93},{14.95,71.57},
{28.77,66.55},{15.07,43.63},{80.59,137.39},{64.30,128.21},
{81.54,107.43},{86.39,160.85},{87.96,138.03},{35.68,95.12},
{17.28,55.07},{90.78,154.10},{88.52,163.38},{92.19,163.85},
{61.82,119.93},{52.13,107.98},{89.66,142.94},{94.27,166.71}
};
double residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
__device__ double d_residual_error(double x, double y, double m, double c) {
double e = (m * x) + c - y;
return e * e;
}
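/*
 * Host-side RMS error of the candidate line y = m*x + c over the full data
 * set: sqrt( (1/N) * sum_i ((m*x_i + c) - y_i)^2 ), where N = no_of_data.
 */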
double rms_error_calc(double m, double c) {
int loop; // counter
double mean_value;
double error_sum = 0;
for(loop=0; loop<no_of_data; loop++) {
error_sum += residual_error(data[loop].x, data[loop].y, m, c);
}
mean_value = error_sum / no_of_data;
return sqrt(mean_value);
}
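/*
 * Device-side counterpart: each thread computes the squared residual of one
 * data point and stores it in error_sum_arr; main() launches 100 blocks of
 * 10 threads, i.e. exactly one thread per element of the 1000-point data set.
 */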
__global__ void d_rms_error_calc(double *m, double *c,double *error_sum_arr,plot_coordinate *d_data) {
int loop = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[loop] = d_residual_error(d_data[loop].x,d_data[loop].y, *m, *c);
}
/* main function */
int main(){
int loop;
double m_base = 1.3;
double c_base = 10;
double e_base;
double m[8];
double c[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
double m_origin[] = {0,1,1, 1, 0,-1,-1,-1};
double c_origin[] = {1,1,0,-1,-1,-1, 0, 1};
// initializing timer.c components
struct timespec timer_start, timer_stop;
long long int time_taken_for_execution;
// timer starts here
clock_gettime(CLOCK_MONOTONIC, &timer_start);
printf("\n===============================================================================\n");
printf("!! OUTPUT FOR LINEAR REGRESSION !! \n");
printf("===============================================================================\n\n");
cudaError_t error;
double *d_m;
double *d_c;
double *d_error_sum_arr;
plot_coordinate *d_data;
e_base= rms_error_calc(m_base,c_base);
error=cudaMalloc(&d_m,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_m returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_c,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_c returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error, //371
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(loop=0; loop<8; loop++) {
m[loop] = m_base + (m_origin[loop] * step);
c[loop] = c_base + (c_origin[loop] * step);
}
error = cudaMemcpy(d_m,m,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_m returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_c,c,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_c returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data,sizeof(data), cudaMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
cudaGetErrorString(error));
}
for(loop=0; loop<8; loop++){
double h_error_sum_arr[1000];
    double error_sum_total = 0;
double error_sum_mean;
d_rms_error_calc <<<100,10>>>(&d_m[loop],&d_c[loop],d_error_sum_arr,d_data);
    cudaDeviceSynchronize();
error =cudaMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000), cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
cudaGetErrorString(error));
}
for(int loop2=0; loop2<no_of_data; loop2++){
error_sum_total+= h_error_sum_arr[loop2];
}
error_sum_mean = error_sum_total / no_of_data;
e[loop] =sqrt(error_sum_mean);
    if(e[loop] < best_error){
      best_error = e[loop];
      best_error_i = loop;
    }
}
if(best_error <e_base){
e_base = best_error;
m_base = m[best_error_i];
c_base = c[best_error_i];
}
else {
minimum_found = 1;
}
}
error = cudaFree(d_m);
if(error){
fprintf(stderr,"cudaFree on d_m returned %d %s\n",error,
cudaGetErrorString(error)); //453
exit(1);
}
error = cudaFree(d_c);
if(error){
fprintf(stderr,"cudaFree on d_c returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
// to print best result
printf("minimum m,c is %lf,%lf with error %lf", m_base, c_base, e_base);
// timer stops here
clock_gettime(CLOCK_MONOTONIC, &timer_stop);
timer_calc(&timer_start, &timer_stop, &time_taken_for_execution);
// output of time taken for execution is displayed
printf("\n\n===============================================================================\n");
printf("!! TIME TAKEN FOR EXECUTION !! \n");
printf("===============================================================================\n\n");
printf("Nanoseconds: %lld\n", time_taken_for_execution);
printf("Seconds: %0.9lf\n", ((time_taken_for_execution/1.0e9)));
printf("Minutes: %0.4lf\n", ((time_taken_for_execution/1.0e9)/60));
printf("Hours: %0.2lf\n", ((time_taken_for_execution/1.0e9)/3600));
return 0;
};
|
d2b3722a2ff7aa509e55a791980051fab63ee592.hip | // !!! This is a file automatically generated by hipify!!!
/*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <string.h>
#include <hip/hip_runtime.h>
//#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register long _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
#define CUDA_CALL_SAFE(f) \
do \
{ \
hipError_t _cuda_error = f; \
if (_cuda_error != hipSuccess) \
{ \
fprintf(stderr, \
"%s, %d, CUDA ERROR: %s %s\n", \
__FILE__, \
__LINE__, \
hipGetErrorName(_cuda_error), \
hipGetErrorString(_cuda_error) \
); \
abort(); \
exit(EXIT_FAILURE); \
} \
} while (0)
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(float x)
{
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
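/* For s = squash(x), the derivative is ds/dx = s * (1 - s); the o*(1-o) and
   h*(1-h) factors in the error routines below are exactly this derivative. */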
/*** Allocate 1d array of floats ***/
float *alloc_1d_dbl(long n)
{
float *ptr;
CUDA_CALL_SAFE(hipMallocManaged(&ptr, n * sizeof(float)));
return ptr;
}
/*** Allocate 2d array of floats ***/
float *alloc_2d_dbl(long m, long n)
{
float *ptr;
CUDA_CALL_SAFE(hipMallocManaged(&ptr, m * n * sizeof(float)));
return ptr;
}
void bpnn_randomize_weights(float *w, long m, long n)
{
long i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i * (n + 1) + j] = (float) rand()/RAND_MAX;
// w[i][j] = dpn1();
}
}
}
void bpnn_randomize_row(float *w, long m)
{
long i;
for (i = 0; i <= m; i++) {
//w[i] = (float) rand()/RAND_MAX;
w[i] = 0.1;
}
}
extern "C"
void bpnn_zero_weights(float *w, long m, long n)
{
memset(w, 0, sizeof(float) * (m + 1) * (n + 1));
}
extern "C"
void bpnn_initialize(long seed)
{
printf("Random number generator seed: %d\n", seed);
srand(seed);
}
extern "C"
BPNN *bpnn_internal_create(long n_in, long n_hidden, long n_out)
{
BPNN *newnet;
newnet = (BPNN *)malloc(sizeof(BPNN));
if (newnet == NULL)
{
printf("BPNN_CREATE: Couldn't allocate neural network\n");
return (NULL);
}
newnet->input_n = n_in;
newnet->hidden_n = n_hidden;
newnet->output_n = n_out;
newnet->input_units = alloc_1d_dbl(n_in + 1);
newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
newnet->output_units = alloc_1d_dbl(n_out + 1);
newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
newnet->output_delta = alloc_1d_dbl(n_out + 1);
newnet->target = alloc_1d_dbl(n_out + 1);
newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
return (newnet);
}
extern "C"
void bpnn_free(BPNN *net)
{
CUDA_CALL_SAFE(hipFree((char *) net->input_units));
CUDA_CALL_SAFE(hipFree((char *) net->hidden_units));
CUDA_CALL_SAFE(hipFree((char *) net->output_units));
CUDA_CALL_SAFE(hipFree((char *) net->hidden_delta));
CUDA_CALL_SAFE(hipFree((char *) net->output_delta));
CUDA_CALL_SAFE(hipFree((char *) net->target));
CUDA_CALL_SAFE(hipFree((char *) net->input_weights));
CUDA_CALL_SAFE(hipFree((char *) net->input_prev_weights));
CUDA_CALL_SAFE(hipFree((char *) net->hidden_weights));
CUDA_CALL_SAFE(hipFree((char *) net->hidden_prev_weights));
free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(long n_in, long n_hidden, long n_out)
{
BPNN *newnet;
newnet = bpnn_internal_create(n_in, n_hidden, n_out);
bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
bpnn_randomize_row(newnet->target, n_out);
return (newnet);
}
extern "C"
void bpnn_layerforward(float *l1, float *l2, float *conn, long n1, long n2)
{
float sum;
long j, k;
/*** Set up thresholding unit ***/
l1[0] = 1.0;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
#endif
/*** For each unit in second layer ***/
for (j = 1; j <= n2; j++) {
/*** Compute weighted sum of its inputs ***/
sum = 0.0;
for (k = 0; k <= n1; k++) {
sum += conn[k * (n2 + 1) + j] * l1[k];
}
l2[j] = squash(sum);
}
}
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, long nj, float *err)
{
long j;
float o, t, errsum;
errsum = 0.0;
for (j = 1; j <= nj; j++) {
o = output[j];
t = target[j];
delta[j] = o * (1.0 - o) * (t - o);
errsum += ABS(delta[j]);
}
*err = errsum;
}
extern "C"
void bpnn_hidden_error(float *delta_h,
long nh,
float *delta_o,
long no,
float *who,
float *hidden,
float *err)
{
long j, k;
float h, sum, errsum;
errsum = 0.0;
for (j = 1; j <= nh; j++) {
h = hidden[j];
sum = 0.0;
for (k = 1; k <= no; k++) {
sum += delta_o[k] * who[j * (no + 1) + k];
}
delta_h[j] = h * (1.0 - h) * sum;
errsum += ABS(delta_h[j]);
}
*err = errsum;
}
extern "C"
void bpnn_adjust_weights(float *delta, long ndelta, float *ly, long nly, float *w, float *oldw)
{
float new_dw;
long k, j;
ly[0] = 1.0;
//eta = 0.3;
//momentum = 0.3;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for \
shared(oldw, w, delta) \
private(j, k, new_dw) \
firstprivate(ndelta, nly, momentum)
#endif
for (j = 1; j <= ndelta; j++) {
for (k = 0; k <= nly; k++) {
new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k * (ndelta + 1) + j]));
w[k * (ndelta + 1) + j] += new_dw;
oldw[k * (ndelta + 1) + j] = new_dw;
}
}
}
extern "C"
void bpnn_feedforward(BPNN *net)
{
long in, hid, out;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
}
extern "C"
void bpnn_train(BPNN *net, float *eo, float *eh)
{
long in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
/*** Compute error on output and hidden units. ***/
bpnn_output_error(net->output_delta, net->target, net->output_units,
out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
net->hidden_weights, net->hidden_units, &hid_err);
*eo = out_err;
*eh = hid_err;
/*** Adjust input and hidden weights. ***/
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
net->hidden_weights, net->hidden_prev_weights);
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
net->input_weights, net->input_prev_weights);
}
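/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * benchmark driver).  The seed and layer sizes below are arbitrary example
 * values.
 */
extern "C"
void bpnn_example_usage(void)
{
  float out_err, hid_err;
  bpnn_initialize(7);                      /* seed the random number generator */
  BPNN *net = bpnn_create(16, 4, 1);       /* 16 inputs, 4 hidden units, 1 output */
  for (long i = 1; i <= 16; i++)           /* input_units[1..n_in] hold the input vector */
    net->input_units[i] = drnd();
  bpnn_train(net, &out_err, &hid_err);     /* one feed-forward + back-propagation pass */
  bpnn_free(net);
}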
| d2b3722a2ff7aa509e55a791980051fab63ee592.cu | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <string.h>
#include <cuda.h>
//#define OPEN
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register long _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
#define CUDA_CALL_SAFE(f) \
do \
{ \
cudaError_t _cuda_error = f; \
if (_cuda_error != cudaSuccess) \
{ \
fprintf(stderr, \
"%s, %d, CUDA ERROR: %s %s\n", \
__FILE__, \
__LINE__, \
cudaGetErrorName(_cuda_error), \
cudaGetErrorString(_cuda_error) \
); \
abort(); \
exit(EXIT_FAILURE); \
} \
} while (0)
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(float x)
{
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
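/* For s = squash(x), the derivative is ds/dx = s * (1 - s); the o*(1-o) and
   h*(1-h) factors in the error routines below are exactly this derivative. */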
/*** Allocate 1d array of floats ***/
float *alloc_1d_dbl(long n)
{
float *ptr;
CUDA_CALL_SAFE(cudaMallocManaged(&ptr, n * sizeof(float)));
return ptr;
}
/*** Allocate 2d array of floats ***/
float *alloc_2d_dbl(long m, long n)
{
float *ptr;
CUDA_CALL_SAFE(cudaMallocManaged(&ptr, m * n * sizeof(float)));
return ptr;
}
void bpnn_randomize_weights(float *w, long m, long n)
{
long i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i * (n + 1) + j] = (float) rand()/RAND_MAX;
// w[i][j] = dpn1();
}
}
}
void bpnn_randomize_row(float *w, long m)
{
long i;
for (i = 0; i <= m; i++) {
//w[i] = (float) rand()/RAND_MAX;
w[i] = 0.1;
}
}
extern "C"
void bpnn_zero_weights(float *w, long m, long n)
{
memset(w, 0, sizeof(float) * (m + 1) * (n + 1));
}
extern "C"
void bpnn_initialize(long seed)
{
printf("Random number generator seed: %d\n", seed);
srand(seed);
}
extern "C"
BPNN *bpnn_internal_create(long n_in, long n_hidden, long n_out)
{
BPNN *newnet;
newnet = (BPNN *)malloc(sizeof(BPNN));
if (newnet == NULL)
{
printf("BPNN_CREATE: Couldn't allocate neural network\n");
return (NULL);
}
newnet->input_n = n_in;
newnet->hidden_n = n_hidden;
newnet->output_n = n_out;
newnet->input_units = alloc_1d_dbl(n_in + 1);
newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
newnet->output_units = alloc_1d_dbl(n_out + 1);
newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
newnet->output_delta = alloc_1d_dbl(n_out + 1);
newnet->target = alloc_1d_dbl(n_out + 1);
newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
return (newnet);
}
extern "C"
void bpnn_free(BPNN *net)
{
CUDA_CALL_SAFE(cudaFree((char *) net->input_units));
CUDA_CALL_SAFE(cudaFree((char *) net->hidden_units));
CUDA_CALL_SAFE(cudaFree((char *) net->output_units));
CUDA_CALL_SAFE(cudaFree((char *) net->hidden_delta));
CUDA_CALL_SAFE(cudaFree((char *) net->output_delta));
CUDA_CALL_SAFE(cudaFree((char *) net->target));
CUDA_CALL_SAFE(cudaFree((char *) net->input_weights));
CUDA_CALL_SAFE(cudaFree((char *) net->input_prev_weights));
CUDA_CALL_SAFE(cudaFree((char *) net->hidden_weights));
CUDA_CALL_SAFE(cudaFree((char *) net->hidden_prev_weights));
free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(long n_in, long n_hidden, long n_out)
{
BPNN *newnet;
newnet = bpnn_internal_create(n_in, n_hidden, n_out);
bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
bpnn_randomize_row(newnet->target, n_out);
return (newnet);
}
extern "C"
void bpnn_layerforward(float *l1, float *l2, float *conn, long n1, long n2)
{
float sum;
long j, k;
/*** Set up thresholding unit ***/
l1[0] = 1.0;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static)
#endif
/*** For each unit in second layer ***/
for (j = 1; j <= n2; j++) {
/*** Compute weighted sum of its inputs ***/
sum = 0.0;
for (k = 0; k <= n1; k++) {
sum += conn[k * (n2 + 1) + j] * l1[k];
}
l2[j] = squash(sum);
}
}
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, long nj, float *err)
{
long j;
float o, t, errsum;
errsum = 0.0;
for (j = 1; j <= nj; j++) {
o = output[j];
t = target[j];
delta[j] = o * (1.0 - o) * (t - o);
errsum += ABS(delta[j]);
}
*err = errsum;
}
extern "C"
void bpnn_hidden_error(float *delta_h,
long nh,
float *delta_o,
long no,
float *who,
float *hidden,
float *err)
{
long j, k;
float h, sum, errsum;
errsum = 0.0;
for (j = 1; j <= nh; j++) {
h = hidden[j];
sum = 0.0;
for (k = 1; k <= no; k++) {
sum += delta_o[k] * who[j * (no + 1) + k];
}
delta_h[j] = h * (1.0 - h) * sum;
errsum += ABS(delta_h[j]);
}
*err = errsum;
}
extern "C"
void bpnn_adjust_weights(float *delta, long ndelta, float *ly, long nly, float *w, float *oldw)
{
float new_dw;
long k, j;
ly[0] = 1.0;
//eta = 0.3;
//momentum = 0.3;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for \
shared(oldw, w, delta) \
private(j, k, new_dw) \
firstprivate(ndelta, nly, momentum)
#endif
for (j = 1; j <= ndelta; j++) {
for (k = 0; k <= nly; k++) {
new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k * (ndelta + 1) + j]));
w[k * (ndelta + 1) + j] += new_dw;
oldw[k * (ndelta + 1) + j] = new_dw;
}
}
}
extern "C"
void bpnn_feedforward(BPNN *net)
{
long in, hid, out;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
}
extern "C"
void bpnn_train(BPNN *net, float *eo, float *eh)
{
long in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
/*** Compute error on output and hidden units. ***/
bpnn_output_error(net->output_delta, net->target, net->output_units,
out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
net->hidden_weights, net->hidden_units, &hid_err);
*eo = out_err;
*eh = hid_err;
/*** Adjust input and hidden weights. ***/
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
net->hidden_weights, net->hidden_prev_weights);
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
net->input_weights, net->input_prev_weights);
}
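/*
 * Illustrative usage sketch (added for clarity; not part of the original
 * benchmark driver).  The seed and layer sizes below are arbitrary example
 * values.
 */
extern "C"
void bpnn_example_usage(void)
{
  float out_err, hid_err;
  bpnn_initialize(7);                      /* seed the random number generator */
  BPNN *net = bpnn_create(16, 4, 1);       /* 16 inputs, 4 hidden units, 1 output */
  for (long i = 1; i <= 16; i++)           /* input_units[1..n_in] hold the input vector */
    net->input_units[i] = drnd();
  bpnn_train(net, &out_err, &hid_err);     /* one feed-forward + back-propagation pass */
  bpnn_free(net);
}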
|
85f34289f48b6c371a74775b742878b21f806f03.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
//#include "device_launch_parameters.h"
//#include <helper_cuda.h>
////#include "sm_20_atomic_functions.h"
//
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/count.h>
//#include <stdio.h>
//
//#define REAL float
////#define USE_CONST_MEM
//#define HANDLE_ERROR checkCudaErrors
//
//float elapsedTime;
//#define START_GPU {\
//elapsedTime = 0.0;\
//hipEvent_t start, stop;\
//checkCudaErrors(hipEventCreate(&start)); \
//checkCudaErrors(hipEventCreate(&stop));\
//checkCudaErrors(hipEventRecord(start, 0));\
//
//#define END_GPU \
//checkCudaErrors(hipEventRecord(stop, 0));\
//checkCudaErrors(hipEventSynchronize(stop));\
//checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop)); \
//printf("GPU Time used: %3.2f ms\n", elapsedTime);\
//checkCudaErrors(hipEventDestroy(start));\
//checkCudaErrors(hipEventDestroy(stop));}
//
//#define START_CPU {\
//double start = omp_get_wtime();
//
//#define END_CPU \
//double end = omp_get_wtime();\
//double duration = end - start;\
//printf("CPU Time used: %3.1f ms\n", duration * 1000);}
//
////############################################################################
//#ifdef _WIN64
//#define GLUT_NO_LIB_PRAGMA
//#pragma comment (lib, "opengl32.lib")
//#pragma comment (lib, "glut64.lib")
//#endif //_WIN64
//
///* On Windows, include the local copy of glut.h and glext.h */
//#include "GL/glut.h"
//#include "GL/glext.h"
//#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
//
////----------------------bitmap------------------------------
//struct CPUAnimBitmap {
// //
// unsigned char *pixels;
// int width, height;
// //
// void *dataBlock;
//
// //
// void(*fAnim)(void*, int);
// void(*animExit)(void*);
// void(*clickDrag)(void*, int, int, int, int);
// int dragStartX, dragStartY;
//
// CPUAnimBitmap(int w, int h, void *d = NULL) {
// width = w;
// height = h;
// //r g b alph
// pixels = new unsigned char[width * height * 4];
// dataBlock = d;
// clickDrag = NULL;
// }
//
// ~CPUAnimBitmap() {
// delete[] pixels;
// }
//
// unsigned char* get_ptr(void) const { return pixels; }
// long image_size(void) const { return width * height * 4; }
//
// void click_drag(void(*f)(void*, int, int, int, int)) {
// clickDrag = f;
// }
//
// //
// //input: fGPUbitmap
// // ecuda
// void anim_and_exit(void(*f)(void*, int), void(*e)(void*)) {
// CPUAnimBitmap** bitmap = get_bitmap_ptr();
// *bitmap = this;
// fAnim = f;
// animExit = e;
// // a bug in the Windows GLUT implementation prevents us from
// // passing zero arguments to glutInit()
// int c = 1;
// char* dummy = "";
// glutInit(&c, &dummy);
// glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
// glutInitWindowSize(width, height);
// glutCreateWindow("bitmap");
// glutKeyboardFunc(Key);
// glutDisplayFunc(Draw);
//
// if (clickDrag != NULL)
// glutMouseFunc(mouse_func);
//
// //glutIdleFunc
// //GLUT
// //idle function
// glutIdleFunc(idle_func);
// glutMainLoop();
// }
//
// // static method used for glut callbacks
// static CPUAnimBitmap** get_bitmap_ptr(void) {
// static CPUAnimBitmap* gBitmap;
// return &gBitmap;
// }
//
// // static method used for glut callbacks
// static void mouse_func(int button, int state,
// int mx, int my) {
// if (button == GLUT_LEFT_BUTTON) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// if (state == GLUT_DOWN) {
// bitmap->dragStartX = mx;
// bitmap->dragStartY = my;
// }
// else if (state == GLUT_UP) {
// bitmap->clickDrag(bitmap->dataBlock,
// bitmap->dragStartX,
// bitmap->dragStartY,
// mx, my);
// }
// }
// }
//
// // static method used for glut callbacks
// static void idle_func(void) {
// static int ticks = 1;
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->fAnim(bitmap->dataBlock, ticks++);
// glutPostRedisplay();
// }
//
// // static method used for glut callbacks
// static void Key(unsigned char key, int x, int y) {
// switch (key) {
// case 27:
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->animExit(bitmap->dataBlock);
// //delete bitmap;
// exit(0);
// }
// }
//
// // static method used for glut callbacks
// static void Draw(void) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glClear(GL_COLOR_BUFFER_BIT);
// glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
// glutSwapBuffers();
// }
//};
//
////
//#define DIM 1024
//#define rnd( x ) (x * rand() / RAND_MAX)
//#define INF 2e10f
//
////-----------------------------------------------------------
//struct Sphere {
// REAL r, b, g;
// REAL radius;
// //
// REAL x, y, z;
// //
// REAL dx, dy, dz;
// bool isCrash;
// // ox,oy
// //
// //
// __device__ REAL hit(REAL ox, REAL oy, REAL *n) {
// REAL dx = ox - x;
// REAL dy = oy - y;
// //
// if (dx*dx + dy*dy < radius*radius) {
// REAL dz = sqrtf(radius*radius - dx*dx - dy*dy);
// *n = dz / sqrtf(radius * radius);
// return dz + z;
// }
// //
// return -INF;
// }
//};
//
////----------------------
//#define SPHERES 2000
//
//int *d_crashnum, *h_crashnum;
//
//#ifdef USE_CONST_MEM
//__constant__ Sphere d_spheres[SPHERES];
//#else
//Sphere *d_spheres;
//#endif
//
////------------------------cuda kernel --------------------------
//
//#define STEP_SIZE REAL(20.0)
//
////
//__global__ void crash_sharedMemory(Sphere *s, int num_sphere, int*d_crashnum)
//{
// //-------- block---------
// __shared__ Sphere rowSphere[32];
// __shared__ Sphere colSphere[32];
// int rowId1 = blockIdx.x * blockDim.x;
// for (int i = rowId1; i < rowId1 + blockDim.x; i++)
// {
// if (i >= num_sphere)
// break;
// rowSphere[i-rowId1] = s[i];
// }
//
// int colId1 = blockIdx.y * blockDim.y;
// for (int i = colId1; i < colId1 + blockDim.y; i++)
// {
// if (i >= num_sphere)
// break;
// colSphere[i- colId1] = s[i];
// }
// //ok
// __syncthreads();
//
// //
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
// //x,y,
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = rowSphere[threadIdx.x].x - colSphere[threadIdx.y].x;
// REAL dy = rowSphere[threadIdx.x].y - colSphere[threadIdx.y].y;
// REAL dz = rowSphere[threadIdx.x].z - colSphere[threadIdx.y].z;
// REAL totalRadius = rowSphere[threadIdx.x].radius + colSphere[threadIdx.y].radius;
// //
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// atomicAdd(d_crashnum, 1);
// }
// }
//
// __syncthreads();
//}
//
////
//__global__ void kernelMoving(Sphere *s, int len)
//{
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// //x
// while (x < len) {
//
// s[x].isCrash = false;
// s[x].x += s[x].dx;
// s[x].y += s[x].dy;
// s[x].z += s[x].dz;
// x += gridDim.x*blockDim.x;
// }
//}
//
//#ifdef USE_CONST_MEM
//__global__ void kernel(unsigned char *ptr) {
//#else
//__global__ void kernel(Sphere *d_spheres, unsigned char *ptr) {
//#endif
// //pixel
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// //
// int offset = x + y * blockDim.x * gridDim.x;
// REAL ox = (x - DIM / 2);
// REAL oy = (y - DIM / 2);
//
// REAL r = 0, g = 0, b = 0;
// REAL maxz = -INF;
// for (int i = 0; i < SPHERES; i++) {
// REAL n;
// REAL t = d_spheres[i].hit(ox, oy, &n);
// if (t > maxz) {
// REAL fscale = n;
// if (d_spheres[i].isCrash)
// {
// r = 1.0f *fscale;
// g = 0.0f*fscale;
// b = 0.0f*fscale;
// }
// else
// {
// r = d_spheres[i].r * fscale;
// g = d_spheres[i].g * fscale;
// b = d_spheres[i].b * fscale;
// maxz = t;
// }
// }
// }
//
// ptr[offset * 4 + 0] = (int)(r * 255);
// ptr[offset * 4 + 1] = (int)(g * 255);
// ptr[offset * 4 + 2] = (int)(b * 255);
// ptr[offset * 4 + 3] = 255;
//}
//
//// globals needed by the update routine
//struct DataBlock {
// // gpu bitmap
// unsigned char *dev_bitmap;
// //cpubitmap
// CPUAnimBitmap *bitmap;
//};
//
//void generate_frame(DataBlock *d, int ticks) {
//
// // initialize all integers of a device_vector to 0
// /*int * d_crashNumList;
// hipMalloc(&d_crashNumList, sizeof(int)* SPHERES);
// hipMemset(d_crashNumList, 0, sizeof(int)* SPHERES);*/
//
// float totalTime = 0.0;
// //0
// HANDLE_ERROR(hipMemset(d_crashnum, 0, sizeof(int)));
// // copyhost
// HANDLE_ERROR(hipMemcpy(h_crashnum, d_crashnum,
// sizeof(int), hipMemcpyDeviceToHost));
// printf("init num of crash: %d\n", (*h_crashnum));
//
// START_GPU
//
// //------------ --2000 ----------------
// kernelMoving << <64, 32 >> > (d_spheres, SPHERES);
// END_GPU
// totalTime += elapsedTime;
// START_GPU
// //--------------------------------
// //SPHERES 2000 grid 64 * 64
// //dim3 crashGrids(64, 64);
// dim3 crashGrids(64, 64);
// dim3 crashBlock(32, 32);
//
// crash_sharedMemory << <crashGrids, crashBlock >> > (d_spheres, SPHERES, d_crashnum);
//
// END_GPU
// totalTime += elapsedTime;
//
// //----------- bitmap--------
// START_GPU
// dim3 grids(DIM / 16, DIM / 16);
// dim3 threads(16, 16);
//#ifdef USE_CONST_MEM
// kernel << <grids, threads >> > (d->dev_bitmap);
//#else
// kernel << <grids, threads >> > (d_spheres, d->dev_bitmap);
//#endif
//
// END_GPU
// totalTime += elapsedTime;
//
// //-----bitmap device host -----------
// HANDLE_ERROR(hipMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
// d->bitmap->image_size(), hipMemcpyDeviceToHost));
//
// HANDLE_ERROR(hipMemcpy(h_crashnum, d_crashnum,
// sizeof(int), hipMemcpyDeviceToHost));
// printf("num of pair sphere crash: %d\n", (*h_crashnum));
// printf("total time: %3.1f\n", totalTime);
// printf("---------------------------------------------\n");
//
//}
//
//// clean up memory allocated on the GPU
//void cleanup(DataBlock *d) {
// HANDLE_ERROR(hipFree(d->dev_bitmap));
// //
// HANDLE_ERROR(hipFree(d_crashnum));
// free(h_crashnum);
//}
//
////-------------------------main-------------------------------
//
//int main(void) {
// //--------- allocate the image ----------
// DataBlock data;
// CPUAnimBitmap bitmap(DIM, DIM, &data);
// data.bitmap = &bitmap;
//
// // allocate the sphere-collision counter
// h_crashnum = (int *)malloc(sizeof(int));
// *h_crashnum = 0;
//
// HANDLE_ERROR(hipMalloc((void**)&d_crashnum, sizeof(int)));
// HANDLE_ERROR(hipMemcpy(d_crashnum, h_crashnum,
// sizeof(int), hipMemcpyHostToDevice));
//
// //--------- allocate GPU memory -------------
// HANDLE_ERROR(hipMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
//
//#ifdef USE_CONST_MEM
//#else
// HANDLE_ERROR(hipMalloc((void**)&d_spheres, sizeof(Sphere) * SPHERES));
//#endif
//
// // allocate temp memory, initialize it, copy to constant
// // memory on the GPU, then free our temp memory
// Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
// for (int i = 0; i < SPHERES; i++) {
// temp_s[i].r = rnd(1.0f);
// temp_s[i].g = rnd(1.0f);
// temp_s[i].b = rnd(1.0f);
//
// temp_s[i].x = rnd(1000.0f) - 500;
// temp_s[i].y = rnd(1000.0f) - 500;
// temp_s[i].z = rnd(1000.0f) - 500;
// temp_s[i].radius = rnd(10.0f) + 5;
//
// // initialize the sphere velocities
// temp_s[i].dx = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dy = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dz = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// }
//
//#ifdef USE_CONST_MEM
// HANDLE_ERROR(hipMemcpyToSymbol(d_spheres, temp_s, sizeof(Sphere) * SPHERES));
//#else
// HANDLE_ERROR(hipMemcpy(d_spheres, temp_s, sizeof(Sphere)*SPHERES, hipMemcpyHostToDevice));
//#endif
//
// free(temp_s);
//
// // display
// bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
//} | 85f34289f48b6c371a74775b742878b21f806f03.cu | //#include <cuda_runtime.h>
//#include "device_launch_parameters.h"
//#include <helper_cuda.h>
////#include "sm_20_atomic_functions.h"
//
//#include <thrust/host_vector.h>
//#include <thrust/device_vector.h>
//#include <thrust/count.h>
//#include <stdio.h>
//
//#define REAL float
////#define USE_CONST_MEM
//#define HANDLE_ERROR checkCudaErrors
//
//float elapsedTime;
//#define START_GPU {\
//elapsedTime = 0.0;\
//cudaEvent_t start, stop;\
//checkCudaErrors(cudaEventCreate(&start)); \
//checkCudaErrors(cudaEventCreate(&stop));\
//checkCudaErrors(cudaEventRecord(start, 0));\
//
//#define END_GPU \
//checkCudaErrors(cudaEventRecord(stop, 0));\
//checkCudaErrors(cudaEventSynchronize(stop));\
//checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); \
//printf("GPU Time used: %3.2f ms\n", elapsedTime);\
//checkCudaErrors(cudaEventDestroy(start));\
//checkCudaErrors(cudaEventDestroy(stop));}
//
//#define START_CPU {\
//double start = omp_get_wtime();
//
//#define END_CPU \
//double end = omp_get_wtime();\
//double duration = end - start;\
//printf("CPU Time used: %3.1f ms\n", duration * 1000);}
//
////############################################################################
//#ifdef _WIN64
//#define GLUT_NO_LIB_PRAGMA
//#pragma comment (lib, "opengl32.lib")
//#pragma comment (lib, "glut64.lib")
//#endif //_WIN64
//
///* On Windows, include the local copy of glut.h and glext.h */
//#include "GL/glut.h"
//#include "GL/glext.h"
//#define GET_PROC_ADDRESS( str ) wglGetProcAddress( str )
//
////---------------------- a wrapper class around the bitmap ------------------------------
//struct CPUAnimBitmap {
// // the pixel data
// unsigned char *pixels;
// int width, height;
// // a user data pointer
// void *dataBlock;
//
// // function pointers that can be set dynamically
// void(*fAnim)(void*, int);
// void(*animExit)(void*);
// void(*clickDrag)(void*, int, int, int, int);
// int dragStartX, dragStartY;
//
// CPUAnimBitmap(int w, int h, void *d = NULL) {
// width = w;
// height = h;
// // r g b alpha
// pixels = new unsigned char[width * height * 4];
// dataBlock = d;
// clickDrag = NULL;
// }
//
// ~CPUAnimBitmap() {
// delete[] pixels;
// }
//
// unsigned char* get_ptr(void) const { return pixels; }
// long image_size(void) const { return width * height * 4; }
//
// void click_drag(void(*f)(void*, int, int, int, int)) {
// clickDrag = f;
// }
//
// // render this image
// // input: f is the function that computes the bitmap image on the GPU
// // e is the CUDA cleanup function
// void anim_and_exit(void(*f)(void*, int), void(*e)(void*)) {
// CPUAnimBitmap** bitmap = get_bitmap_ptr();
// *bitmap = this;
// fAnim = f;
// animExit = e;
// // a bug in the Windows GLUT implementation prevents us from
// // passing zero arguments to glutInit()
// int c = 1;
// char* dummy = "";
// glutInit(&c, &dummy);
// glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
// glutInitWindowSize(width, height);
// glutCreateWindow("bitmap");
// glutKeyboardFunc(Key);
// glutDisplayFunc(Draw);
//
// if (clickDrag != NULL)
// glutMouseFunc(mouse_func);
//
// // glutIdleFunc registers a global callback: when no window events arrive,
// // a GLUT program can run background work or continuous animation.
// // if enabled, this idle function is called repeatedly until a window event occurs.
// glutIdleFunc(idle_func);
// glutMainLoop();
// }
//
// // static method used for glut callbacks
// static CPUAnimBitmap** get_bitmap_ptr(void) {
// static CPUAnimBitmap* gBitmap;
// return &gBitmap;
// }
//
// // static method used for glut callbacks
// static void mouse_func(int button, int state,
// int mx, int my) {
// if (button == GLUT_LEFT_BUTTON) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// if (state == GLUT_DOWN) {
// bitmap->dragStartX = mx;
// bitmap->dragStartY = my;
// }
// else if (state == GLUT_UP) {
// bitmap->clickDrag(bitmap->dataBlock,
// bitmap->dragStartX,
// bitmap->dragStartY,
// mx, my);
// }
// }
// }
//
// // static method used for glut callbacks
// static void idle_func(void) {
// static int ticks = 1;
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->fAnim(bitmap->dataBlock, ticks++);
// glutPostRedisplay();
// }
//
// // static method used for glut callbacks
// static void Key(unsigned char key, int x, int y) {
// switch (key) {
// case 27:
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// bitmap->animExit(bitmap->dataBlock);
// //delete bitmap;
// exit(0);
// }
// }
//
// // static method used for glut callbacks
// static void Draw(void) {
// CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
// glClearColor(0.0, 0.0, 0.0, 1.0);
// glClear(GL_COLOR_BUFFER_BIT);
// glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels);
// glutSwapBuffers();
// }
//};
//
//// image resolution in pixels
//#define DIM 1024
//#define rnd( x ) (x * rand() / RAND_MAX)
//#define INF 2e10f
//
////---------------------------- a sphere -------------------------------
//struct Sphere {
// REAL r, b, g;
// REAL radius;
// // position of the sphere
// REAL x, y, z;
// // per-frame velocity of the sphere
// REAL dx, dy, dz;
// bool isCrash;
// // does the ray coming from the pixel at (ox, oy) intersect this sphere?
// // if the ray hits the sphere, this method computes the distance from the camera to the hit point.
// // if several spheres are hit, only the one closest to the camera is visible.
// __device__ REAL hit(REAL ox, REAL oy, REAL *n) {
// REAL dx = ox - x;
// REAL dy = oy - y;
// // the ray can intersect the sphere only when this distance is smaller than the radius
// if (dx*dx + dy*dy < radius*radius) {
// REAL dz = sqrtf(radius*radius - dx*dx - dy*dy);
// *n = dz / sqrtf(radius * radius);
// return dz + z;
// }
// // infinitely far away
// return -INF;
// }
//};
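////
//// Worked example for hit(), assuming a sphere at x = 0, y = 0, z = 10 with radius = 5
//// (values chosen only for illustration): the pixel ray at ox = 3, oy = 0 gives
//// dx*dx + dy*dy = 9 < 25, so dz = sqrtf(25 - 9) = 4, *n = 4 / 5 = 0.8, and the returned
//// depth is dz + z = 14; a ray at ox = 6, oy = 0 misses (36 > 25) and returns -INF.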
//
////------------ number of colliding sphere pairs ----------
//#define SPHERES 2000
//
//int *d_crashnum, *h_crashnum;
//
//#ifdef USE_CONST_MEM
//__constant__ Sphere d_spheres[SPHERES];
//#else
//Sphere *d_spheres;
//#endif
//
////------------------------cuda kernel --------------------------
//
//#define STEP_SIZE REAL(20.0)
//
//// count the number of colliding sphere pairs
//__global__ void crash_sharedMemory(Sphere *s, int num_sphere, int*d_crashnum)
//{
// //-------- copy the data into shared memory, shared by one block ---------
// __shared__ Sphere rowSphere[32];
// __shared__ Sphere colSphere[32];
// int rowId1 = blockIdx.x * blockDim.x;
// for (int i = rowId1; i < rowId1 + blockDim.x; i++)
// {
// if (i >= num_sphere)
// break;
// rowSphere[i-rowId1] = s[i];
// }
//
// int colId1 = blockIdx.y * blockDim.y;
// for (int i = colId1; i < colId1 + blockDim.y; i++)
// {
// if (i >= num_sphere)
// break;
// colSphere[i- colId1] = s[i];
// }
// // wait until the shared-memory data is loaded
// __syncthreads();
//
// // indices of the two spheres to test for collision
// int s1 = threadIdx.x + blockIdx.x * blockDim.x;
// int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//
// // collision test for the sphere pair (s1, s2); the pair matrix is symmetric, so only half of it is computed
// if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
// {
// REAL dx = rowSphere[threadIdx.x].x - colSphere[threadIdx.y].x;
// REAL dy = rowSphere[threadIdx.x].y - colSphere[threadIdx.y].y;
// REAL dz = rowSphere[threadIdx.x].z - colSphere[threadIdx.y].z;
// REAL totalRadius = rowSphere[threadIdx.x].radius + colSphere[threadIdx.y].radius;
// // check whether they collide
// if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
// {
// s[s1].isCrash = true;
// s[s2].isCrash = true;
// atomicAdd(d_crashnum, 1);
// }
// }
//
// __syncthreads();
//}
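////
//// For comparison, a minimal global-memory variant of the same pair test (no shared-memory
//// tiles); the kernel name and layout below are an illustrative sketch, not part of the
//// original program:
////
//__global__ void crash_globalMemory(Sphere *s, int num_sphere, int *d_crashnum)
//{
//	int s1 = threadIdx.x + blockIdx.x * blockDim.x;
//	int s2 = threadIdx.y + blockIdx.y * blockDim.y;
//	if (s1 < num_sphere && s2 < num_sphere && s1 < s2)
//	{
//		REAL dx = s[s1].x - s[s2].x;
//		REAL dy = s[s1].y - s[s2].y;
//		REAL dz = s[s1].z - s[s2].z;
//		REAL totalRadius = s[s1].radius + s[s2].radius;
//		if (dx*dx + dy*dy + dz*dz <= totalRadius * totalRadius)
//		{
//			s[s1].isCrash = true;
//			s[s2].isCrash = true;
//			atomicAdd(d_crashnum, 1);
//		}
//	}
//}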
//
//// update the sphere positions
//__global__ void kernelMoving(Sphere *s, int len)
//{
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// // update the position of sphere x
// while (x < len) {
//
// s[x].isCrash = false;
// s[x].x += s[x].dx;
// s[x].y += s[x].dy;
// s[x].z += s[x].dz;
// x += gridDim.x*blockDim.x;
// }
//}
//
//#ifdef USE_CONST_MEM
//__global__ void kernel(unsigned char *ptr) {
//#else
//__global__ void kernel(Sphere *d_spheres, unsigned char *ptr) {
//#endif
// // pixel coordinates of this thread
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// // linear index of this pixel
// int offset = x + y * blockDim.x * gridDim.x;
// REAL ox = (x - DIM / 2);
// REAL oy = (y - DIM / 2);
//
// REAL r = 0, g = 0, b = 0;
// REAL maxz = -INF;
// for (int i = 0; i < SPHERES; i++) {
// REAL n;
// REAL t = d_spheres[i].hit(ox, oy, &n);
// if (t > maxz) {
// REAL fscale = n;
// if (d_spheres[i].isCrash)
// {
// r = 1.0f *fscale;
// g = 0.0f*fscale;
// b = 0.0f*fscale;
// }
// else
// {
// r = d_spheres[i].r * fscale;
// g = d_spheres[i].g * fscale;
// b = d_spheres[i].b * fscale;
// maxz = t;
// }
// }
// }
//
// ptr[offset * 4 + 0] = (int)(r * 255);
// ptr[offset * 4 + 1] = (int)(g * 255);
// ptr[offset * 4 + 2] = (int)(b * 255);
// ptr[offset * 4 + 3] = 255;
//}
//
//// globals needed by the update routine
//struct DataBlock {
// // bitmap data on the GPU
// unsigned char *dev_bitmap;
// // bitmap data on the CPU
// CPUAnimBitmap *bitmap;
//};
//
//void generate_frame(DataBlock *d, int ticks) {
//
// // initialize all integers of a device_vector to 0
// /*int * d_crashNumList;
// cudaMalloc(&d_crashNumList, sizeof(int)* SPHERES);
// cudaMemset(d_crashNumList, 0, sizeof(int)* SPHERES);*/
//
// float totalTime = 0.0;
// // reset the sphere-collision counter to 0
// HANDLE_ERROR(cudaMemset(d_crashnum, 0, sizeof(int)));
// // copy the collision count back to the host and print it
// HANDLE_ERROR(cudaMemcpy(h_crashnum, d_crashnum,
// sizeof(int), cudaMemcpyDeviceToHost));
// printf("init num of crash: %d\n", (*h_crashnum));
//
// START_GPU
//
// //------------ move the spheres (2000 of them) ----------------
// kernelMoving << <64, 32 >> > (d_spheres, SPHERES);
// END_GPU
// totalTime += elapsedTime;
// START_GPU
// //-------------- detect sphere collisions ------------------
// // when SPHERES is 2000, the grid is 64 * 64
// //dim3 crashGrids(64, 64);
// dim3 crashGrids(64, 64);
// dim3 crashBlock(32, 32);
//
// crash_sharedMemory << <crashGrids, crashBlock >> > (d_spheres, SPHERES, d_crashnum);
//
// END_GPU
// totalTime += elapsedTime;
//
// //----------- render a bitmap from the sphere data --------
// START_GPU
// dim3 grids(DIM / 16, DIM / 16);
// dim3 threads(16, 16);
//#ifdef USE_CONST_MEM
// kernel << <grids, threads >> > (d->dev_bitmap);
//#else
// kernel << <grids, threads >> > (d_spheres, d->dev_bitmap);
//#endif
//
// END_GPU
// totalTime += elapsedTime;
//
// //----- copy the bitmap data from device to host -----------
// HANDLE_ERROR(cudaMemcpy(d->bitmap->get_ptr(), d->dev_bitmap,
// d->bitmap->image_size(), cudaMemcpyDeviceToHost));
//
// HANDLE_ERROR(cudaMemcpy(h_crashnum, d_crashnum,
// sizeof(int), cudaMemcpyDeviceToHost));
// printf("num of pair sphere crash: %d\n", (*h_crashnum));
// printf("total time: %3.1f\n", totalTime);
// printf("---------------------------------------------\n");
//
//}
//
//// clean up memory allocated on the GPU
//void cleanup(DataBlock *d) {
// HANDLE_ERROR(cudaFree(d->dev_bitmap));
// // free the collision counter
// HANDLE_ERROR(cudaFree(d_crashnum));
// free(h_crashnum);
//}
//
////-------------------------main-------------------------------
//
//int main(void) {
// //--------- allocate the image ----------
// DataBlock data;
// CPUAnimBitmap bitmap(DIM, DIM, &data);
// data.bitmap = &bitmap;
//
// // allocate the sphere-collision counter
// h_crashnum = (int *)malloc(sizeof(int));
// *h_crashnum = 0;
//
// HANDLE_ERROR(cudaMalloc((void**)&d_crashnum, sizeof(int)));
// HANDLE_ERROR(cudaMemcpy(d_crashnum, h_crashnum,
// sizeof(int), cudaMemcpyHostToDevice));
//
// //--------- allocate GPU memory -------------
// HANDLE_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
//
//#ifdef USE_CONST_MEM
//#else
// HANDLE_ERROR(cudaMalloc((void**)&d_spheres, sizeof(Sphere) * SPHERES));
//#endif
//
// // allocate temp memory, initialize it, copy to constant
// // memory on the GPU, then free our temp memory
// Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
// for (int i = 0; i < SPHERES; i++) {
// temp_s[i].r = rnd(1.0f);
// temp_s[i].g = rnd(1.0f);
// temp_s[i].b = rnd(1.0f);
//
// temp_s[i].x = rnd(1000.0f) - 500;
// temp_s[i].y = rnd(1000.0f) - 500;
// temp_s[i].z = rnd(1000.0f) - 500;
// temp_s[i].radius = rnd(10.0f) + 5;
//
// // initialize the sphere velocities
// temp_s[i].dx = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dy = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// temp_s[i].dz = STEP_SIZE * ((rand() / (float)RAND_MAX) * 2 - 1);
// }
//
//#ifdef USE_CONST_MEM
// HANDLE_ERROR(cudaMemcpyToSymbol(d_spheres, temp_s, sizeof(Sphere) * SPHERES));
//#else
// HANDLE_ERROR(cudaMemcpy(d_spheres, temp_s, sizeof(Sphere)*SPHERES, cudaMemcpyHostToDevice));
//#endif
//
// free(temp_s);
//
// // display
// bitmap.anim_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup);
//} |
b84dfedcff79019dafd43771d7e685e21cf0a041.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "tests.h"
#include "helper.h"
#include "gpu_pso.h"
texture<float, 2, hipReadModeElementType> texETCMatrix;
__device__ int GetDiscreteCoordT1(float val)
{
return floorf(val);
}
/* Unfortunately, we cannot do external calls to device code, so we have to copy this here under a DIFFERENT name(!!!)...
* Thanks Nvidia!
*/
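/* A side note on the duplication above: with relocatable device code enabled (for example
 * `nvcc -rdc=true` for CUDA or `hipcc -fgpu-rdc` for HIP), a __device__ function defined in
 * another translation unit can be resolved at device link time, which would avoid copying it
 * here; the copy keeps the original whole-program compilation model, so it is left as is.
 */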
__device__ float CalcMakespanT(int numTasks, int numMachines, float *matching, float *scratch)
{
int i;
float makespan;
int threadID = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int taskOffset, machineOffset;
float matchingVal;
float val;
makespan = 0.0f;
taskOffset = __mul24(threadID, numTasks);
machineOffset = __mul24(threadID, numMachines);
//Clear our scratch table
for (i = 0; i < numTasks; i++)
scratch[machineOffset + (int) floorf(matching[taskOffset + i])] = 0.0f;
for (i = 0; i < numTasks; i++)
{
matchingVal = matching[taskOffset + i];
scratch[machineOffset + (int) floorf(matchingVal)] += tex2D(texETCMatrix, matchingVal, (float) i);
val = scratch[machineOffset + (int) floorf(matchingVal)];
if (val > makespan)
makespan = val;
}
return makespan;
}
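/* For reference, a host-side sketch of the same makespan computation (illustrative only; the
 * test below compares against ComputeMakespan() from helper.h, not against this sketch). It
 * assumes a row-major ETC table indexed as etc[task * numMachines + machine]:
 *
 *   float HostMakespan(const float *matching, const float *etc, int numTasks, int numMachines)
 *   {
 *       std::vector<float> load(numMachines, 0.0f);
 *       float makespan = 0.0f;
 *       for (int i = 0; i < numTasks; i++) {
 *           int m = (int) floorf(matching[i]);
 *           load[m] += etc[i * numMachines + m];
 *           if (load[m] > makespan) makespan = load[m];
 *       }
 *       return makespan;
 *   }
 */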
__global__ void TestMakespan(int numTasks, int numMachines, int numMatchings, float *matching, float *scratch, float *outVal)
{
int threadID = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (threadID < numMatchings)
outVal[threadID] = CalcMakespanT(numTasks, numMachines, matching, scratch);
}
int TestGPUMakespan()
{
int i, j, k;
int passed = 1;
float *dOut, *matching, *scratch;
float *hMatching, *hScratch;
int numMatchings;
int threadsPerBlock, numBlocks;
float *cpuMakespans, *gpuMakespans;
float *tempMatching;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
BuildMachineList("machines100.txt");
BuildTaskList("tasks1000.txt");
GenerateETCMatrix();
numMatchings = 1024;
threadsPerBlock = 64;
numBlocks = CalcNumBlocks(numMatchings, threadsPerBlock);
printf("\tRunning GPU Makespan Test...\n");
srand((unsigned int) time(NULL));
hMatching = (float *) calloc(numMatchings * GetNumTasks(), sizeof(float));
hScratch = (float *) calloc(numMatchings * GetNumMachines(), sizeof(float));
cpuMakespans = (float *) malloc(numMatchings * sizeof(float));
gpuMakespans = (float *) malloc(numMatchings * sizeof(float));
tempMatching = (float *) malloc(GetNumTasks() * sizeof(float));
for (i = 0; i < numMatchings * GetNumTasks(); i++)
hMatching[i] = (float) (rand() % (GetNumMachines() * 100)) / 100.0f;
//Compute the makespans on the CPU
for (i = 0; i < numBlocks; i++)
{
for (j = 0; j < threadsPerBlock; j++)
{
for (k = 0; k < GetNumTasks(); k++)
{
tempMatching[k] = hMatching[i * threadsPerBlock * GetNumTasks() + k * threadsPerBlock + j];
}
cpuMakespans[i * threadsPerBlock + j] = ComputeMakespan(tempMatching, GetNumTasks());
}
}
hipMalloc((void **)&dOut, sizeof(float) * numMatchings );
hipMalloc((void **)&matching, sizeof(float) * numMatchings * GetNumTasks() );
hipMalloc((void **)&scratch, sizeof(float) * numMatchings * GetNumMachines() );
InitTexture();
hipMemcpy(matching, hMatching, sizeof(float) * numMatchings * GetNumTasks(), hipMemcpyHostToDevice);
hipMemcpy(scratch, hScratch, sizeof(float) * numMatchings * GetNumMachines(), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( UpdateFitnessShared), dim3(numBlocks), dim3(threadsPerBlock), threadsPerBlock * GetNumMachines() * sizeof(float), 0, numBlocks, threadsPerBlock, GetNumTasks(), GetNumMachines(), matching, dOut);
hipDeviceSynchronize();
hipMemcpy(gpuMakespans, dOut, sizeof(float) * numMatchings , hipMemcpyDeviceToHost);
for (i = 0; i < numMatchings; i++)
{
if (abs(gpuMakespans[i] - cpuMakespans[i]) > ACCEPTED_DELTA)
{
printf("\t[ERROR] - %d GPU Makespan was: %f (expected: %f)\n", i, gpuMakespans[i], cpuMakespans[i]);
passed = 0;
}
}
PrintTestResults(passed);
free(hMatching);
free(hScratch);
free(cpuMakespans);
free(gpuMakespans);
hipFree(dOut);
hipFree(matching);
hipFree(scratch);
return passed;
}
void RunSwarmFunctionTests()
{
int passed = 1;
printf("\nStarting GPU makespan tests...\n\n");
passed &= TestGPUMakespan();
if (passed)
printf("[PASSED] All makespan tests passed!\n\n");
else
printf("[FAILED] makespan tests failed!\n\n");
}
| b84dfedcff79019dafd43771d7e685e21cf0a041.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "tests.h"
#include "helper.h"
#include "gpu_pso.h"
texture<float, 2, cudaReadModeElementType> texETCMatrix;
__device__ int GetDiscreteCoordT1(float val)
{
return floorf(val);
}
/* Unfortunately, we cannot do external calls to device code, so we have to copy this here under a DIFFERENT name(!!!)...
* Thanks Nvidia!
*/
__device__ float CalcMakespanT(int numTasks, int numMachines, float *matching, float *scratch)
{
int i;
float makespan;
int threadID = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
int taskOffset, machineOffset;
float matchingVal;
float val;
makespan = 0.0f;
taskOffset = __mul24(threadID, numTasks);
machineOffset = __mul24(threadID, numMachines);
//Clear our scratch table
for (i = 0; i < numTasks; i++)
scratch[machineOffset + (int) floorf(matching[taskOffset + i])] = 0.0f;
for (i = 0; i < numTasks; i++)
{
matchingVal = matching[taskOffset + i];
scratch[machineOffset + (int) floorf(matchingVal)] += tex2D(texETCMatrix, matchingVal, (float) i);
val = scratch[machineOffset + (int) floorf(matchingVal)];
if (val > makespan)
makespan = val;
}
return makespan;
}
__global__ void TestMakespan(int numTasks, int numMachines, int numMatchings, float *matching, float *scratch, float *outVal)
{
int threadID = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (threadID < numMatchings)
outVal[threadID] = CalcMakespanT(numTasks, numMachines, matching, scratch);
}
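/* Note: TestMakespan above is kept only as a reference wrapper around CalcMakespanT;
 * TestGPUMakespan below launches UpdateFitnessShared (which comes from one of the included
 * headers) rather than this kernel, and compares its output against the CPU makespans.
 */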
int TestGPUMakespan()
{
int i, j, k;
int passed = 1;
float *dOut, *matching, *scratch;
float *hMatching, *hScratch;
int numMatchings;
int threadsPerBlock, numBlocks;
float *cpuMakespans, *gpuMakespans;
float *tempMatching;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
BuildMachineList("machines100.txt");
BuildTaskList("tasks1000.txt");
GenerateETCMatrix();
numMatchings = 1024;
threadsPerBlock = 64;
numBlocks = CalcNumBlocks(numMatchings, threadsPerBlock);
printf("\tRunning GPU Makespan Test...\n");
srand((unsigned int) time(NULL));
hMatching = (float *) calloc(numMatchings * GetNumTasks(), sizeof(float));
hScratch = (float *) calloc(numMatchings * GetNumMachines(), sizeof(float));
cpuMakespans = (float *) malloc(numMatchings * sizeof(float));
gpuMakespans = (float *) malloc(numMatchings * sizeof(float));
tempMatching = (float *) malloc(GetNumTasks() * sizeof(float));
for (i = 0; i < numMatchings * GetNumTasks(); i++)
hMatching[i] = (float) (rand() % (GetNumMachines() * 100)) / 100.0f;
//Compute the makespans on the CPU
for (i = 0; i < numBlocks; i++)
{
for (j = 0; j < threadsPerBlock; j++)
{
for (k = 0; k < GetNumTasks(); k++)
{
tempMatching[k] = hMatching[i * threadsPerBlock * GetNumTasks() + k * threadsPerBlock + j];
}
cpuMakespans[i * threadsPerBlock + j] = ComputeMakespan(tempMatching, GetNumTasks());
}
}
cudaMalloc((void **)&dOut, sizeof(float) * numMatchings );
cudaMalloc((void **)&matching, sizeof(float) * numMatchings * GetNumTasks() );
cudaMalloc((void **)&scratch, sizeof(float) * numMatchings * GetNumMachines() );
InitTexture();
cudaMemcpy(matching, hMatching, sizeof(float) * numMatchings * GetNumTasks(), cudaMemcpyHostToDevice);
cudaMemcpy(scratch, hScratch, sizeof(float) * numMatchings * GetNumMachines(), cudaMemcpyHostToDevice);
UpdateFitnessShared<<<numBlocks, threadsPerBlock, threadsPerBlock * GetNumMachines() * sizeof(float)>>>(numBlocks, threadsPerBlock, GetNumTasks(), GetNumMachines(), matching, dOut);
cudaThreadSynchronize();
cudaMemcpy(gpuMakespans, dOut, sizeof(float) * numMatchings , cudaMemcpyDeviceToHost);
for (i = 0; i < numMatchings; i++)
{
if (abs(gpuMakespans[i] - cpuMakespans[i]) > ACCEPTED_DELTA)
{
printf("\t[ERROR] - %d GPU Makespan was: %f (expected: %f)\n", i, gpuMakespans[i], cpuMakespans[i]);
passed = 0;
}
}
PrintTestResults(passed);
free(hMatching);
free(hScratch);
free(cpuMakespans);
free(gpuMakespans);
cudaFree(dOut);
cudaFree(matching);
cudaFree(scratch);
return passed;
}
void RunSwarmFunctionTests()
{
int passed = 1;
printf("\nStarting GPU makespan tests...\n\n");
passed &= TestGPUMakespan();
if (passed)
printf("[PASSED] All makespan tests passed!\n\n");
else
printf("[FAILED] makespan tests failed!\n\n");
}
|
48398af3615846b55feb970cc38c0c2d8b1872a9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/cnmem_managed_memory_resource.hpp>
#include <rmm/mr/device/cnmem_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/default_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/detail/error.hpp>
#include <thrust/sequence.h>
#include <thrust/equal.h>
#include <hip/hip_runtime_api.h>
#include <cstddef>
#include <random>
void sync_stream(hipStream_t stream) {
EXPECT_EQ(hipSuccess, hipStreamSynchronize(stream));
}
template <typename MemoryResourceType>
struct DeviceBufferTest : public ::testing::Test {
hipStream_t stream{};
std::size_t size{};
MemoryResourceType mr{};
DeviceBufferTest() {
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1000, 100000);
size = distribution(generator);
}
void SetUp() override { EXPECT_EQ(hipSuccess, hipStreamCreate(&stream)); }
void TearDown() override {
EXPECT_EQ(hipSuccess, hipStreamDestroy(stream));
};
};
using resources = ::testing::Types<
rmm::mr::cuda_memory_resource, rmm::mr::managed_memory_resource,
rmm::mr::cnmem_memory_resource, rmm::mr::cnmem_managed_memory_resource>;
TYPED_TEST_CASE(DeviceBufferTest, resources);
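// Note: TYPED_TEST_CASE is the older GoogleTest spelling; newer releases deprecate it in
// favor of TYPED_TEST_SUITE, so a hedged modernization would be:
//   TYPED_TEST_SUITE(DeviceBufferTest, resources);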
TYPED_TEST(DeviceBufferTest, DefaultMemoryResource) {
rmm::device_buffer buff(this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, DefaultMemoryResourceStream) {
rmm::device_buffer buff(this->size, this->stream);
sync_stream(this->stream);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResource) {
rmm::device_buffer buff(this->size, 0, &this->mr);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(&this->mr, buff.memory_resource());
EXPECT_TRUE(this->mr.is_equal(*buff.memory_resource()));
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResourceStream) {
rmm::device_buffer buff(this->size, this->stream, &this->mr);
sync_stream(this->stream);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(&this->mr, buff.memory_resource());
EXPECT_TRUE(this->mr.is_equal(*buff.memory_resource()));
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromRawDevicePointer) {
void *device_memory{nullptr};
EXPECT_EQ(hipSuccess, hipMalloc(&device_memory, this->size));
rmm::device_buffer buff(device_memory, this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
// TODO check for equality between the contents of the two allocations
EXPECT_EQ(hipSuccess, hipFree(device_memory));
}
TYPED_TEST(DeviceBufferTest, CopyFromRawHostPointer) {
std::vector<uint8_t> host_data(this->size);
rmm::device_buffer buff(static_cast<void *>(host_data.data()), this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
// TODO check for equality between the contents of the two allocations
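  // A hedged sketch of that check (not part of the original test): copy the device bytes back
  // and compare them against host_data, e.g.
  //   std::vector<uint8_t> result(this->size);
  //   EXPECT_EQ(hipSuccess, hipMemcpy(result.data(), buff.data(), this->size, hipMemcpyDeviceToHost));
  //   EXPECT_EQ(host_data, result);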
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptr) {
// can copy from a nullptr only if size == 0
rmm::device_buffer buff(nullptr, 0);
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptrNonZero) {
// can copy from a nullptr only if size == 0
EXPECT_THROW(rmm::device_buffer buff(nullptr, 1), rmm::logic_error);
}
TYPED_TEST(DeviceBufferTest, CopyConstructor) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Initialize buffer
thrust::sequence(thrust::device, static_cast<char *>(buff.data()),
static_cast<char *>(buff.data()) + buff.size(), 0);
rmm::device_buffer buff_copy(buff); // uses default stream and MR
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(), rmm::mr::get_default_resource());
EXPECT_TRUE(
buff_copy.memory_resource()->is_equal(*rmm::mr::get_default_resource()));
EXPECT_EQ(buff_copy.stream(), hipStream_t{0});
EXPECT_TRUE(thrust::equal(thrust::device, static_cast<char *>(buff.data()),
static_cast<char *>(buff.data()) + buff.size(),
static_cast<char *>(buff_copy.data())));
// now use buff's stream and MR
rmm::device_buffer buff_copy2(buff, buff.stream(), buff.memory_resource());
EXPECT_EQ(buff_copy2.memory_resource(), buff.memory_resource());
EXPECT_TRUE(buff_copy2.memory_resource()->is_equal(*buff.memory_resource()));
EXPECT_EQ(buff_copy2.stream(), buff.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSize) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(), rmm::mr::get_default_resource());
EXPECT_TRUE(
buff_copy.memory_resource()->is_equal(*rmm::mr::get_default_resource()));
EXPECT_EQ(buff_copy.stream(), hipStream_t{0});
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyConstructorExplicitMr) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_TRUE(buff.memory_resource()->is_equal(*buff_copy.memory_resource()));
EXPECT_NE(buff.stream(), buff_copy.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSizeExplicitMr) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_NE(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_TRUE(buff.memory_resource()->is_equal(*buff_copy.memory_resource()));
EXPECT_NE(buff.stream(), buff_copy.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyAssignmentToDefault) {
rmm::device_buffer const from(this->size, 0, &this->mr);
rmm::device_buffer to{};
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_EQ(from.capacity(), to.capacity());
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, CopyAssignment) {
rmm::device_buffer from(this->size, 0, &this->mr);
rmm::device_buffer to(this->size - 1, 0, &this->mr);
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_EQ(from.capacity(), to.capacity());
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, CopyAssignmentCapacityLargerThanSize) {
rmm::device_buffer from(this->size, 0, &this->mr);
from.resize(from.size() - 1);
rmm::device_buffer to(42, 0, &this->mr);
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_NE(from.capacity(),
to.capacity()); // copy doesn't copy the larger capacity
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, SelfCopyAssignment) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
buff = buff; // self-assignment shouldn't modify the buffer
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(p, buff.data());
EXPECT_EQ(size, buff.size());
EXPECT_EQ(capacity, buff.capacity());
EXPECT_EQ(stream, buff.stream());
EXPECT_EQ(mr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveConstructor) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(p, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(0, buff.stream());
EXPECT_NE(nullptr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveConstructorStream) {
rmm::device_buffer buff(this->size, this->stream, &this->mr);
sync_stream(this->stream);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
sync_stream(this->stream);
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(p, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(0, buff.stream());
EXPECT_NE(nullptr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveAssignmentToDefault) {
rmm::device_buffer from(this->size, 0, &this->mr);
auto p = from.data();
auto size = from.size();
auto capacity = from.capacity();
auto mr = from.memory_resource();
auto stream = from.stream();
rmm::device_buffer to;
EXPECT_NO_THROW(to = std::move(from));
// contents of `from` should be in `to`
EXPECT_NE(nullptr, to.data());
EXPECT_EQ(p, to.data());
EXPECT_EQ(size, to.size());
EXPECT_EQ(capacity, to.capacity());
EXPECT_EQ(stream, to.stream());
EXPECT_EQ(mr, to.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, from.data());
EXPECT_EQ(0, from.size());
EXPECT_EQ(0, from.capacity());
EXPECT_EQ(0, from.stream());
EXPECT_NE(nullptr, from.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveAssignment) {
rmm::device_buffer from(this->size, 0, &this->mr);
auto p = from.data();
auto size = from.size();
auto capacity = from.capacity();
auto mr = from.memory_resource();
auto stream = from.stream();
rmm::device_buffer to(this->size - 1, 0, &this->mr);
EXPECT_NO_THROW(to = std::move(from));
// contents of `from` should be in `to`
EXPECT_NE(nullptr, to.data());
EXPECT_EQ(p, to.data());
EXPECT_EQ(size, to.size());
EXPECT_EQ(capacity, to.capacity());
EXPECT_EQ(stream, to.stream());
EXPECT_EQ(mr, to.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, from.data());
EXPECT_EQ(0, from.size());
EXPECT_EQ(0, from.capacity());
EXPECT_EQ(0, from.stream());
EXPECT_NE(nullptr, from.memory_resource());
}
TYPED_TEST(DeviceBufferTest, SelfMoveAssignment) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
buff = buff; // self-assignment shouldn't modify the buffer
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(p, buff.data());
EXPECT_EQ(size, buff.size());
EXPECT_EQ(capacity, buff.capacity());
EXPECT_EQ(stream, buff.stream());
EXPECT_EQ(mr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, ResizeSmaller) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto old_data = buff.data();
auto new_size = this->size - 1;
buff.resize(new_size);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(this->size, buff.capacity()); // Capacity should be unchanged
// Resizing smaller means the existing allocation should remain unchanged
EXPECT_EQ(old_data, buff.data());
EXPECT_NO_THROW(buff.shrink_to_fit());
EXPECT_NE(nullptr, buff.data());
// A reallocation should have occurred
EXPECT_NE(old_data, buff.data());
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(buff.capacity(), buff.size());
// TODO Verify device memory contents are equal
}
TYPED_TEST(DeviceBufferTest, ResizeBigger) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto old_data = buff.data();
auto new_size = this->size + 1;
buff.resize(new_size);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(new_size, buff.capacity());
// Resizing bigger means the data should point to a new allocation
EXPECT_NE(old_data, buff.data());
}
| 48398af3615846b55feb970cc38c0c2d8b1872a9.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <rmm/device_buffer.hpp>
#include <rmm/mr/device/cnmem_managed_memory_resource.hpp>
#include <rmm/mr/device/cnmem_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/default_memory_resource.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/detail/error.hpp>
#include <thrust/sequence.h>
#include <thrust/equal.h>
#include <cuda_runtime_api.h>
#include <cstddef>
#include <random>
void sync_stream(cudaStream_t stream) {
EXPECT_EQ(cudaSuccess, cudaStreamSynchronize(stream));
}
template <typename MemoryResourceType>
struct DeviceBufferTest : public ::testing::Test {
cudaStream_t stream{};
std::size_t size{};
MemoryResourceType mr{};
DeviceBufferTest() {
std::default_random_engine generator;
std::uniform_int_distribution<std::size_t> distribution(1000, 100000);
size = distribution(generator);
}
void SetUp() override { EXPECT_EQ(cudaSuccess, cudaStreamCreate(&stream)); }
void TearDown() override {
EXPECT_EQ(cudaSuccess, cudaStreamDestroy(stream));
};
};
using resources = ::testing::Types<
rmm::mr::cuda_memory_resource, rmm::mr::managed_memory_resource,
rmm::mr::cnmem_memory_resource, rmm::mr::cnmem_managed_memory_resource>;
TYPED_TEST_CASE(DeviceBufferTest, resources);
TYPED_TEST(DeviceBufferTest, DefaultMemoryResource) {
rmm::device_buffer buff(this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, DefaultMemoryResourceStream) {
rmm::device_buffer buff(this->size, this->stream);
sync_stream(this->stream);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResource) {
rmm::device_buffer buff(this->size, 0, &this->mr);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(&this->mr, buff.memory_resource());
EXPECT_TRUE(this->mr.is_equal(*buff.memory_resource()));
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, ExplicitMemoryResourceStream) {
rmm::device_buffer buff(this->size, this->stream, &this->mr);
sync_stream(this->stream);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(&this->mr, buff.memory_resource());
EXPECT_TRUE(this->mr.is_equal(*buff.memory_resource()));
EXPECT_EQ(this->stream, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromRawDevicePointer) {
void *device_memory{nullptr};
EXPECT_EQ(cudaSuccess, cudaMalloc(&device_memory, this->size));
rmm::device_buffer buff(device_memory, this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
// TODO check for equality between the contents of the two allocations
EXPECT_EQ(cudaSuccess, cudaFree(device_memory));
}
TYPED_TEST(DeviceBufferTest, CopyFromRawHostPointer) {
std::vector<uint8_t> host_data(this->size);
rmm::device_buffer buff(static_cast<void *>(host_data.data()), this->size);
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(this->size, buff.size());
EXPECT_EQ(this->size, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
// TODO check for equality between the contents of the two allocations
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptr) {
// can copy from a nullptr only if size == 0
rmm::device_buffer buff(nullptr, 0);
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(rmm::mr::get_default_resource(), buff.memory_resource());
EXPECT_EQ(0, buff.stream());
}
TYPED_TEST(DeviceBufferTest, CopyFromNullptrNonZero) {
// can copy from a nullptr only if size == 0
EXPECT_THROW(rmm::device_buffer buff(nullptr, 1), rmm::logic_error);
}
TYPED_TEST(DeviceBufferTest, CopyConstructor) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Initialize buffer
thrust::sequence(thrust::device, static_cast<char *>(buff.data()),
static_cast<char *>(buff.data()) + buff.size(), 0);
rmm::device_buffer buff_copy(buff); // uses default stream and MR
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(), rmm::mr::get_default_resource());
EXPECT_TRUE(
buff_copy.memory_resource()->is_equal(*rmm::mr::get_default_resource()));
EXPECT_EQ(buff_copy.stream(), cudaStream_t{0});
EXPECT_TRUE(thrust::equal(thrust::device, static_cast<char *>(buff.data()),
static_cast<char *>(buff.data()) + buff.size(),
static_cast<char *>(buff_copy.data())));
// now use buff's stream and MR
rmm::device_buffer buff_copy2(buff, buff.stream(), buff.memory_resource());
EXPECT_EQ(buff_copy2.memory_resource(), buff.memory_resource());
EXPECT_TRUE(buff_copy2.memory_resource()->is_equal(*buff.memory_resource()));
EXPECT_EQ(buff_copy2.stream(), buff.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSize) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_EQ(buff_copy.memory_resource(), rmm::mr::get_default_resource());
EXPECT_TRUE(
buff_copy.memory_resource()->is_equal(*rmm::mr::get_default_resource()));
EXPECT_EQ(buff_copy.stream(), cudaStream_t{0});
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyConstructorExplicitMr) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
EXPECT_EQ(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_TRUE(buff.memory_resource()->is_equal(*buff_copy.memory_resource()));
EXPECT_NE(buff.stream(), buff_copy.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyCapacityLargerThanSizeExplicitMr) {
rmm::device_buffer buff(this->size, 0, &this->mr);
// Resizing smaller to make `size()` < `capacity()`
auto new_size = this->size - 1;
buff.resize(new_size);
// Can't do this until RMM cmake is set up to build cuda files
// thrust::sequence(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buffer.data()) + buff.size(),
// 0);
rmm::device_buffer buff_copy(buff, this->stream, &this->mr);
EXPECT_NE(nullptr, buff_copy.data());
EXPECT_NE(buff.data(), buff_copy.data());
EXPECT_EQ(buff.size(), buff_copy.size());
// The capacity of the copy should be equal to the `size()` of the original
EXPECT_EQ(new_size, buff_copy.capacity());
EXPECT_NE(buff.capacity(), buff_copy.capacity());
EXPECT_EQ(buff.memory_resource(), buff_copy.memory_resource());
EXPECT_TRUE(buff.memory_resource()->is_equal(*buff_copy.memory_resource()));
EXPECT_NE(buff.stream(), buff_copy.stream());
// EXPECT_TRUE(
// thrust::equal(thrust::device, static_cast<signed char *>(buff.data()),
// static_cast<signed char *>(buff.data()) + buff.size(),
// static_cast<signed char *>(buff_copy.data())));
}
TYPED_TEST(DeviceBufferTest, CopyAssignmentToDefault) {
rmm::device_buffer const from(this->size, 0, &this->mr);
rmm::device_buffer to{};
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_EQ(from.capacity(), to.capacity());
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, CopyAssignment) {
rmm::device_buffer from(this->size, 0, &this->mr);
rmm::device_buffer to(this->size - 1, 0, &this->mr);
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_EQ(from.capacity(), to.capacity());
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, CopyAssignmentCapacityLargerThanSize) {
rmm::device_buffer from(this->size, 0, &this->mr);
from.resize(from.size() - 1);
rmm::device_buffer to(42, 0, &this->mr);
EXPECT_NO_THROW(to = from);
EXPECT_NE(nullptr, to.data());
EXPECT_NE(nullptr, from.data());
EXPECT_NE(from.data(), to.data());
EXPECT_EQ(from.size(), to.size());
EXPECT_NE(from.capacity(),
to.capacity()); // copy doesn't copy the larger capacity
EXPECT_EQ(from.stream(), to.stream());
EXPECT_EQ(from.memory_resource(), to.memory_resource());
// TODO Check contents of memory
}
TYPED_TEST(DeviceBufferTest, SelfCopyAssignment) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
buff = buff; // self-assignment shouldn't modify the buffer
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(p, buff.data());
EXPECT_EQ(size, buff.size());
EXPECT_EQ(capacity, buff.capacity());
EXPECT_EQ(stream, buff.stream());
EXPECT_EQ(mr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveConstructor) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(p, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(0, buff.stream());
EXPECT_NE(nullptr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveConstructorStream) {
rmm::device_buffer buff(this->size, this->stream, &this->mr);
sync_stream(this->stream);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
// New buffer should have the same contents as the original
rmm::device_buffer buff_new(std::move(buff));
sync_stream(this->stream);
EXPECT_NE(nullptr, buff_new.data());
EXPECT_EQ(p, buff_new.data());
EXPECT_EQ(size, buff_new.size());
EXPECT_EQ(capacity, buff_new.capacity());
EXPECT_EQ(stream, buff_new.stream());
EXPECT_EQ(mr, buff_new.memory_resource());
// Original buffer should be empty
EXPECT_EQ(nullptr, buff.data());
EXPECT_EQ(0, buff.size());
EXPECT_EQ(0, buff.capacity());
EXPECT_EQ(0, buff.stream());
EXPECT_NE(nullptr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveAssignmentToDefault) {
rmm::device_buffer from(this->size, 0, &this->mr);
auto p = from.data();
auto size = from.size();
auto capacity = from.capacity();
auto mr = from.memory_resource();
auto stream = from.stream();
rmm::device_buffer to;
EXPECT_NO_THROW(to = std::move(from));
// contents of `from` should be in `to`
EXPECT_NE(nullptr, to.data());
EXPECT_EQ(p, to.data());
EXPECT_EQ(size, to.size());
EXPECT_EQ(capacity, to.capacity());
EXPECT_EQ(stream, to.stream());
EXPECT_EQ(mr, to.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, from.data());
EXPECT_EQ(0, from.size());
EXPECT_EQ(0, from.capacity());
EXPECT_EQ(0, from.stream());
EXPECT_NE(nullptr, from.memory_resource());
}
TYPED_TEST(DeviceBufferTest, MoveAssignment) {
rmm::device_buffer from(this->size, 0, &this->mr);
auto p = from.data();
auto size = from.size();
auto capacity = from.capacity();
auto mr = from.memory_resource();
auto stream = from.stream();
rmm::device_buffer to(this->size - 1, 0, &this->mr);
EXPECT_NO_THROW(to = std::move(from));
// contents of `from` should be in `to`
EXPECT_NE(nullptr, to.data());
EXPECT_EQ(p, to.data());
EXPECT_EQ(size, to.size());
EXPECT_EQ(capacity, to.capacity());
EXPECT_EQ(stream, to.stream());
EXPECT_EQ(mr, to.memory_resource());
// `from` should be empty
EXPECT_EQ(nullptr, from.data());
EXPECT_EQ(0, from.size());
EXPECT_EQ(0, from.capacity());
EXPECT_EQ(0, from.stream());
EXPECT_NE(nullptr, from.memory_resource());
}
TYPED_TEST(DeviceBufferTest, SelfMoveAssignment) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto p = buff.data();
auto size = buff.size();
auto capacity = buff.capacity();
auto mr = buff.memory_resource();
auto stream = buff.stream();
buff = buff; // self-assignment shouldn't modify the buffer
EXPECT_NE(nullptr, buff.data());
EXPECT_EQ(p, buff.data());
EXPECT_EQ(size, buff.size());
EXPECT_EQ(capacity, buff.capacity());
EXPECT_EQ(stream, buff.stream());
EXPECT_EQ(mr, buff.memory_resource());
}
TYPED_TEST(DeviceBufferTest, ResizeSmaller) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto old_data = buff.data();
auto new_size = this->size - 1;
buff.resize(new_size);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(this->size, buff.capacity()); // Capacity should be unchanged
// Resizing smaller means the existing allocation should remain unchanged
EXPECT_EQ(old_data, buff.data());
EXPECT_NO_THROW(buff.shrink_to_fit());
EXPECT_NE(nullptr, buff.data());
// A reallocation should have occurred
EXPECT_NE(old_data, buff.data());
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(buff.capacity(), buff.size());
// TODO Verify device memory contents are equal
}
TYPED_TEST(DeviceBufferTest, ResizeBigger) {
rmm::device_buffer buff(this->size, 0, &this->mr);
auto old_data = buff.data();
auto new_size = this->size + 1;
buff.resize(new_size);
EXPECT_EQ(new_size, buff.size());
EXPECT_EQ(new_size, buff.capacity());
// Resizing bigger means the data should point to a new allocation
EXPECT_NE(old_data, buff.data());
}
|
7476fef4c303b1832c1117ded080bae154887c26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void Float(float * x, bool* y, size_t idxf, size_t idxb, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[(idxf)*N + i] = float(y[(idxb-1)*N + i]);
return;
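	// A hedged host-side launch sketch for this kernel (block size and rounding are illustrative):
	//   const int threads = 256;
	//   const int blocks = (int)((N + threads - 1) / threads);
	//   hipLaunchKernelGGL(Float, dim3(blocks), dim3(threads), 0, 0, x, y, idxf, idxb, N);
	// Each thread grid-strides over N elements, widening the bools in slice (idxb - 1) of y
	// into floats in slice idxf of x.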
} | 7476fef4c303b1832c1117ded080bae154887c26.cu | #include "includes.h"
__global__ void Float(float * x, bool* y, size_t idxf, size_t idxb, size_t N)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x)
x[(idxf)*N + i] = float(y[(idxb-1)*N + i]);
return;
} |
016eb210649c4bf94afb1c29328ccca49b45ae9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define TPB 256
#define B 1
__global__ void hello()
{
printf("Hello World ! My thread id is %2d \n",threadIdx.x);
}
int main()
{
hipLaunchKernelGGL(( hello), dim3(B),dim3(TPB), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| 016eb210649c4bf94afb1c29328ccca49b45ae9c.cu | #include <stdio.h>
#define TPB 256
#define B 1
__global__ void hello()
{
printf("Hello World ! My thread id is %2d \n",threadIdx.x);
}
int main()
{
hello<<<B,TPB>>>();
cudaDeviceSynchronize();
return 0;
}
|
074feecdddbad9864799c6bf4a1f76d6576c394a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <hip/hip_runtime.h>
#include "convolution_common.h"
//#include "convolution_hip.cuh"
#include "convolution_test.cuh"
using namespace std;
extern "C" float convolution(
float *input,
float *kernel,
float *output,
int width_input,
int height_input,
int deep_input,
int width_kernel,
int height_kernel,
int deep_kernel,
int long_kernel,
int width_output,
int height_output,
int deep_output,
int stride_x,
int stride_y,
int padding_x,
int padding_y
)
{
dim3 grid(width_output, height_output, deep_output);
dim3 thread(width_kernel,height_kernel,deep_kernel);
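    // Launch mapping, as configured above: one thread block per output element
    // (grid = width_output x height_output x deep_output) and one thread per kernel element
    // (block = width_kernel x height_kernel x deep_kernel); the per-output accumulation is
    // presumably done inside Convolution_kernel, which comes from one of the included headers.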
hipError_t error;
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(( Convolution_kernel), dim3(grid), dim3(thread) , 0, 0,
input,
kernel,
output,
width_input,
height_input,
deep_input,
width_kernel,
height_kernel,
deep_kernel,
long_kernel,
width_output,
height_output,
deep_output,
stride_x,
stride_y,
padding_x,
padding_y
);
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
return msecTotal;
} | 074feecdddbad9864799c6bf4a1f76d6576c394a.cu | #include <iostream>
#include <stdio.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <cuda_runtime.h>
#include "convolution_common.h"
//#include "convolution.cuh"
#include "convolution_test.cuh"
using namespace std;
extern "C" float convolution(
float *input,
float *kernel,
float *output,
int width_input,
int height_input,
int deep_input,
int width_kernel,
int height_kernel,
int deep_kernel,
int long_kernel,
int width_output,
int height_output,
int deep_output,
int stride_x,
int stride_y,
int padding_x,
int padding_y
)
{
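// Launch configuration: one thread block per output element
// (width_output x height_output x deep_output) and one thread per filter
// element (width_kernel x height_kernel x deep_kernel). The kernel body
// itself is defined in convolution_test.cuh.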
dim3 grid(width_output, height_output, deep_output);
dim3 thread(width_kernel,height_kernel,deep_kernel);
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
Convolution_kernel<<< grid, thread >>>(
input,
kernel,
output,
width_input,
height_input,
deep_input,
width_kernel,
height_kernel,
deep_kernel,
long_kernel,
width_output,
height_output,
deep_output,
stride_x,
stride_y,
padding_x,
padding_y
);
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
return msecTotal;
} |
047cb6f0ea5fce403bd8e7c41d37f332975bf96a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Modified by Frost for 1D usage
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T linear_interpolate(const T* bottom_data,
const int height,
T t,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
return 0;
}
if (t <= 0) t = 0;
int t_low = (int) t;
int t_high;
// get closest integers to t
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
// get the distance to t
T lt = t - t_low;
T ht = 1. - lt;
// do linear interpolation
T v1 = bottom_data[t_low];
T v2 = bottom_data[t_high];
T w1 = ht, w2 = lt;
T val = (w1 * v1 + w2 * v2);
// printf("Check Linear Interpolate: w1=%f, v1=%f, w2=%f, v2=%f \n", w1, v1, w2, v2);
return val;
}
template <typename T>
__global__ void Align1DForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height,
const int pooled_height,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = index % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
// printf("Debug Main Loop: get pt, c, n are %d, %d, %d \n", pt, c, n);
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start = offset_bottom_rois[1] * spatial_scale;
T roi_end = offset_bottom_rois[2] * spatial_scale;
// printf("Debug roi boundary: w1, w2, is %f, %f \n", roi_start,roi_end,);
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
T output_val = 0.;
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start + pt * bin_size + static_cast<T>(it + .5f) * bin_size / static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T val = linear_interpolate(offset_bottom_data, height, t, index);
// printf("Debug linear_interpolate: input=height:%d, t:%f, ... ; output=val:%f \n", height, t, val);
output_val += val;
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void linear_interpolate_gradient(
const int height,
T t,
T & w1, T & w2,
int & t_low, int & t_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
w1 = w2 = 0.;
t_low = t_high = -1;
return;
}
if (t <= 0) t = 0;
t_low = (int) t;
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
T lt = t - t_low;
T ht = 1. - lt;
// T val = (w1 * v1 + w2 * v2);
// T w1 = ht, w2 = lt;
w1 = ht , w2 = lt;
return;
}
template <typename T>
__global__ void Align1DBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height,
const int pooled_height,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = (index ) % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start= offset_bottom_rois[1] * spatial_scale;
T roi_end= offset_bottom_rois[2] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height;
int top_offset = (n * channels + c) * pooled_height;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[pt];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid= (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start+ pt * bin_size+ static_cast<T>(it + .5f) * bin_size/ static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T w1, w2;
int t_low, t_high;
linear_interpolate_gradient(height, t, w1, w2, t_low, t_high, index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
if (t_low >= 0 && t_high >= 0)
{
atomicAdd(offset_bottom_diff + t_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + t_high, static_cast<T>(g2));
} // if
} // it
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor Align_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto output = at::empty({num_rois, channels, pooled_height}, input.options());
auto output_size = num_rois * pooled_height * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
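// 512 threads per block, at most 4096 blocks; the grid-stride
// CUDA_1D_KERNEL_LOOP inside the kernel covers any remaining output elements.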
dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
// printf("Debug main function: height:%d\n", height);
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "Align1D_forward", [&] {
hipLaunchKernelGGL(( Align1DForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor Align_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int batch_size,
const int channels,
const int height,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( Align1DBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
| 047cb6f0ea5fce403bd8e7c41d37f332975bf96a.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
// Modified by Frost for 1D usage
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T linear_interpolate(const T* bottom_data,
const int height,
T t,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
return 0;
}
if (t <= 0) t = 0;
int t_low = (int) t;
int t_high;
// get closest integers to t
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
// get the distance to t
T lt = t - t_low;
T ht = 1. - lt;
// do linear interpolation
T v1 = bottom_data[t_low];
T v2 = bottom_data[t_high];
T w1 = ht, w2 = lt;
T val = (w1 * v1 + w2 * v2);
// printf("Check Linear Interpolate: w1=%f, v1=%f, w2=%f, v2=%f \n", w1, v1, w2, v2);
return val;
}
template <typename T>
__global__ void Align1DForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height,
const int pooled_height,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = index % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
// printf("Debug Main Loop: get pt, c, n are %d, %d, %d \n", pt, c, n);
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start = offset_bottom_rois[1] * spatial_scale;
T roi_end = offset_bottom_rois[2] * spatial_scale;
// printf("Debug roi boundary: w1, w2, is %f, %f \n", roi_start,roi_end,);
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
T output_val = 0.;
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start + pt * bin_size + static_cast<T>(it + .5f) * bin_size / static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T val = linear_interpolate(offset_bottom_data, height, t, index);
// printf("Debug linear_interpolate: input=height:%d, t:%f, ... ; output=val:%f \n", height, t, val);
output_val += val;
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void linear_interpolate_gradient(
const int height,
T t,
T & w1, T & w2,
int & t_low, int & t_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (t < -1.0 || t > height) {
//empty
w1 = w2 = 0.;
t_low = t_high = -1;
return;
}
if (t <= 0) t = 0;
t_low = (int) t;
if (t_low >= height - 1) {
t_high = t_low = height - 1;
t = (T) t_low;
} else {
t_high = t_low + 1;
}
T lt = t - t_low;
T ht = 1. - lt;
// T val = (w1 * v1 + w2 * v2);
// T w1 = ht, w2 = lt;
w1 = ht , w2 = lt;
return;
}
template <typename T>
__global__ void Align1DBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height,
const int pooled_height,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, pt) is an element in the pooled output
int pt = (index ) % pooled_height;
int c = (index / pooled_height) % channels;
int n = index / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 3;
int roi_batch_ind = offset_bottom_rois[0];
// Do not using rounding; this implementation detail is critical
T roi_start= offset_bottom_rois[1] * spatial_scale;
T roi_end= offset_bottom_rois[2] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_height = max(roi_end- roi_start, (T)1.);
T bin_size = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height;
int top_offset = (n * channels + c) * pooled_height;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[pt];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid= (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid; // e.g. = 4
for (int it = 0; it < roi_bin_grid; it ++) // e.g., it = 0, 1
{
const T t = roi_start+ pt * bin_size+ static_cast<T>(it + .5f) * bin_size/ static_cast<T>(roi_bin_grid); // e.g., 0.5, 1.5
T w1, w2;
int t_low, t_high;
linear_interpolate_gradient(height, t, w1, w2, t_low, t_high, index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
if (t_low >= 0 && t_high >= 0)
{
atomicAdd(offset_bottom_diff + t_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + t_high, static_cast<T>(g2));
} // if
} // it
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor Align_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto output = at::empty({num_rois, channels, pooled_height}, input.options());
auto output_size = num_rois * pooled_height * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
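// 512 threads per block, at most 4096 blocks; the grid-stride
// CUDA_1D_KERNEL_LOOP inside the kernel covers any remaining output elements.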
dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L));
dim3 block(512);
// printf("Debug main function: height:%d\n", height);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "Align1D_forward", [&] {
Align1DForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor Align_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int batch_size,
const int channels,
const int height,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] {
Align1DBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
pooled_height,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
7fe4b1111f11f87cc91c8114e3ad9108ff628518.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
#define TILE_WIDTH 2
#define WIDTH 4
using namespace std;
/*
//MatrixMul_Kernel Algorithm GPU
__global__
void MatrixMulKernel(float* M, float* N, float* P, int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute P element
for (int ph = 0; ph < ceil(Width / (float)TILE_WIDTH); ++ph) {
// Collaborative loading of M and N tiles into shared memory
if ((Row < Width) && ((ph*TILE_WIDTH + tx) < Width))
Mds[ty][tx] = M[Row*Width + ph*TILE_WIDTH + tx];
if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < Width) && (Col < Width))
P[Row*Width + Col] = Pvalue;
}
*/
//MatrixMul_Kernel Algorithm GPU
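// Tiled multiply using shared memory: each phase loads a tile of M and N into
// Mds/Nds, synchronizes, then accumulates partial dot products. Note that Mds
// is declared TILE_WIDTH x WIDTH here and the M load is indexed with ph*WIDTH,
// so for Width == WIDTH the M tile is only refreshed in phase 0 and later
// phases reuse it; this differs from the standard tiling in the commented-out
// reference version above.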
__global__
void MatrixMulKernel(float* M, float* N, float* P, int Width) {
__shared__ float Mds[TILE_WIDTH][WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute P element
for (int ph = 0; ph < ceil(Width / (float)TILE_WIDTH); ++ph) {
// Collaborative loading of M and N tiles into shared memory
if ((Row < Width) && ((ph*WIDTH + tx) < Width))
Mds[ty][tx] = M[Row*Width + ph*WIDTH + tx];
if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < Width) && (Col < Width))
P[Row*Width + Col] = Pvalue;
}
/*MatrixMul_GPU*/
void MatrixMulGPU(float* h_M, float* h_N, float* h_P, int width) {
int size = width * width * sizeof(float);
float *d_M;
float *d_N;
float *d_P;
hipMalloc(&d_M, size);
hipMemcpy(d_M, h_M, size, hipMemcpyHostToDevice);
hipMalloc(&d_N, size);
hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice);
hipMalloc(&d_P, size);
hipMemcpy(d_P, h_P, size, hipMemcpyHostToDevice);
dim3 dimGrid(ceil(width / 2.0), ceil(width / 2.0), 1);
dim3 dimBlock(2.0, 2.0, 1);
MatrixMulKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, width);
hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost);
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
}
int main() {
float *h_M, *h_N, *h_P;
int width = 4;
h_M = (float*)malloc(width*width * sizeof(float));
h_N = (float*)malloc(width*width * sizeof(float));
h_P = (float*)malloc(width*width * sizeof(float));
for (int i = 0; i < width*width; i++) {
h_M[i] = 2.0f;
h_N[i] = 3.0f;
h_P[i] = 0.0f;
}
chrono::time_point<chrono::system_clock> MatrixMulGPU_Start, MatrixMulGPU_End;
MatrixMulGPU_Start = chrono::system_clock::now();
MatrixMulGPU(h_M, h_N, h_P, width);
MatrixMulGPU_End = chrono::system_clock::now();
cout << "MatrixMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatrixMulGPU_End - MatrixMulGPU_Start).count() << "ns." << endl;
/*Print*/
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
cout << h_P[i*width + j] << " ";
}
cout << endl;
}
cout << endl;
free(h_M);
free(h_N);
free(h_P);
return 0;
}
| 7fe4b1111f11f87cc91c8114e3ad9108ff628518.cu | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
#define TILE_WIDTH 2
#define WIDTH 4
using namespace std;
/*
//MatrixMul_Kernel Algorithm GPU
__global__
void MatrixMulKernel(float* M, float* N, float* P, int Width) {
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute P element
for (int ph = 0; ph < ceil(Width / (float)TILE_WIDTH); ++ph) {
// Collaborative loading of M and N tiles into shared memory
if ((Row < Width) && ((ph*TILE_WIDTH + tx) < Width))
Mds[ty][tx] = M[Row*Width + ph*TILE_WIDTH + tx];
if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < Width) && (Col < Width))
P[Row*Width + Col] = Pvalue;
}
*/
//MatrixMul_Kernel Algorithm GPU
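// Tiled multiply using shared memory: each phase loads a tile of M and N into
// Mds/Nds, synchronizes, then accumulates partial dot products. Note that Mds
// is declared TILE_WIDTH x WIDTH here and the M load is indexed with ph*WIDTH,
// so for Width == WIDTH the M tile is only refreshed in phase 0 and later
// phases reuse it; this differs from the standard tiling in the commented-out
// reference version above.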
__global__
void MatrixMulKernel(float* M, float* N, float* P, int Width) {
__shared__ float Mds[TILE_WIDTH][WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the M and N tiles required to compute P element
for (int ph = 0; ph < ceil(Width / (float)TILE_WIDTH); ++ph) {
// Collaborative loading of M and N tiles into shared memory
if ((Row < Width) && ((ph*WIDTH + tx) < Width))
Mds[ty][tx] = M[Row*Width + ph*WIDTH + tx];
if (((ph*TILE_WIDTH + ty) < Width) && (Col < Width))
Nds[ty][tx] = N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ((Row < Width) && (Col < Width))
P[Row*Width + Col] = Pvalue;
}
/*MatrixMul_GPU*/
void MatrixMulGPU(float* h_M, float* h_N, float* h_P, int width) {
int size = width * width * sizeof(float);
float *d_M;
float *d_N;
float *d_P;
cudaMalloc(&d_M, size);
cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
cudaMalloc(&d_N, size);
cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
cudaMalloc(&d_P, size);
cudaMemcpy(d_P, h_P, size, cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(width / 2.0), ceil(width / 2.0), 1);
dim3 dimBlock(2.0, 2.0, 1);
MatrixMulKernel << < dimGrid, dimBlock >> > (d_M, d_N, d_P, width);
cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
}
int main() {
float *h_M, *h_N, *h_P;
int width = 4;
h_M = (float*)malloc(width*width * sizeof(float));
h_N = (float*)malloc(width*width * sizeof(float));
h_P = (float*)malloc(width*width * sizeof(float));
for (int i = 0; i < width*width; i++) {
h_M[i] = 2.0f;
h_N[i] = 3.0f;
h_P[i] = 0.0f;
}
chrono::time_point<chrono::system_clock> MatrixMulGPU_Start, MatrixMulGPU_End;
MatrixMulGPU_Start = chrono::system_clock::now();
MatrixMulGPU(h_M, h_N, h_P, width);
MatrixMulGPU_End = chrono::system_clock::now();
cout << "MatrixMul_GPU: " << chrono::duration_cast<chrono::nanoseconds>(MatrixMulGPU_End - MatrixMulGPU_Start).count() << "ns." << endl;
/*Print*/
for (int i = 0; i < width; i++) {
for (int j = 0; j < width; j++) {
cout << h_P[i*width + j] << " ";
}
cout << endl;
}
cout << endl;
free(h_M);
free(h_N);
free(h_P);
return 0;
}
|
9cab8d9e4ff29eaabbf25e3ea7ff0583397e2251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <quantiles/tdigest/tdigest_util.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/merge.cuh>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/merge.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/replace.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace tdigest {
namespace detail {
namespace {
// the most representative point within a cluster of similar
// values. {mean, weight}
// NOTE: Using a tuple here instead of a struct to take advantage of
// thrust zip iterators for output.
using centroid = thrust::tuple<double, double, bool>;
// make a centroid from a scalar with a weight of 1.
template <typename T>
struct make_centroid {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
auto const is_valid = col.is_valid(index);
auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0;
auto const weight = is_valid ? 1.0 : 0.0;
return {mean, weight, is_valid};
}
};
// make a centroid from a scalar with a weight of 1. this functor
// assumes any value index it is passed is not null
template <typename T>
struct make_centroid_no_nulls {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
return {static_cast<double>(col.element<T>(index)), 1.0, true};
}
};
// make a centroid from an input stream of mean/weight values.
struct make_weighted_centroid {
double const* mean;
double const* weight;
centroid operator() __device__(size_type index) { return {mean[index], weight[index], true}; }
};
// merge two centroids
struct merge_centroids {
centroid operator() __device__(centroid const& lhs, centroid const& rhs) const
{
bool const lhs_valid = thrust::get<2>(lhs);
bool const rhs_valid = thrust::get<2>(rhs);
if (!lhs_valid && !rhs_valid) { return {0, 0, false}; }
if (!lhs_valid) { return rhs; }
if (!rhs_valid) { return lhs; }
double const lhs_mean = thrust::get<0>(lhs);
double const rhs_mean = thrust::get<0>(rhs);
double const lhs_weight = thrust::get<1>(lhs);
double const rhs_weight = thrust::get<1>(rhs);
double const new_weight = lhs_weight + rhs_weight;
return {(lhs_mean * lhs_weight + rhs_mean * rhs_weight) / new_weight, new_weight, true};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the grouped input stream prior to
* the specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights_grouped {
offset_type const* group_offsets;
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
auto const group_size = group_offsets[group_index + 1] - group_offsets[group_index];
return {f, relative_weight_index < group_size ? relative_weight_index : group_size - 1};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights {
size_type const input_size;
thrust::pair<double, int> operator() __device__(double next_limit, size_type) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
return {f, relative_weight_index < input_size ? relative_weight_index : input_size - 1};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes we are dealing with grouped, sorted, weighted centroids.
*/
template <typename GroupOffsetsIter>
struct nearest_value_centroid_weights {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets; // groups
offset_type const* inner_offsets; // tdigests within a group
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
auto const num_weights = inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
// NOTE: as it is today, this functor will never be called for any digests that are empty, but
// I'll leave this check here for safety.
if (num_weights == 0) { return thrust::pair<double, int>{0, 0}; }
double const* group_cumulative_weights = cumulative_weights + inner_offsets[tdigest_begin];
auto const index = ((thrust::lower_bound(thrust::seq,
group_cumulative_weights,
group_cumulative_weights + num_weights,
next_limit)) -
group_cumulative_weights);
return index == 0 ? thrust::pair<double, int>{0, 0}
: thrust::pair<double, int>{group_cumulative_weights[index - 1],
static_cast<int>(index) - 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight_grouped {
cudf::device_span<size_type const> group_offsets;
cudf::device_span<size_type const> group_labels;
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const group_index = group_labels[value_index];
auto const relative_value_index = value_index - group_offsets[group_index];
return {group_index, relative_value_index, relative_value_index + 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight {
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
return {0, value_index, value_index + 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input centroids.
*
* This functor assumes we are dealing with grouped, weighted centroids.
*/
template <typename GroupLabelsIter, typename GroupOffsetsIter>
struct cumulative_centroid_weight {
double const* cumulative_weights;
GroupLabelsIter group_labels;
GroupOffsetsIter outer_offsets; // groups
cudf::device_span<offset_type const> inner_offsets;  // tdigests within a group
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const tdigest_index =
static_cast<size_type>(
thrust::upper_bound(thrust::seq, inner_offsets.begin(), inner_offsets.end(), value_index) -
inner_offsets.begin()) -
1;
auto const group_index = group_labels[tdigest_index];
auto const first_tdigest_index = outer_offsets[group_index];
auto const first_weight_index = inner_offsets[first_tdigest_index];
auto const relative_value_index = value_index - first_weight_index;
double const* group_cumulative_weights = cumulative_weights + first_weight_index;
return {group_index, relative_value_index, group_cumulative_weights[relative_value_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs by group index.
struct scalar_group_info_grouped {
size_type const* group_valid_counts;
offset_type const* group_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
return {static_cast<double>(group_valid_counts[group_index]),
group_offsets[group_index + 1] - group_offsets[group_index],
group_offsets[group_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs
struct scalar_group_info {
double const total_weight;
size_type const size;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type) const
{
return {total_weight, size, 0};
}
};
// retrieve group info of centroid inputs by group index
template <typename GroupOffsetsIter>
struct centroid_group_info {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets;
offset_type const* inner_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
// if there's no weights in this group of digests at all, return 0.
auto const group_start = inner_offsets[outer_offsets[group_index]];
auto const group_end = inner_offsets[outer_offsets[group_index + 1]];
auto const num_weights = group_end - group_start;
auto const last_weight_index = group_end - 1;
return num_weights == 0
? thrust::tuple<double, size_type, size_type>{0, num_weights, group_start}
: thrust::tuple<double, size_type, size_type>{
cumulative_weights[last_weight_index], num_weights, group_start};
}
};
struct tdigest_min {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const min = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? min : std::numeric_limits<double>::max();
}
};
struct tdigest_max {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const max = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? max : std::numeric_limits<double>::lowest();
}
};
// a monotonically increasing scale function which produces a distribution
// of centroids that is more densely packed in the middle of the input
// than at the ends.
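// Equivalently: q' = k1_inverse(k1(q) + 1), where k1(q) = (delta/(2*pi)) * asin(2q - 1)
// and k1_inverse(k) = (sin(2*pi*k/delta) + 1) / 2.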
__device__ double scale_func_k1(double quantile, double delta_norm)
{
double k = delta_norm * asin(2.0 * quantile - 1.0);
k += 1.0;
double const q = (sin(k / delta_norm) + 1.0) / 2.0;
return q;
}
// convert a single-row tdigest column to a scalar.
std::unique_ptr<scalar> to_tdigest_scalar(std::unique_ptr<column>&& tdigest,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(tdigest->size() == 1,
"Encountered invalid tdigest column when converting to scalar");
auto contents = tdigest->release();
return std::make_unique<struct_scalar>(table(std::move(contents.children)), true, stream, mr);
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated. 1 thread
* per group.
*
* This kernel is called in a two-pass style. Once to compute the per-group
* cluster sizes and total # of clusters, and once to compute the actual
* weight limits per cluster.
*
* @param delta tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total
* weight, size and start offset)
* @param group_cluster_wl Output. The set of cluster weight limits for each group.
* @param group_num_clusters Output. The number of output clusters for each input group.
* @param group_cluster_offsets Offsets per-group to the start of its clusters
* @param has_nulls Whether or not the input contains nulls
*
*/
template <typename GroupInfo, typename NearestWeightFunc, typename CumulativeWeight>
__global__ void generate_cluster_limits_kernel(int delta,
size_type num_groups,
NearestWeightFunc nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
double* group_cluster_wl,
size_type* group_num_clusters,
offset_type const* group_cluster_offsets,
bool has_nulls)
{
int const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto const group_index = tid;
if (group_index >= num_groups) { return; }
// we will generate at most delta clusters.
double const delta_norm = static_cast<double>(delta) / (2.0 * M_PI);
double total_weight;
size_type group_size, group_start;
thrust::tie(total_weight, group_size, group_start) = group_info(group_index);
// start at the correct place based on our cluster offset.
double* cluster_wl =
group_cluster_wl ? group_cluster_wl + group_cluster_offsets[group_index] : nullptr;
// a group with nothing in it.
group_num_clusters[group_index] = 0;
if (total_weight <= 0) {
// if the input contains nulls we can potentially have a group that generates no
// clusters because -all- of the input values are null. in that case, the reduce_by_key call
// in the tdigest generation step will need a location to store the unused reduction value for
// that group of nulls. these "stubs" will be postprocessed out afterwards.
if (has_nulls) { group_num_clusters[group_index] = 1; }
return;
}
double cur_limit = 0.0;
double cur_weight = 0.0;
double next_limit = -1.0;
int last_inserted_index = -1; // group-relative index into the input stream
// compute the first cluster limit
double nearest_w;
int nearest_w_index; // group-relative index into the input stream
while (true) {
cur_weight = next_limit < 0 ? 0 : max(cur_weight + 1, nearest_w);
if (cur_weight >= total_weight) { break; }
// based on where we are closing the cluster off (not including the incoming weight),
// compute the next cluster limit
double const quantile = cur_weight / total_weight;
next_limit = total_weight * scale_func_k1(quantile, delta_norm);
// if the next limit is < the cur limit, we're past the end of the distribution, so we're done.
if (next_limit <= cur_limit) {
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = total_weight; }
group_num_clusters[group_index]++;
break;
}
// compute the weight we will be at in the input values just before closing off the current
// cluster (because adding the next value will cross the current limit).
// NOTE: can't use structured bindings here.
thrust::tie(nearest_w, nearest_w_index) = nearest_weight(next_limit, group_index);
// because of the way the scale functions work, it is possible to generate clusters
// in such a way that we end up with "gaps" where there are no input values that
// fall into a given cluster. An example would be this:
//
// cluster weight limits = 0.00003, 1.008, 3.008
//
// input values(weight) = A(1), B(2), C(3)
//
// naively inserting these values into the clusters simply by taking a lower_bound,
// we would get the following distribution of input values into those 3 clusters.
// (), (A), (B,C)
//
// whereas what we really want is:
//
// (A), (B), (C)
//
// to fix this, we will artificially adjust the output cluster limits to guarantee
// at least 1 input value will be put in each cluster during the reduction step.
// this does not affect final centroid results as we still use the "real" weight limits
// to compute subsequent clusters - the purpose is only to allow cluster selection
// during the reduction step to be trivial.
//
double adjusted_next_limit = next_limit;
int adjusted_w_index = nearest_w_index;
if ((last_inserted_index < 0) || // if we haven't inserted anything yet
(nearest_w_index ==
last_inserted_index)) { // if we land in the same bucket as the previous cap
// force the value into this bucket
adjusted_w_index = (last_inserted_index == group_size - 1)
? last_inserted_index
: max(adjusted_w_index, last_inserted_index + 1);
// the "adjusted" cluster limit must be high enough so that this value will fall in the
// bucket. NOTE: cumulative_weight expects an absolute index into the input value stream, not
// a group-relative index
[[maybe_unused]] auto [r, i, adjusted_w] = cumulative_weight(adjusted_w_index + group_start);
adjusted_next_limit = max(next_limit, adjusted_w);
// update the weight with our adjusted value.
nearest_w = adjusted_w;
}
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = adjusted_next_limit; }
last_inserted_index = adjusted_w_index;
group_num_clusters[group_index]++;
cur_limit = next_limit;
}
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated.
*
* @param delta_ tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total weight,
* size and start offset)
* @param has_nulls Whether or not the input data contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tuple containing the set of cluster weight limits for each group, a set of
* list-style offsets indicating group sizes, and the total number of clusters
*/
template <typename GroupInfo, typename NearestWeight, typename CumulativeWeight>
std::tuple<rmm::device_uvector<double>, std::unique_ptr<column>, size_type>
generate_group_cluster_info(int delta,
size_type num_groups,
NearestWeight nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
constexpr size_type block_size = 256;
cudf::detail::grid_1d const grid(num_groups, block_size);
// compute number of clusters per group
// each thread computes 1 set of clusters (# of cluster sets == # of groups)
rmm::device_uvector<size_type> group_num_clusters(num_groups, stream);
hipLaunchKernelGGL(( generate_cluster_limits_kernel), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(),
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
nullptr,
group_num_clusters.begin(),
nullptr,
has_nulls);
// generate group cluster offsets (where the clusters for a given group start and end)
auto group_cluster_offsets = cudf::make_numeric_column(
data_type{type_id::INT32}, num_groups + 1, mask_state::UNALLOCATED, stream, mr);
auto cluster_size = cudf::detail::make_counting_transform_iterator(
0, [group_num_clusters = group_num_clusters.begin(), num_groups] __device__(size_type index) {
return index == num_groups ? 0 : group_num_clusters[index];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
cluster_size,
cluster_size + num_groups + 1,
group_cluster_offsets->mutable_view().begin<offset_type>(),
0);
// total # of clusters
offset_type total_clusters =
cudf::detail::get_value<offset_type>(group_cluster_offsets->view(), num_groups, stream);
// fill in the actual cluster weight limits
rmm::device_uvector<double> group_cluster_wl(total_clusters, stream);
hipLaunchKernelGGL(( generate_cluster_limits_kernel), dim3(grid.num_blocks), dim3(block_size), 0, stream.value(),
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
group_cluster_wl.begin(),
group_num_clusters.begin(),
group_cluster_offsets->view().begin<offset_type>(),
has_nulls);
return {std::move(group_cluster_wl),
std::move(group_cluster_offsets),
static_cast<size_type>(total_clusters)};
}
std::unique_ptr<column> build_output_column(size_type num_rows,
std::unique_ptr<column>&& means,
std::unique_ptr<column>&& weights,
std::unique_ptr<column>&& offsets,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// whether or not this weight is a stub
auto is_stub_weight = [weights = weights->view().begin<double>()] __device__(size_type i) {
return weights[i] == 0;
};
// whether or not this particular tdigest is a stub
auto is_stub_digest = [offsets = offsets->view().begin<offset_type>(), is_stub_weight] __device__(
size_type i) { return is_stub_weight(offsets[i]) ? 1 : 0; };
size_type const num_stubs = [&]() {
if (!has_nulls) { return 0; }
auto iter = cudf::detail::make_counting_transform_iterator(0, is_stub_digest);
return thrust::reduce(rmm::exec_policy(stream), iter, iter + num_rows);
}();
// if there are no stub tdigests, we can return immediately.
if (num_stubs == 0) {
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(means),
std::move(weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
// otherwise we need to strip out the stubs.
auto remove_stubs = [&](column_view const& col, size_type num_stubs) {
auto result = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, col.size() - num_stubs, mask_state::UNALLOCATED, stream, mr);
thrust::remove_copy_if(rmm::exec_policy(stream),
col.begin<double>(),
col.end<double>(),
thrust::make_counting_iterator(0),
result->mutable_view().begin<double>(),
is_stub_weight);
return result;
};
// remove from the means and weights column
auto _means = remove_stubs(*means, num_stubs);
auto _weights = remove_stubs(*weights, num_stubs);
// adjust offsets.
rmm::device_uvector<offset_type> sizes(num_rows, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_rows,
sizes.begin(),
[offsets = offsets->view().begin<offset_type>()] __device__(size_type i) {
return offsets[i + 1] - offsets[i];
});
auto iter = cudf::detail::make_counting_transform_iterator(
0, [sizes = sizes.begin(), is_stub_digest, num_rows] __device__(size_type i) {
return i == num_rows || is_stub_digest(i) ? 0 : sizes[i];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
iter,
iter + num_rows + 1,
offsets->mutable_view().begin<offset_type>(),
0);
// assemble final column
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(_means),
std::move(_weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
/**
* @brief Compute a column of tdigests.
*
* Assembles the output tdigest column based on the specified delta, a stream of
* input values (either scalar or centroids), and an assortment of per-group
* clustering information.
*
* This function is effectively just a reduce_by_key that performs a reduction
* from input values -> centroid clusters as defined by the cluster weight
* boundaries.
*
* @param delta tdigest compression level
* @param values_begin Beginning of the range of input values.
* @param values_end End of the range of input values.
* @param cumulative_weight Functor which returns cumulative weight and group information for
* an absolute input value index.
* @param min_col Column containing the minimum value per group.
* @param max_col Column containing the maximum value per group.
* @param group_cluster_wl Cluster weight limits for each group.
* @param group_cluster_offsets R-value reference of offsets into the cluster weight limits.
* @param total_clusters Total number of clusters in all groups.
* @param has_nulls Whether or not the input contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tdigest column with 1 row per output tdigest.
*/
template <typename CentroidIter, typename CumulativeWeight>
std::unique_ptr<column> compute_tdigests(int delta,
CentroidIter centroids_begin,
CentroidIter centroids_end,
CumulativeWeight group_cumulative_weight,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
rmm::device_uvector<double> const& group_cluster_wl,
std::unique_ptr<column>&& group_cluster_offsets,
size_type total_clusters,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// the output for each group is a column of data that represents the tdigest. since we want 1 row
// per group, each row will be a list the length of the tdigest for that group. so our output
// column is of the form:
// struct {
// centroids for the digest
// list {
// struct {
// double // mean
// double // weight
// }
// }
// double // min
// double // max
// }
//
if (total_clusters == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
// each input group represents an individual tdigest. within each tdigest, we want the keys
// to represent cluster indices (for example, if a tdigest had 100 clusters, the keys should fall
// into the range 0-99). But since we have multiple tdigests, we need to keep the keys unique
// between the groups, so we add our group start offset.
auto keys = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[delta,
group_cluster_wl = group_cluster_wl.data(),
group_cluster_offsets = group_cluster_offsets->view().begin<offset_type>(),
group_cumulative_weight] __device__(size_type value_index) -> size_type {
// get group index, relative value index within the group and cumulative weight.
[[maybe_unused]] auto [group_index, relative_value_index, cumulative_weight] =
group_cumulative_weight(value_index);
auto const num_clusters =
group_cluster_offsets[group_index + 1] - group_cluster_offsets[group_index];
if (num_clusters == 0) { return group_cluster_offsets[group_index]; }
// compute start of cluster weight limits for this group
double const* weight_limits = group_cluster_wl + group_cluster_offsets[group_index];
// local cluster index
size_type const group_cluster_index =
min(num_clusters - 1,
static_cast<size_type>(
thrust::lower_bound(
thrust::seq, weight_limits, weight_limits + num_clusters, cumulative_weight) -
weight_limits));
// add the cluster offset to generate a globally unique key
return group_cluster_index + group_cluster_offsets[group_index];
});
// mean and weight data
auto centroid_means = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
auto centroid_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
// reduce the centroids down by key.
cudf::mutable_column_view mean_col(*centroid_means);
cudf::mutable_column_view weight_col(*centroid_weights);
// reduce the centroids into the clusters
auto output = thrust::make_zip_iterator(thrust::make_tuple(
mean_col.begin<double>(), weight_col.begin<double>(), thrust::make_discard_iterator()));
auto const num_values = std::distance(centroids_begin, centroids_end);
thrust::reduce_by_key(rmm::exec_policy(stream),
keys,
keys + num_values, // keys
centroids_begin, // values
thrust::make_discard_iterator(), // key output
output, // output
thrust::equal_to{}, // key equality check
merge_centroids{});
// create final tdigest column
return build_output_column(group_cluster_offsets->size() - 1,
std::move(centroid_means),
std::move(centroid_weights),
std::move(group_cluster_offsets),
std::move(min_col),
std::move(max_col),
has_nulls,
stream,
mr);
}
// return the min/max value of scalar inputs by group index
template <typename T>
struct get_scalar_minmax_grouped {
column_device_view const col;
device_span<size_type const> group_offsets;
size_type const* group_valid_counts;
__device__ thrust::tuple<double, double> operator()(size_type group_index)
{
auto const valid_count = group_valid_counts[group_index];
return valid_count > 0
? thrust::make_tuple(
static_cast<double>(col.element<T>(group_offsets[group_index])),
static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
// return the min/max value of scalar inputs
template <typename T>
struct get_scalar_minmax {
column_device_view const col;
size_type const valid_count;
__device__ thrust::tuple<double, double> operator()(size_type)
{
return valid_count > 0
? thrust::make_tuple(static_cast<double>(col.element<T>(0)),
static_cast<double>(col.element<T>(valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
struct typed_group_tdigest {
template <typename T,
std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// first, generate cluster weight information for each input group
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_scalar_weights_grouped{group_offsets.begin()},
scalar_group_info_grouped{group_valid_counts.begin(), group_offsets.begin()},
cumulative_scalar_weight_grouped{group_offsets, group_labels},
col.null_count() > 0,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_groups,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax_grouped<T>{*d_col, group_offsets, group_valid_counts.begin()});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid<T>{*d_col});
// generate the final tdigest
return compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + col.size(),
cumulative_scalar_weight_grouped{group_offsets, group_labels},
std::move(min_col),
std::move(max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
col.null_count() > 0,
stream,
mr);
}
template <typename T,
typename... Args,
std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
struct typed_reduce_tdigest {
// this function assumes col is sorted in ascending order with nulls at the end
template <
typename T,
typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// treat this the same as the groupby path with a single group. Note: even though
// there is only 1 group there are still multiple keys within the group that represent
// the clustering of (N input values) -> (1 output centroid), so the final computation
// remains a reduce_by_key() and not a reduce().
//
// additionally we get a few optimizations.
// - since we only ever have 1 "group" that is sorted with nulls at the end,
// we can simply process just the non-null values and act as if the column
// is non-nullable, allowing us to process fewer values than if we were doing a groupby.
//
// - several of the functors used during the reduction are cheaper than during a groupby.
auto const valid_count = col.size() - col.null_count();
// first, generate cluster weight information for each input group
auto [cluster_wl, cluster_offsets, total_clusters] =
generate_group_cluster_info(delta,
1,
nearest_value_scalar_weights{valid_count},
scalar_group_info{static_cast<double>(valid_count), valid_count},
cumulative_scalar_weight{},
false,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + 1,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax<T>{*d_col, valid_count});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid_no_nulls<T>{*d_col});
// generate the final tdigest and wrap it in a struct_scalar
return to_tdigest_scalar(compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + valid_count,
cumulative_scalar_weight{},
std::move(min_col),
std::move(max_col),
cluster_wl,
std::move(cluster_offsets),
total_clusters,
false,
stream,
mr),
stream,
mr);
}
template <
typename T,
typename... Args,
typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
// utility for merge_tdigests.
template <typename GroupOffsetsIter>
struct group_num_weights_func {
GroupOffsetsIter outer_offsets;
size_type const* inner_offsets;
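  // outer_offsets maps groups -> tdigest indices and inner_offsets maps tdigests -> centroid rows,
  // so the difference computed below is the total number of centroids (weights) in the group.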
__device__ size_type operator()(size_type group_index)
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
return inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
}
};
// utility for merge_tdigests.
struct group_is_empty {
__device__ bool operator()(size_type group_size) { return group_size == 0; }
};
// utility for merge_tdigests.
template <typename GroupLabelsIter>
struct group_key_func {
GroupLabelsIter group_labels;
size_type const* inner_offsets;
size_type num_inner_offsets;
__device__ size_type operator()(size_type index)
{
// what -original- tdigest index this absolute index corresponds to
auto const iter = thrust::prev(
thrust::upper_bound(thrust::seq, inner_offsets, inner_offsets + num_inner_offsets, index));
auto const tdigest_index = thrust::distance(inner_offsets, iter);
// what group index the original tdigest belongs to
return group_labels[tdigest_index];
}
};
template <typename HGroupOffsetIter, typename GroupOffsetIter, typename GroupLabelIter>
std::unique_ptr<column> merge_tdigests(tdigest_column_view const& tdv,
HGroupOffsetIter h_outer_offsets,
GroupOffsetIter group_offsets,
GroupLabelIter group_labels,
size_t num_group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// thrust::merge and thrust::merge_by_key don't provide what we need. What we would need is an
// algorithm like a super-merge that takes two layers of keys: one which identifies the outer
// grouping of tdigests, and one which identifies the inner groupings of the tdigests within the
// outer groups.
// TODO: investigate replacing the iterative merge with a single stable_sort_by_key.
// bring tdigest offsets back to the host
auto tdigest_offsets = tdv.centroids().offsets();
std::vector<offset_type> h_inner_offsets(tdigest_offsets.size());
hipMemcpyAsync(h_inner_offsets.data(),
tdigest_offsets.begin<offset_type>(),
sizeof(offset_type) * tdigest_offsets.size(),
hipMemcpyDeviceToHost,
stream);
stream.synchronize();
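  // the copy above is asynchronous with respect to the host; synchronizing here guarantees
  // h_inner_offsets is fully populated before it is read in the host-side std::transform below.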
// extract all means and weights into a table
cudf::table_view tdigests_unsliced({tdv.means(), tdv.weights()});
// generate the merged (but not yet compressed) tdigests for each group.
std::vector<std::unique_ptr<table>> tdigests;
tdigests.reserve(num_groups);
std::transform(h_outer_offsets,
h_outer_offsets + num_groups,
std::next(h_outer_offsets),
std::back_inserter(tdigests),
[&](auto tdigest_start, auto tdigest_end) {
// the range of tdigests in this group
auto const num_tdigests = tdigest_end - tdigest_start;
// slice each tdigest from the input
std::vector<table_view> unmerged_tdigests;
unmerged_tdigests.reserve(num_tdigests);
auto offset_iter = std::next(h_inner_offsets.begin(), tdigest_start);
std::transform(
offset_iter,
offset_iter + num_tdigests,
std::next(offset_iter),
std::back_inserter(unmerged_tdigests),
[&](size_type start, size_type end) {
return cudf::detail::slice(tdigests_unsliced, {start, end}, stream);
});
// merge
return cudf::detail::merge(unmerged_tdigests,
{0},
{order::ASCENDING},
{},
stream,
rmm::mr::get_current_device_resource());
});
// generate min and max values
auto merged_min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto min_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.min_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_min{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
min_iter,
thrust::make_discard_iterator(),
merged_min_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::minimum{});
auto merged_max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.max_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_max{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
max_iter,
thrust::make_discard_iterator(),
merged_max_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::maximum{});
// for any empty groups, set the min and max to be 0. not technically necessary but it makes
// testing simpler.
auto group_num_weights = cudf::detail::make_counting_transform_iterator(
0,
group_num_weights_func<decltype(group_offsets)>{group_offsets,
tdigest_offsets.begin<size_type>()});
thrust::replace_if(rmm::exec_policy(stream),
merged_min_col->mutable_view().begin<double>(),
merged_min_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
thrust::replace_if(rmm::exec_policy(stream),
merged_max_col->mutable_view().begin<double>(),
merged_max_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
// concatenate all the merged tdigests back into one table.
std::vector<table_view> tdigest_views;
tdigest_views.reserve(num_groups);
std::transform(tdigests.begin(),
tdigests.end(),
std::back_inserter(tdigest_views),
[](std::unique_ptr<table> const& t) { return t->view(); });
auto merged = cudf::detail::concatenate(tdigest_views, stream);
// generate cumulative weights
auto merged_weights = merged->get_column(1).view();
auto cumulative_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, merged_weights.size(), mask_state::UNALLOCATED);
auto keys = cudf::detail::make_counting_transform_iterator(
0,
group_key_func<decltype(group_labels)>{
group_labels, tdigest_offsets.begin<size_type>(), tdigest_offsets.size()});
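  // keys are group indices, so the scan below restarts at each group boundary and the cumulative
  // weights are therefore computed independently per group.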
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
keys,
keys + cumulative_weights->size(),
merged_weights.begin<double>(),
cumulative_weights->mutable_view().begin<double>());
auto const delta = max_centroids;
// generate cluster info
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_centroid_weights<decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
centroid_group_info<decltype(group_offsets)>{cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}},
false,
stream,
mr);
// input centroid values
auto centroids = cudf::detail::make_counting_transform_iterator(
0,
make_weighted_centroid{merged->get_column(0).view().begin<double>(),
merged_weights.begin<double>()});
// compute the tdigest
return compute_tdigests(
delta,
centroids,
centroids + merged->num_rows(),
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}},
std::move(merged_min_col),
std::move(merged_max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
false,
stream,
mr);
}
} // anonymous namespace
std::unique_ptr<scalar> reduce_tdigest(column_view const& col,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
// since this isn't coming out of a groupby, we need to sort the inputs in ascending
// order with nulls at the end.
table_view t({col});
auto sorted = cudf::detail::sort(
t, {order::ASCENDING}, {null_order::AFTER}, stream, rmm::mr::get_current_device_resource());
auto const delta = max_centroids;
return cudf::type_dispatcher(
col.type(), typed_reduce_tdigest{}, sorted->get_column(0), delta, stream, mr);
}
std::unique_ptr<scalar> reduce_merge_tdigest(column_view const& input,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (input.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
auto h_group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()](size_type i) { return i == 0 ? 0 : size; });
auto group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()] __device__(size_type i) { return i == 0 ? 0 : size; });
auto group_labels = thrust::make_constant_iterator(0);
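  // the iterators above describe a single group spanning the entire input: offsets {0, size} and
  // a constant group label of 0 for every row, so merge_tdigests produces exactly one tdigest.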
return to_tdigest_scalar(merge_tdigests(tdv,
h_group_offsets,
group_offsets,
group_labels,
input.size(),
1,
max_centroids,
stream,
mr),
stream,
mr);
}
std::unique_ptr<column> group_tdigest(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
auto const delta = max_centroids;
return cudf::type_dispatcher(col.type(),
typed_group_tdigest{},
col,
group_offsets,
group_labels,
group_valid_counts,
num_groups,
delta,
stream,
mr);
}
std::unique_ptr<column> group_merge_tdigest(column_view const& input,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (num_groups == 0 || input.size() == 0) {
return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr);
}
// bring group offsets back to the host
std::vector<size_type> h_group_offsets(group_offsets.size());
hipMemcpyAsync(h_group_offsets.data(),
group_offsets.begin(),
sizeof(size_type) * group_offsets.size(),
hipMemcpyDeviceToHost,
stream);
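  // no explicit synchronize is needed here: merge_tdigests copies its own offsets on the same
  // stream and synchronizes before any host-side reads, which also completes this copy.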
return merge_tdigests(tdv,
h_group_offsets.begin(),
group_offsets.data(),
group_labels.data(),
group_labels.size(),
num_groups,
max_centroids,
stream,
mr);
}
} // namespace detail
} // namespace tdigest
} // namespace cudf
| 9cab8d9e4ff29eaabbf25e3ea7ff0583397e2251.cu | /*
* Copyright (c) 2021-2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <quantiles/tdigest/tdigest_util.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/concatenate.hpp>
#include <cudf/detail/copy.hpp>
#include <cudf/detail/get_value.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/merge.cuh>
#include <cudf/detail/sorting.hpp>
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/utilities/span.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/advance.h>
#include <thrust/binary_search.h>
#include <thrust/distance.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/merge.h>
#include <thrust/pair.h>
#include <thrust/reduce.h>
#include <thrust/remove.h>
#include <thrust/replace.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>
namespace cudf {
namespace tdigest {
namespace detail {
namespace {
// the most representative point within a cluster of similar
// values. {mean, weight}
// NOTE: Using a tuple here instead of a struct to take advantage of
// thrust zip iterators for output.
using centroid = thrust::tuple<double, double, bool>;
// make a centroid from a scalar with a weight of 1.
template <typename T>
struct make_centroid {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
auto const is_valid = col.is_valid(index);
auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0;
auto const weight = is_valid ? 1.0 : 0.0;
return {mean, weight, is_valid};
}
};
// make a centroid from a scalar with a weight of 1. this functor
// assumes any value index it is passed is not null
template <typename T>
struct make_centroid_no_nulls {
column_device_view const col;
centroid operator() __device__(size_type index) const
{
return {static_cast<double>(col.element<T>(index)), 1.0, true};
}
};
// make a centroid from an input stream of mean/weight values.
struct make_weighted_centroid {
double const* mean;
double const* weight;
centroid operator() __device__(size_type index) { return {mean[index], weight[index], true}; }
};
// merge two centroids
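// (the merged mean is the weight-weighted average of the two means; a centroid flagged invalid,
// i.e. a null stub, is simply ignored)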
struct merge_centroids {
centroid operator() __device__(centroid const& lhs, centroid const& rhs) const
{
bool const lhs_valid = thrust::get<2>(lhs);
bool const rhs_valid = thrust::get<2>(rhs);
if (!lhs_valid && !rhs_valid) { return {0, 0, false}; }
if (!lhs_valid) { return rhs; }
if (!rhs_valid) { return lhs; }
double const lhs_mean = thrust::get<0>(lhs);
double const rhs_mean = thrust::get<0>(rhs);
double const lhs_weight = thrust::get<1>(lhs);
double const rhs_weight = thrust::get<1>(rhs);
double const new_weight = lhs_weight + rhs_weight;
return {(lhs_mean * lhs_weight + rhs_mean * rhs_weight) / new_weight, new_weight, true};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the grouped input stream prior to
* the specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights_grouped {
offset_type const* group_offsets;
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
auto const group_size = group_offsets[group_index + 1] - group_offsets[group_index];
return {f, relative_weight_index < group_size ? relative_weight_index : group_size - 1};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the nearest weight that will be <= the next limit is simply the nearest integer < the limit,
* which we can get by just taking floor(next_limit). For example if our next limit is 3.56, the
* nearest whole number <= it is floor(3.56) == 3.
*/
struct nearest_value_scalar_weights {
size_type const input_size;
thrust::pair<double, int> operator() __device__(double next_limit, size_type) const
{
double const f = floor(next_limit);
auto const relative_weight_index = max(0, static_cast<int>(next_limit) - 1);
return {f, relative_weight_index < input_size ? relative_weight_index : input_size - 1};
}
};
/**
* @brief A functor which returns the nearest cumulative weight in the input stream prior to the
* specified next weight limit.
*
* This functor assumes we are dealing with grouped, sorted, weighted centroids.
*/
template <typename GroupOffsetsIter>
struct nearest_value_centroid_weights {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets; // groups
offset_type const* inner_offsets; // tdigests within a group
thrust::pair<double, int> operator() __device__(double next_limit, size_type group_index) const
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
auto const num_weights = inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
// NOTE: as it is today, this functor will never be called for any digests that are empty, but
// I'll leave this check here for safety.
if (num_weights == 0) { return thrust::pair<double, int>{0, 0}; }
double const* group_cumulative_weights = cumulative_weights + inner_offsets[tdigest_begin];
auto const index = ((thrust::lower_bound(thrust::seq,
group_cumulative_weights,
group_cumulative_weights + num_weights,
next_limit)) -
group_cumulative_weights);
return index == 0 ? thrust::pair<double, int>{0, 0}
: thrust::pair<double, int>{group_cumulative_weights[index - 1],
static_cast<int>(index) - 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight_grouped {
cudf::device_span<size_type const> group_offsets;
cudf::device_span<size_type const> group_labels;
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const group_index = group_labels[value_index];
auto const relative_value_index = value_index - group_offsets[group_index];
return {group_index, relative_value_index, relative_value_index + 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of input values.
*
* This functor assumes the weight for all scalars is simply 1. Under this assumption,
* the cumulative weight for a given value index I is simply I+1.
*/
struct cumulative_scalar_weight {
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
return {0, value_index, value_index + 1};
}
};
/**
* @brief A functor which returns the cumulative input weight for a given index in a
* set of grouped input centroids.
*
* This functor assumes we are dealing with grouped, weighted centroids.
*/
template <typename GroupLabelsIter, typename GroupOffsetsIter>
struct cumulative_centroid_weight {
double const* cumulative_weights;
GroupLabelsIter group_labels;
GroupOffsetsIter outer_offsets; // groups
  cudf::device_span<offset_type const> inner_offsets;  // tdigests within a group
std::tuple<size_type, size_type, double> operator() __device__(size_type value_index) const
{
auto const tdigest_index =
static_cast<size_type>(
thrust::upper_bound(thrust::seq, inner_offsets.begin(), inner_offsets.end(), value_index) -
inner_offsets.begin()) -
1;
auto const group_index = group_labels[tdigest_index];
auto const first_tdigest_index = outer_offsets[group_index];
auto const first_weight_index = inner_offsets[first_tdigest_index];
auto const relative_value_index = value_index - first_weight_index;
double const* group_cumulative_weights = cumulative_weights + first_weight_index;
return {group_index, relative_value_index, group_cumulative_weights[relative_value_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs by group index.
struct scalar_group_info_grouped {
size_type const* group_valid_counts;
offset_type const* group_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
return {static_cast<double>(group_valid_counts[group_index]),
group_offsets[group_index + 1] - group_offsets[group_index],
group_offsets[group_index]};
}
};
// retrieve group info (total weight, size, start offset) of scalar inputs
struct scalar_group_info {
double const total_weight;
size_type const size;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type) const
{
return {total_weight, size, 0};
}
};
// retrieve group info of centroid inputs by group index
template <typename GroupOffsetsIter>
struct centroid_group_info {
double const* cumulative_weights;
GroupOffsetsIter outer_offsets;
offset_type const* inner_offsets;
__device__ thrust::tuple<double, size_type, size_type> operator()(size_type group_index) const
{
// if there's no weights in this group of digests at all, return 0.
auto const group_start = inner_offsets[outer_offsets[group_index]];
auto const group_end = inner_offsets[outer_offsets[group_index + 1]];
auto const num_weights = group_end - group_start;
auto const last_weight_index = group_end - 1;
return num_weights == 0
? thrust::tuple<double, size_type, size_type>{0, num_weights, group_start}
: thrust::tuple<double, size_type, size_type>{
cumulative_weights[last_weight_index], num_weights, group_start};
}
};
struct tdigest_min {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const min = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? min : std::numeric_limits<double>::max();
}
};
struct tdigest_max {
__device__ double operator()(thrust::tuple<double, size_type> const& t) const
{
auto const max = thrust::get<0>(t);
auto const size = thrust::get<1>(t);
return size > 0 ? max : std::numeric_limits<double>::lowest();
}
};
// a monotonically increasing scale function which produces a distribution
// of centroids that is more densely packed at the ends (extreme quantiles)
// of the input than in the middle, giving finer resolution at the tails.
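// e.g. with delta = 100 (delta_norm ~= 15.9): starting from q = 0.5 the next cluster limit is
// (sin(1/15.9) + 1) / 2 ~= 0.531, a step of about 0.031 in quantile space, while starting from
// q = 0.99 the step shrinks to roughly 0.005, so clusters narrow toward the extreme quantiles.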
__device__ double scale_func_k1(double quantile, double delta_norm)
{
double k = delta_norm * asin(2.0 * quantile - 1.0);
k += 1.0;
double const q = (sin(k / delta_norm) + 1.0) / 2.0;
return q;
}
// convert a single-row tdigest column to a scalar.
std::unique_ptr<scalar> to_tdigest_scalar(std::unique_ptr<column>&& tdigest,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
CUDF_EXPECTS(tdigest->size() == 1,
"Encountered invalid tdigest column when converting to scalar");
auto contents = tdigest->release();
return std::make_unique<struct_scalar>(table(std::move(contents.children)), true, stream, mr);
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated. 1 thread
* per group.
*
* This kernel is called in a two-pass style. Once to compute the per-group
* cluster sizes and total # of clusters, and once to compute the actual
* weight limits per cluster.
*
* @param delta tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total
* weight, size and start offset)
* @param group_cluster_wl Output. The set of cluster weight limits for each group.
* @param group_num_clusters Output. The number of output clusters for each input group.
 * @param group_cluster_offsets Offsets per-group to the start of its clusters
* @param has_nulls Whether or not the input contains nulls
*
*/
template <typename GroupInfo, typename NearestWeightFunc, typename CumulativeWeight>
__global__ void generate_cluster_limits_kernel(int delta,
size_type num_groups,
NearestWeightFunc nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
double* group_cluster_wl,
size_type* group_num_clusters,
offset_type const* group_cluster_offsets,
bool has_nulls)
{
int const tid = threadIdx.x + blockIdx.x * blockDim.x;
auto const group_index = tid;
if (group_index >= num_groups) { return; }
// we will generate at most delta clusters.
double const delta_norm = static_cast<double>(delta) / (2.0 * M_PI);
double total_weight;
size_type group_size, group_start;
thrust::tie(total_weight, group_size, group_start) = group_info(group_index);
// start at the correct place based on our cluster offset.
double* cluster_wl =
group_cluster_wl ? group_cluster_wl + group_cluster_offsets[group_index] : nullptr;
// a group with nothing in it.
group_num_clusters[group_index] = 0;
if (total_weight <= 0) {
// if the input contains nulls we can potentially have a group that generates no
// clusters because -all- of the input values are null. in that case, the reduce_by_key call
// in the tdigest generation step will need a location to store the unused reduction value for
// that group of nulls. these "stubs" will be postprocessed out afterwards.
if (has_nulls) { group_num_clusters[group_index] = 1; }
return;
}
double cur_limit = 0.0;
double cur_weight = 0.0;
double next_limit = -1.0;
int last_inserted_index = -1; // group-relative index into the input stream
// compute the first cluster limit
double nearest_w;
int nearest_w_index; // group-relative index into the input stream
while (true) {
cur_weight = next_limit < 0 ? 0 : max(cur_weight + 1, nearest_w);
if (cur_weight >= total_weight) { break; }
// based on where we are closing the cluster off (not including the incoming weight),
// compute the next cluster limit
double const quantile = cur_weight / total_weight;
next_limit = total_weight * scale_func_k1(quantile, delta_norm);
// if the next limit is < the cur limit, we're past the end of the distribution, so we're done.
if (next_limit <= cur_limit) {
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = total_weight; }
group_num_clusters[group_index]++;
break;
}
// compute the weight we will be at in the input values just before closing off the current
// cluster (because adding the next value will cross the current limit).
// NOTE: can't use structured bindings here.
thrust::tie(nearest_w, nearest_w_index) = nearest_weight(next_limit, group_index);
// because of the way the scale functions work, it is possible to generate clusters
// in such a way that we end up with "gaps" where there are no input values that
// fall into a given cluster. An example would be this:
//
// cluster weight limits = 0.00003, 1.008, 3.008
//
// input values(weight) = A(1), B(2), C(3)
//
// naively inserting these values into the clusters simply by taking a lower_bound,
// we would get the following distribution of input values into those 3 clusters.
// (), (A), (B,C)
//
// whereas what we really want is:
//
// (A), (B), (C)
//
// to fix this, we will artificially adjust the output cluster limits to guarantee
// at least 1 input value will be put in each cluster during the reduction step.
// this does not affect final centroid results as we still use the "real" weight limits
// to compute subsequent clusters - the purpose is only to allow cluster selection
// during the reduction step to be trivial.
//
double adjusted_next_limit = next_limit;
int adjusted_w_index = nearest_w_index;
if ((last_inserted_index < 0) || // if we haven't inserted anything yet
(nearest_w_index ==
last_inserted_index)) { // if we land in the same bucket as the previous cap
// force the value into this bucket
adjusted_w_index = (last_inserted_index == group_size - 1)
? last_inserted_index
: max(adjusted_w_index, last_inserted_index + 1);
// the "adjusted" cluster limit must be high enough so that this value will fall in the
// bucket. NOTE: cumulative_weight expects an absolute index into the input value stream, not
// a group-relative index
[[maybe_unused]] auto [r, i, adjusted_w] = cumulative_weight(adjusted_w_index + group_start);
adjusted_next_limit = max(next_limit, adjusted_w);
// update the weight with our adjusted value.
nearest_w = adjusted_w;
}
if (cluster_wl) { cluster_wl[group_num_clusters[group_index]] = adjusted_next_limit; }
last_inserted_index = adjusted_w_index;
group_num_clusters[group_index]++;
cur_limit = next_limit;
}
}
/**
* @brief Compute a set of cluster limits (brackets, essentially) for a
* given tdigest based on the specified delta and the total weight of values
* to be added.
*
* The number of clusters generated will always be <= delta_, where delta_ is
* a reasonably small number likely << 10000.
*
* Each input group gets an independent set of clusters generated.
*
* @param delta_ tdigest compression level
* @param num_groups The number of input groups
* @param nearest_weight A functor which returns the nearest weight in the input
* stream that falls before our current cluster limit
* @param group_info A functor which returns the info for the specified group (total weight,
* size and start offset)
* @param has_nulls Whether or not the input data contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tuple containing the set of cluster weight limits for each group, a set of
* list-style offsets indicating group sizes, and the total number of clusters
*/
template <typename GroupInfo, typename NearestWeight, typename CumulativeWeight>
std::tuple<rmm::device_uvector<double>, std::unique_ptr<column>, size_type>
generate_group_cluster_info(int delta,
size_type num_groups,
NearestWeight nearest_weight,
GroupInfo group_info,
CumulativeWeight cumulative_weight,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
constexpr size_type block_size = 256;
cudf::detail::grid_1d const grid(num_groups, block_size);
// compute number of clusters per group
// each thread computes 1 set of clusters (# of cluster sets == # of groups)
rmm::device_uvector<size_type> group_num_clusters(num_groups, stream);
generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
nullptr,
group_num_clusters.begin(),
nullptr,
has_nulls);
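  // note: the nullptr arguments (cluster weight-limit output and cluster offsets) make this first
  // launch only count clusters per group; the actual weight limits are filled in by the second
  // launch further below.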
// generate group cluster offsets (where the clusters for a given group start and end)
auto group_cluster_offsets = cudf::make_numeric_column(
data_type{type_id::INT32}, num_groups + 1, mask_state::UNALLOCATED, stream, mr);
auto cluster_size = cudf::detail::make_counting_transform_iterator(
0, [group_num_clusters = group_num_clusters.begin(), num_groups] __device__(size_type index) {
return index == num_groups ? 0 : group_num_clusters[index];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
cluster_size,
cluster_size + num_groups + 1,
group_cluster_offsets->mutable_view().begin<offset_type>(),
0);
// total # of clusters
offset_type total_clusters =
cudf::detail::get_value<offset_type>(group_cluster_offsets->view(), num_groups, stream);
// fill in the actual cluster weight limits
rmm::device_uvector<double> group_cluster_wl(total_clusters, stream);
generate_cluster_limits_kernel<<<grid.num_blocks, block_size, 0, stream.value()>>>(
delta,
num_groups,
nearest_weight,
group_info,
cumulative_weight,
group_cluster_wl.begin(),
group_num_clusters.begin(),
group_cluster_offsets->view().begin<offset_type>(),
has_nulls);
return {std::move(group_cluster_wl),
std::move(group_cluster_offsets),
static_cast<size_type>(total_clusters)};
}
std::unique_ptr<column> build_output_column(size_type num_rows,
std::unique_ptr<column>&& means,
std::unique_ptr<column>&& weights,
std::unique_ptr<column>&& offsets,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// whether or not this weight is a stub
auto is_stub_weight = [weights = weights->view().begin<double>()] __device__(size_type i) {
return weights[i] == 0;
};
// whether or not this particular tdigest is a stub
auto is_stub_digest = [offsets = offsets->view().begin<offset_type>(), is_stub_weight] __device__(
size_type i) { return is_stub_weight(offsets[i]) ? 1 : 0; };
size_type const num_stubs = [&]() {
if (!has_nulls) { return 0; }
auto iter = cudf::detail::make_counting_transform_iterator(0, is_stub_digest);
return thrust::reduce(rmm::exec_policy(stream), iter, iter + num_rows);
}();
// if there are no stub tdigests, we can return immediately.
if (num_stubs == 0) {
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(means),
std::move(weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
// otherwise we need to strip out the stubs.
auto remove_stubs = [&](column_view const& col, size_type num_stubs) {
auto result = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, col.size() - num_stubs, mask_state::UNALLOCATED, stream, mr);
thrust::remove_copy_if(rmm::exec_policy(stream),
col.begin<double>(),
col.end<double>(),
thrust::make_counting_iterator(0),
result->mutable_view().begin<double>(),
is_stub_weight);
return result;
};
// remove from the means and weights column
auto _means = remove_stubs(*means, num_stubs);
auto _weights = remove_stubs(*weights, num_stubs);
// adjust offsets.
rmm::device_uvector<offset_type> sizes(num_rows, stream);
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_rows,
sizes.begin(),
[offsets = offsets->view().begin<offset_type>()] __device__(size_type i) {
return offsets[i + 1] - offsets[i];
});
auto iter = cudf::detail::make_counting_transform_iterator(
0, [sizes = sizes.begin(), is_stub_digest, num_rows] __device__(size_type i) {
return i == num_rows || is_stub_digest(i) ? 0 : sizes[i];
});
thrust::exclusive_scan(rmm::exec_policy(stream),
iter,
iter + num_rows + 1,
offsets->mutable_view().begin<offset_type>(),
0);
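  // stub digests contribute a size of 0 to the scan, so they remain as rows in the output but with
  // empty centroid lists, consistent with the stripped means/weights columns above.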
// assemble final column
return cudf::tdigest::detail::make_tdigest_column(num_rows,
std::move(_means),
std::move(_weights),
std::move(offsets),
std::move(min_col),
std::move(max_col),
stream,
mr);
}
/**
* @brief Compute a column of tdigests.
*
* Assembles the output tdigest column based on the specified delta, a stream of
* input values (either scalar or centroids), and an assortment of per-group
* clustering information.
*
* This function is effectively just a reduce_by_key that performs a reduction
 * from input values -> centroid clusters as defined by the cluster weight
* boundaries.
*
* @param delta tdigest compression level
* @param values_begin Beginning of the range of input values.
* @param values_end End of the range of input values.
* @param cumulative_weight Functor which returns cumulative weight and group information for
* an absolute input value index.
* @param min_col Column containing the minimum value per group.
* @param max_col Column containing the maximum value per group.
* @param group_cluster_wl Cluster weight limits for each group.
* @param group_cluster_offsets R-value reference of offsets into the cluster weight limits.
* @param total_clusters Total number of clusters in all groups.
* @param has_nulls Whether or not the input contains nulls
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
*
* @returns A tdigest column with 1 row per output tdigest.
*/
template <typename CentroidIter, typename CumulativeWeight>
std::unique_ptr<column> compute_tdigests(int delta,
CentroidIter centroids_begin,
CentroidIter centroids_end,
CumulativeWeight group_cumulative_weight,
std::unique_ptr<column>&& min_col,
std::unique_ptr<column>&& max_col,
rmm::device_uvector<double> const& group_cluster_wl,
std::unique_ptr<column>&& group_cluster_offsets,
size_type total_clusters,
bool has_nulls,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// the output for each group is a column of data that represents the tdigest. since we want 1 row
// per group, each row will be a list the length of the tdigest for that group. so our output
// column is of the form:
// struct {
// centroids for the digest
// list {
// struct {
// double // mean
// double // weight
// }
// }
// double // min
// double // max
// }
//
if (total_clusters == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
// each input group represents an individual tdigest. within each tdigest, we want the keys
// to represent cluster indices (for example, if a tdigest had 100 clusters, the keys should fall
// into the range 0-99). But since we have multiple tdigests, we need to keep the keys unique
// between the groups, so we add our group start offset.
auto keys = thrust::make_transform_iterator(
thrust::make_counting_iterator(0),
[delta,
group_cluster_wl = group_cluster_wl.data(),
group_cluster_offsets = group_cluster_offsets->view().begin<offset_type>(),
group_cumulative_weight] __device__(size_type value_index) -> size_type {
// get group index, relative value index within the group and cumulative weight.
[[maybe_unused]] auto [group_index, relative_value_index, cumulative_weight] =
group_cumulative_weight(value_index);
auto const num_clusters =
group_cluster_offsets[group_index + 1] - group_cluster_offsets[group_index];
if (num_clusters == 0) { return group_cluster_offsets[group_index]; }
// compute start of cluster weight limits for this group
double const* weight_limits = group_cluster_wl + group_cluster_offsets[group_index];
// local cluster index
size_type const group_cluster_index =
min(num_clusters - 1,
static_cast<size_type>(
thrust::lower_bound(
thrust::seq, weight_limits, weight_limits + num_clusters, cumulative_weight) -
weight_limits));
// add the cluster offset to generate a globally unique key
return group_cluster_index + group_cluster_offsets[group_index];
});
// mean and weight data
auto centroid_means = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
auto centroid_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, total_clusters, mask_state::UNALLOCATED, stream, mr);
// reduce the centroids down by key.
cudf::mutable_column_view mean_col(*centroid_means);
cudf::mutable_column_view weight_col(*centroid_weights);
// reduce the centroids into the clusters
auto output = thrust::make_zip_iterator(thrust::make_tuple(
mean_col.begin<double>(), weight_col.begin<double>(), thrust::make_discard_iterator()));
auto const num_values = std::distance(centroids_begin, centroids_end);
thrust::reduce_by_key(rmm::exec_policy(stream),
keys,
keys + num_values, // keys
centroids_begin, // values
thrust::make_discard_iterator(), // key output
output, // output
thrust::equal_to{}, // key equality check
merge_centroids{});
// create final tdigest column
return build_output_column(group_cluster_offsets->size() - 1,
std::move(centroid_means),
std::move(centroid_weights),
std::move(group_cluster_offsets),
std::move(min_col),
std::move(max_col),
has_nulls,
stream,
mr);
}
// return the min/max value of scalar inputs by group index
template <typename T>
struct get_scalar_minmax_grouped {
column_device_view const col;
device_span<size_type const> group_offsets;
size_type const* group_valid_counts;
__device__ thrust::tuple<double, double> operator()(size_type group_index)
{
auto const valid_count = group_valid_counts[group_index];
return valid_count > 0
? thrust::make_tuple(
static_cast<double>(col.element<T>(group_offsets[group_index])),
static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
// return the min/max value of scalar inputs
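// NOTE: relies on the column having been sorted in ascending order with nulls at the end (see
// reduce_tdigest/typed_reduce_tdigest), so element 0 is the minimum and element (valid_count - 1)
// is the largest non-null value.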
template <typename T>
struct get_scalar_minmax {
column_device_view const col;
size_type const valid_count;
__device__ thrust::tuple<double, double> operator()(size_type)
{
return valid_count > 0
? thrust::make_tuple(static_cast<double>(col.element<T>(0)),
static_cast<double>(col.element<T>(valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
struct typed_group_tdigest {
template <typename T,
std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// first, generate cluster weight information for each input group
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_scalar_weights_grouped{group_offsets.begin()},
scalar_group_info_grouped{group_valid_counts.begin(), group_offsets.begin()},
cumulative_scalar_weight_grouped{group_offsets, group_labels},
col.null_count() > 0,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + num_groups,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax_grouped<T>{*d_col, group_offsets, group_valid_counts.begin()});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid<T>{*d_col});
// generate the final tdigest
return compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + col.size(),
cumulative_scalar_weight_grouped{group_offsets, group_labels},
std::move(min_col),
std::move(max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
col.null_count() > 0,
stream,
mr);
}
template <typename T,
typename... Args,
std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<column> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
struct typed_reduce_tdigest {
// this function assumes col is sorted in ascending order with nulls at the end
template <
typename T,
typename std::enable_if_t<cudf::is_numeric<T>() || cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(column_view const& col,
int delta,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// treat this the same as the groupby path with a single group. Note: even though
// there is only 1 group there are still multiple keys within the group that represent
// the clustering of (N input values) -> (1 output centroid), so the final computation
// remains a reduce_by_key() and not a reduce().
//
// additionally we get a few optimizations.
// - since we only ever have 1 "group" that is sorted with nulls at the end,
// we can simply process just the non-null values and act as if the column
// is non-nullable, allowing us to process fewer values than if we were doing a groupby.
//
// - several of the functors used during the reduction are cheaper than during a groupby.
auto const valid_count = col.size() - col.null_count();
// first, generate cluster weight information for each input group
auto [cluster_wl, cluster_offsets, total_clusters] =
generate_group_cluster_info(delta,
1,
nearest_value_scalar_weights{valid_count},
scalar_group_info{static_cast<double>(valid_count), valid_count},
cumulative_scalar_weight{},
false,
stream,
mr);
// device column view. handy because the .element() function
// automatically handles fixed-point conversions for us
auto d_col = cudf::column_device_view::create(col, stream);
// compute min and max columns
auto min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
auto max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, 1, mask_state::UNALLOCATED, stream, mr);
thrust::transform(
rmm::exec_policy(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(0) + 1,
thrust::make_zip_iterator(thrust::make_tuple(min_col->mutable_view().begin<double>(),
max_col->mutable_view().begin<double>())),
get_scalar_minmax<T>{*d_col, valid_count});
// for simple input values, the "centroids" all have a weight of 1.
auto scalar_to_centroid =
cudf::detail::make_counting_transform_iterator(0, make_centroid_no_nulls<T>{*d_col});
// generate the final tdigest and wrap it in a struct_scalar
return to_tdigest_scalar(compute_tdigests(delta,
scalar_to_centroid,
scalar_to_centroid + valid_count,
cumulative_scalar_weight{},
std::move(min_col),
std::move(max_col),
cluster_wl,
std::move(cluster_offsets),
total_clusters,
false,
stream,
mr),
stream,
mr);
}
template <
typename T,
typename... Args,
typename std::enable_if_t<!cudf::is_numeric<T>() && !cudf::is_fixed_point<T>()>* = nullptr>
std::unique_ptr<scalar> operator()(Args&&...)
{
CUDF_FAIL("Non-numeric type in group_tdigest");
}
};
// utility for merge_tdigests.
template <typename GroupOffsetsIter>
struct group_num_weights_func {
GroupOffsetsIter outer_offsets;
size_type const* inner_offsets;
__device__ size_type operator()(size_type group_index)
{
auto const tdigest_begin = outer_offsets[group_index];
auto const tdigest_end = outer_offsets[group_index + 1];
return inner_offsets[tdigest_end] - inner_offsets[tdigest_begin];
}
};
// utility for merge_tdigests.
struct group_is_empty {
__device__ bool operator()(size_type group_size) { return group_size == 0; }
};
// utility for merge_tdigests.
template <typename GroupLabelsIter>
struct group_key_func {
GroupLabelsIter group_labels;
size_type const* inner_offsets;
size_type num_inner_offsets;
__device__ size_type operator()(size_type index)
{
// what -original- tdigest index this absolute index corresponds to
auto const iter = thrust::prev(
thrust::upper_bound(thrust::seq, inner_offsets, inner_offsets + num_inner_offsets, index));
auto const tdigest_index = thrust::distance(inner_offsets, iter);
// what group index the original tdigest belongs to
return group_labels[tdigest_index];
}
};
template <typename HGroupOffsetIter, typename GroupOffsetIter, typename GroupLabelIter>
std::unique_ptr<column> merge_tdigests(tdigest_column_view const& tdv,
HGroupOffsetIter h_outer_offsets,
GroupOffsetIter group_offsets,
GroupLabelIter group_labels,
size_t num_group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
// thrust::merge and thrust::merge_by_key don't provide what we need. What we would need is an
// algorithm like a super-merge that takes two layers of keys: one which identifies the outer
// grouping of tdigests, and one which identifies the inner groupings of the tdigests within the
// outer groups.
// TODO: investigate replacing the iterative merge with a single stable_sort_by_key.
// bring tdigest offsets back to the host
auto tdigest_offsets = tdv.centroids().offsets();
std::vector<offset_type> h_inner_offsets(tdigest_offsets.size());
cudaMemcpyAsync(h_inner_offsets.data(),
tdigest_offsets.begin<offset_type>(),
sizeof(offset_type) * tdigest_offsets.size(),
cudaMemcpyDeviceToHost,
stream);
stream.synchronize();
// extract all means and weights into a table
cudf::table_view tdigests_unsliced({tdv.means(), tdv.weights()});
// generate the merged (but not yet compressed) tdigests for each group.
std::vector<std::unique_ptr<table>> tdigests;
tdigests.reserve(num_groups);
std::transform(h_outer_offsets,
h_outer_offsets + num_groups,
std::next(h_outer_offsets),
std::back_inserter(tdigests),
[&](auto tdigest_start, auto tdigest_end) {
// the range of tdigests in this group
auto const num_tdigests = tdigest_end - tdigest_start;
// slice each tdigest from the input
std::vector<table_view> unmerged_tdigests;
unmerged_tdigests.reserve(num_tdigests);
auto offset_iter = std::next(h_inner_offsets.begin(), tdigest_start);
std::transform(
offset_iter,
offset_iter + num_tdigests,
std::next(offset_iter),
std::back_inserter(unmerged_tdigests),
[&](size_type start, size_type end) {
return cudf::detail::slice(tdigests_unsliced, {start, end}, stream);
});
// merge
return cudf::detail::merge(unmerged_tdigests,
{0},
{order::ASCENDING},
{},
stream,
rmm::mr::get_current_device_resource());
});
// generate min and max values
auto merged_min_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto min_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.min_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_min{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
min_iter,
thrust::make_discard_iterator(),
merged_min_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::minimum{});
auto merged_max_col = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, num_groups, mask_state::UNALLOCATED, stream, mr);
auto max_iter =
thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(
tdv.max_begin(), cudf::tdigest::detail::size_begin(tdv))),
tdigest_max{});
thrust::reduce_by_key(rmm::exec_policy(stream),
group_labels,
group_labels + num_group_labels,
max_iter,
thrust::make_discard_iterator(),
merged_max_col->mutable_view().begin<double>(),
thrust::equal_to{}, // key equality check
thrust::maximum{});
// for any empty groups, set the min and max to be 0. not technically necessary but it makes
// testing simpler.
auto group_num_weights = cudf::detail::make_counting_transform_iterator(
0,
group_num_weights_func<decltype(group_offsets)>{group_offsets,
tdigest_offsets.begin<size_type>()});
thrust::replace_if(rmm::exec_policy(stream),
merged_min_col->mutable_view().begin<double>(),
merged_min_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
thrust::replace_if(rmm::exec_policy(stream),
merged_max_col->mutable_view().begin<double>(),
merged_max_col->mutable_view().end<double>(),
group_num_weights,
group_is_empty{},
0);
// concatenate all the merged tdigests back into one table.
std::vector<table_view> tdigest_views;
tdigest_views.reserve(num_groups);
std::transform(tdigests.begin(),
tdigests.end(),
std::back_inserter(tdigest_views),
[](std::unique_ptr<table> const& t) { return t->view(); });
auto merged = cudf::detail::concatenate(tdigest_views, stream);
// generate cumulative weights
auto merged_weights = merged->get_column(1).view();
auto cumulative_weights = cudf::make_numeric_column(
data_type{type_id::FLOAT64}, merged_weights.size(), mask_state::UNALLOCATED);
auto keys = cudf::detail::make_counting_transform_iterator(
0,
group_key_func<decltype(group_labels)>{
group_labels, tdigest_offsets.begin<size_type>(), tdigest_offsets.size()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
keys,
keys + cumulative_weights->size(),
merged_weights.begin<double>(),
cumulative_weights->mutable_view().begin<double>());
auto const delta = max_centroids;
// generate cluster info
auto [group_cluster_wl, group_cluster_offsets, total_clusters] = generate_group_cluster_info(
delta,
num_groups,
nearest_value_centroid_weights<decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
centroid_group_info<decltype(group_offsets)>{cumulative_weights->view().begin<double>(),
group_offsets,
tdigest_offsets.begin<size_type>()},
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}},
false,
stream,
mr);
// input centroid values
auto centroids = cudf::detail::make_counting_transform_iterator(
0,
make_weighted_centroid{merged->get_column(0).view().begin<double>(),
merged_weights.begin<double>()});
// compute the tdigest
return compute_tdigests(
delta,
centroids,
centroids + merged->num_rows(),
cumulative_centroid_weight<decltype(group_labels), decltype(group_offsets)>{
cumulative_weights->view().begin<double>(),
group_labels,
group_offsets,
{tdigest_offsets.begin<offset_type>(), static_cast<size_t>(tdigest_offsets.size())}},
std::move(merged_min_col),
std::move(merged_max_col),
group_cluster_wl,
std::move(group_cluster_offsets),
total_clusters,
false,
stream,
mr);
}
} // anonymous namespace
std::unique_ptr<scalar> reduce_tdigest(column_view const& col,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
// since this isn't coming out of a groupby, we need to sort the inputs in ascending
// order with nulls at the end.
table_view t({col});
auto sorted = cudf::detail::sort(
t, {order::ASCENDING}, {null_order::AFTER}, stream, rmm::mr::get_current_device_resource());
auto const delta = max_centroids;
return cudf::type_dispatcher(
col.type(), typed_reduce_tdigest{}, sorted->get_column(0), delta, stream, mr);
}
std::unique_ptr<scalar> reduce_merge_tdigest(column_view const& input,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (input.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_scalar(stream, mr); }
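// treat the whole input column as a single group: offsets span [0, size) and every row gets group label 0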
auto h_group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()](size_type i) { return i == 0 ? 0 : size; });
auto group_offsets = cudf::detail::make_counting_transform_iterator(
0, [size = input.size()] __device__(size_type i) { return i == 0 ? 0 : size; });
auto group_labels = thrust::make_constant_iterator(0);
return to_tdigest_scalar(merge_tdigests(tdv,
h_group_offsets,
group_offsets,
group_labels,
input.size(),
1,
max_centroids,
stream,
mr),
stream,
mr);
}
std::unique_ptr<column> group_tdigest(column_view const& col,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
cudf::device_span<size_type const> group_valid_counts,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
if (col.size() == 0) { return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr); }
auto const delta = max_centroids;
return cudf::type_dispatcher(col.type(),
typed_group_tdigest{},
col,
group_offsets,
group_labels,
group_valid_counts,
num_groups,
delta,
stream,
mr);
}
std::unique_ptr<column> group_merge_tdigest(column_view const& input,
cudf::device_span<size_type const> group_offsets,
cudf::device_span<size_type const> group_labels,
size_type num_groups,
int max_centroids,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
tdigest_column_view tdv(input);
if (num_groups == 0 || input.size() == 0) {
return cudf::tdigest::detail::make_empty_tdigest_column(stream, mr);
}
// bring group offsets back to the host
std::vector<size_type> h_group_offsets(group_offsets.size());
hipMemcpyAsync(h_group_offsets.data(),
group_offsets.begin(),
sizeof(size_type) * group_offsets.size(),
hipMemcpyDeviceToHost,
stream);
return merge_tdigests(tdv,
h_group_offsets.begin(),
group_offsets.data(),
group_labels.data(),
group_labels.size(),
num_groups,
max_centroids,
stream,
mr);
}
} // namespace detail
} // namespace tdigest
} // namespace cudf
|
0dcb16219489c8718eceeccb34437a05633fdc17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "check_gpu_hit_structures.h"
#include "Hit.h"
#include "HitStructures.h"
#include "HitStructuresCU.h"
#include "reorganize_gplex.h"
#include "gpu_utils.h"
#include <iostream>
__global__ void get_hit_pos_and_err(LayerOfHitsCU *layers,
int ilay, int hit_idx, float *pos, float *err, int pos_size, int err_size) {
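// a single thread copies the selected hit's position and error arrays into the output buffers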
if (threadIdx.x + blockDim.x * blockIdx.x == 0) {
LayerOfHitsCU &layer = layers[ilay];
Hit &hit = layer.m_hits[hit_idx];
float *posArray = get_posArray(hit);
float *errArray = get_errArray(hit);
for (int i = 0; i < pos_size; ++i) {
pos[i] = posArray[i];
}
for (int i = 0; i < err_size; ++i) {
err[i] = errArray[i];
}
}
}
void compare_carrays(const float *h_a, const float *d_a,
const float prec, const int n)
{
for (int i = 0; i < n; ++i) {
// note: this should be a relative comparison; verify that division by zero cannot occur
if (std::abs(h_a[i] - d_a[i]) > prec) {
std::cerr << i << " : " << h_a[i] << " / " << d_a[i] << std::endl;
}
}
}
void check_event_of_hits_gpu(const EventOfHits& event_of_hits)
{
EventOfHitsCU event_of_hits_cu;
event_of_hits_cu.reserve_layers(event_of_hits);
event_of_hits_cu.copyFromCPU(event_of_hits, 0);
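// extract one hit's position/error on the GPU and compare it element-wise against the host-side copy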
constexpr int pos_size = 3;
constexpr int err_size = 6;
float *d_pos, *d_err;
float pos[pos_size], err[err_size];
hipMalloc((void**)&d_pos, pos_size*sizeof(float));
hipMalloc((void**)&d_err, err_size*sizeof(float));
dim3 grid(1, 1, 1);
dim3 block(1, 1, 1);
int ilay = 2;
int hit_idx = 3;
hipLaunchKernelGGL(( get_hit_pos_and_err) , dim3(grid), dim3(block) , 0, 0,
event_of_hits_cu.m_layers_of_hits.data(), ilay, hit_idx, d_pos, d_err, pos_size, err_size);
hipMemcpy(pos, d_pos, pos_size*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(err, d_err, err_size*sizeof(float), hipMemcpyDeviceToHost);
//std::cerr << "pos ......................\n";
compare_carrays(event_of_hits.m_layers_of_hits[ilay].m_hits[hit_idx].posArray(),
pos, 1e-3, pos_size);
//std::cerr << "err ......................\n";
compare_carrays(event_of_hits.m_layers_of_hits[ilay].m_hits[hit_idx].errArray(),
err, 1e-3, err_size);
hipFree(d_pos);
hipFree(d_err);
}
__global__ void get_cand_pos_and_err(EtaBinOfCandidatesCU *etabin_of_cands,
const int ebin, const int itrack, float *pos, float *err,
const int pos_size, const int err_size)
{
if (threadIdx.x + blockDim.x * blockIdx.x == 0) {
Track &track = etabin_of_cands[ebin].m_candidates[itrack];
float *posArray = get_posArray(track);
float *errArray = get_errArray(track);
for (int i = 0; i < pos_size; ++i) {
pos[i] = posArray[i];
}
for (int i = 0; i < err_size; ++i) {
err[i] = errArray[i];
}
}
}
void check_event_of_cands_gpu(const EventOfCandidates& event_of_cands)
{
EventOfCandidatesCU event_of_cands_cu;
event_of_cands_cu.allocGPU(event_of_cands);
event_of_cands_cu.copyFromCPU(event_of_cands);
constexpr int pos_size = 6;
constexpr int err_size = 21;
float *d_pos, *d_err;
float pos[pos_size], err[err_size];
hipMalloc((void**)&d_pos, pos_size*sizeof(float));
hipMalloc((void**)&d_err, err_size*sizeof(float));
dim3 grid(1, 1, 1);
dim3 block(1, 1, 1);
int etabin = std::min(2, Config::nEtaBin-1);
int itrack = 3;
hipLaunchKernelGGL(( get_cand_pos_and_err) , dim3(grid), dim3(block) , 0, 0,
event_of_cands_cu.m_etabins_of_candidates,
etabin, itrack, d_pos, d_err, pos_size, err_size);
cudaCheckErrorSync();
hipMemcpy(pos, d_pos, pos_size*sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrorSync();
hipMemcpy(err, d_err, err_size*sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrorSync();
/*std::cerr << "pos ......................\n";*/
compare_carrays(event_of_cands.m_etabins_of_candidates[etabin].m_candidates[itrack].posArray(),
pos, 1e-3, pos_size);
/*std::cerr << "err ......................\n";*/
compare_carrays(event_of_cands.m_etabins_of_candidates[etabin].m_candidates[itrack].errArray(),
err, 1e-3, err_size);
hipFree(d_pos);
hipFree(d_err);
//event_of_cands_cu.copyToCPU(event_of_cands);
event_of_cands_cu.deallocGPU();
cudaCheckErrorSync();
}
| 0dcb16219489c8718eceeccb34437a05633fdc17.cu | #include "check_gpu_hit_structures.h"
#include "Hit.h"
#include "HitStructures.h"
#include "HitStructuresCU.h"
#include "reorganize_gplex.h"
#include "gpu_utils.h"
#include <iostream>
__global__ void get_hit_pos_and_err(LayerOfHitsCU *layers,
int ilay, int hit_idx, float *pos, float *err, int pos_size, int err_size) {
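// a single thread copies the selected hit's position and error arrays into the output buffers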
if (threadIdx.x + blockDim.x * blockIdx.x == 0) {
LayerOfHitsCU &layer = layers[ilay];
Hit &hit = layer.m_hits[hit_idx];
float *posArray = get_posArray(hit);
float *errArray = get_errArray(hit);
for (int i = 0; i < pos_size; ++i) {
pos[i] = posArray[i];
}
for (int i = 0; i < err_size; ++i) {
err[i] = errArray[i];
}
}
}
void compare_carrays(const float *h_a, const float *d_a,
const float prec, const int n)
{
for (int i = 0; i < n; ++i) {
// note: this should be a relative comparison; verify that division by zero cannot occur
if (std::abs(h_a[i] - d_a[i]) > prec) {
std::cerr << i << " : " << h_a[i] << " / " << d_a[i] << std::endl;
}
}
}
void check_event_of_hits_gpu(const EventOfHits& event_of_hits)
{
EventOfHitsCU event_of_hits_cu;
event_of_hits_cu.reserve_layers(event_of_hits);
event_of_hits_cu.copyFromCPU(event_of_hits, 0);
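// extract one hit's position/error on the GPU and compare it element-wise against the host-side copy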
constexpr int pos_size = 3;
constexpr int err_size = 6;
float *d_pos, *d_err;
float pos[pos_size], err[err_size];
cudaMalloc((void**)&d_pos, pos_size*sizeof(float));
cudaMalloc((void**)&d_err, err_size*sizeof(float));
dim3 grid(1, 1, 1);
dim3 block(1, 1, 1);
int ilay = 2;
int hit_idx = 3;
get_hit_pos_and_err <<< grid, block >>>
(event_of_hits_cu.m_layers_of_hits.data(), ilay, hit_idx, d_pos, d_err, pos_size, err_size);
cudaMemcpy(pos, d_pos, pos_size*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(err, d_err, err_size*sizeof(float), cudaMemcpyDeviceToHost);
//std::cerr << "pos ......................\n";
compare_carrays(event_of_hits.m_layers_of_hits[ilay].m_hits[hit_idx].posArray(),
pos, 1e-3, pos_size);
//std::cerr << "err ......................\n";
compare_carrays(event_of_hits.m_layers_of_hits[ilay].m_hits[hit_idx].errArray(),
err, 1e-3, err_size);
cudaFree(d_pos);
cudaFree(d_err);
}
__global__ void get_cand_pos_and_err(EtaBinOfCandidatesCU *etabin_of_cands,
const int ebin, const int itrack, float *pos, float *err,
const int pos_size, const int err_size)
{
if (threadIdx.x + blockDim.x * blockIdx.x == 0) {
Track &track = etabin_of_cands[ebin].m_candidates[itrack];
float *posArray = get_posArray(track);
float *errArray = get_errArray(track);
for (int i = 0; i < pos_size; ++i) {
pos[i] = posArray[i];
}
for (int i = 0; i < err_size; ++i) {
err[i] = errArray[i];
}
}
}
void check_event_of_cands_gpu(const EventOfCandidates& event_of_cands)
{
EventOfCandidatesCU event_of_cands_cu;
event_of_cands_cu.allocGPU(event_of_cands);
event_of_cands_cu.copyFromCPU(event_of_cands);
constexpr int pos_size = 6;
constexpr int err_size = 21;
float *d_pos, *d_err;
float pos[pos_size], err[err_size];
cudaMalloc((void**)&d_pos, pos_size*sizeof(float));
cudaMalloc((void**)&d_err, err_size*sizeof(float));
dim3 grid(1, 1, 1);
dim3 block(1, 1, 1);
int etabin = std::min(2, Config::nEtaBin-1);
int itrack = 3;
get_cand_pos_and_err <<< grid, block >>>
(event_of_cands_cu.m_etabins_of_candidates,
etabin, itrack, d_pos, d_err, pos_size, err_size);
cudaCheckErrorSync();
cudaMemcpy(pos, d_pos, pos_size*sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrorSync();
cudaMemcpy(err, d_err, err_size*sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrorSync();
/*std::cerr << "pos ......................\n";*/
compare_carrays(event_of_cands.m_etabins_of_candidates[etabin].m_candidates[itrack].posArray(),
pos, 1e-3, pos_size);
/*std::cerr << "err ......................\n";*/
compare_carrays(event_of_cands.m_etabins_of_candidates[etabin].m_candidates[itrack].errArray(),
err, 1e-3, err_size);
cudaFree(d_pos);
cudaFree(d_err);
//event_of_cands_cu.copyToCPU(event_of_cands);
event_of_cands_cu.deallocGPU();
cudaCheckErrorSync();
}
|
827a57f552ac05e572c0dfaa1a282d7c6d7b19a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "Visualizer/Visualizer.cuh"
#include "Geometry/Scene.cuh"
#include <stdio.h>
#include <chrono>
inline __device__ __host__ unsigned int divide_up(unsigned int num, unsigned int denum);
void update(bool * keys);
bool updateCamera(bool * keys,Camera& cam,float dt);
int main(int argc, char *argv[])
{
const unsigned int k = 1;
const unsigned int width = 1080 * k;
const unsigned int height = 720 * k;
const unsigned int num_pixel = width * height;
Visualizer visu(width, height);
Scene scene(&visu);
const dim3 block_size(8,16);
const dim3 grid_size = dim3(divide_up(height, block_size.x), divide_up(width, block_size.y));
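// divide_up rounds up so the launch grid is large enough to cover every pixel of the frame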
RGBColor * d_fbf;
hipMalloc((void**)&d_fbf, num_pixel * sizeof(RGBColor));
Camera cam(Math::makeVector(-4.0f, 0.0f, 0.0f), Math::makeVector(-3.0f, 0.0f, 0.0f), 0.4f, 1.0f, 9.f / 16.f);
scene.setCam(cam);
bool keys[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int nb_pass = 1;
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
hiprandState_t *d_rand_state;
hipMalloc((void **)&d_rand_state, width * height * sizeof(hiprandState_t));
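// one RNG state per pixel, initialized once by render_init before entering the render loop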
scene::render_init << <grid_size, block_size >> > (width, height, d_rand_state);
std::cout << "Le rendu peut commencer !\n\n Appuyer sur espace" << std::endl;
while (1) {
t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> time_span = std::chrono::duration_cast<std::chrono::duration<float>>(t2 - t1);
float dt = time_span.count();
t1 = t2;
update(keys);
bool reset = updateCamera(keys,cam,dt);
if (reset) {
scene.setCam(cam);
nb_pass = 1;
}
scene.compute(d_fbf, grid_size, block_size, nb_pass,reset, d_rand_state);
hipDeviceSynchronize();
visu.blit(d_fbf, block_size, grid_size,nb_pass);
visu.update();
nb_pass++;
std::cout << 1.f / dt << std::endl;
}
visu.waitKeyPressed();
// Free GPU resources
hipFree(d_fbf);
hipDeviceReset();
// Free CPU resources
return 0;
}
inline __device__ __host__ unsigned int divide_up(unsigned int num, unsigned int denum)
{
unsigned int res = num / denum;
if (res * denum < num)
{
res += 1;
}
return res;
}
void update(bool * keys)
{
SDL_Event event;
while (SDL_PollEvent(&event))
{
if (event.type == SDL_KEYDOWN)
{
switch (event.key.keysym.sym)
{
case SDLK_UP:
keys[0] = 1;
break;
case SDLK_DOWN:
keys[1] = 1;
break;
case SDLK_LEFT:
keys[2] = 1;
break;
case SDLK_RIGHT:
keys[3] = 1;
break;
case SDLK_z:
keys[4] = 1;
break;
case SDLK_s:
keys[5] = 1;
break;
case SDLK_d:
keys[6] = 1;
break;
case SDLK_q:
keys[7] = 1;
break;
case SDLK_SPACE:
keys[8] = 1;
break;
case SDLK_LCTRL:
keys[9] = 1;
break;
default:
break;
}
}
else if (event.type == SDL_KEYUP)
{
switch (event.key.keysym.sym)
{
case SDLK_UP:
keys[0] = 0;
break;
case SDLK_DOWN:
keys[1] = 0;
break;
case SDLK_LEFT:
keys[2] = 0;
break;
case SDLK_RIGHT:
keys[3] = 0;
break;
case SDLK_z:
keys[4] = 0;
break;
case SDLK_s:
keys[5] = 0;
break;
case SDLK_d:
keys[6] = 0;
break;
case SDLK_q:
keys[7] = 0;
break;
case SDLK_SPACE:
keys[8] = 0;
break;
case SDLK_LCTRL:
keys[9] = 0;
break;
default:
break;
}
}
else if (event.type == SDL_QUIT)
{
exit(0);
}
}
}
bool updateCamera(bool * keys,Camera& cam, float dt) {
bool result = false;
float forward = 0;
float upward = 0;
float rightward = 0;
float inclination = acos(cam.getFront()[2]);
float azimuth = atan2(cam.getFront()[1],cam.getFront()[0]);
const float speed = 2;
const float angle_speed = 2;
if (keys[0])
{
inclination -= angle_speed * dt;
result = true;
}
if (keys[1])
{
inclination += angle_speed * dt;
result = true;
}
if (keys[2])
{
azimuth += angle_speed * dt;
result = true;
}
if (keys[3])
{
azimuth -= angle_speed * dt;
result = true;
}
if (keys[4])
{
forward += speed * dt;
result = true;
}
if (keys[5])
{
forward -= speed * dt;
result = true;
}
if (keys[6])
{
rightward += speed * dt;
result = true;
}
if (keys[7])
{
rightward -= speed * dt;
result = true;
}
if (keys[8])
{
upward += speed * dt;
result = true;
}
if (keys[9])
{
upward -= speed * dt;
result = true;
}
if (inclination > 3)
{
inclination = 3;
}
else if (inclination < 0.1)
{
inclination = 0.1;
}
Math::Vector3f translation = Math::makeVector(rightward, forward, upward);
//bool reset = translation != Math::makeVector(0.0f, 0.0f, 0.0f);
//reset = reset || inclination != 0 || azimuth != 0;
if (result) {
cam.translateLocal(translation);
Math::Vector3f direction = Math::makeVector(sin(inclination) * cos(azimuth), sin(inclination) * sin(azimuth), cos(inclination));
cam.setTarget(cam.getPosition()+direction);
}
return result;
}
| 827a57f552ac05e572c0dfaa1a282d7c6d7b19a4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Visualizer/Visualizer.cuh"
#include "Geometry/Scene.cuh"
#include <stdio.h>
#include <chrono>
inline __device__ __host__ unsigned int divide_up(unsigned int num, unsigned int denum);
void update(bool * keys);
bool updateCamera(bool * keys,Camera& cam,float dt);
int main(int argc, char *argv[])
{
const unsigned int k = 1;
const unsigned int width = 1080 * k;
const unsigned int height = 720 * k;
const unsigned int num_pixel = width * height;
Visualizer visu(width, height);
Scene scene(&visu);
const dim3 block_size(8,16);
const dim3 grid_size = dim3(divide_up(height, block_size.x), divide_up(width, block_size.y));
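// divide_up rounds up so the launch grid is large enough to cover every pixel of the frame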
RGBColor * d_fbf;
cudaMalloc((void**)&d_fbf, num_pixel * sizeof(RGBColor));
Camera cam(Math::makeVector(-4.0f, 0.0f, 0.0f), Math::makeVector(-3.0f, 0.0f, 0.0f), 0.4f, 1.0f, 9.f / 16.f);
scene.setCam(cam);
bool keys[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
int nb_pass = 1;
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
curandState *d_rand_state;
cudaMalloc((void **)&d_rand_state, width * height * sizeof(curandState));
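// one RNG state per pixel, initialized once by render_init before entering the render loop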
scene::render_init << <grid_size, block_size >> > (width, height, d_rand_state);
std::cout << "Le rendu peut commencer !\n\n Appuyer sur espace" << std::endl;
while (1) {
t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> time_span = std::chrono::duration_cast<std::chrono::duration<float>>(t2 - t1);
float dt = time_span.count();
t1 = t2;
update(keys);
bool reset = updateCamera(keys,cam,dt);
if (reset) {
scene.setCam(cam);
nb_pass = 1;
}
scene.compute(d_fbf, grid_size, block_size, nb_pass,reset, d_rand_state);
cudaDeviceSynchronize();
visu.blit(d_fbf, block_size, grid_size,nb_pass);
visu.update();
nb_pass++;
std::cout << 1.f / dt << std::endl;
}
visu.waitKeyPressed();
// Free GPU resources
cudaFree(d_fbf);
cudaDeviceReset();
// Free CPU resources
return 0;
}
inline __device__ __host__ unsigned int divide_up(unsigned int num, unsigned int denum)
{
unsigned int res = num / denum;
if (res * denum < num)
{
res += 1;
}
return res;
}
void update(bool * keys)
{
SDL_Event event;
while (SDL_PollEvent(&event))
{
if (event.type == SDL_KEYDOWN)
{
switch (event.key.keysym.sym)
{
case SDLK_UP:
keys[0] = 1;
break;
case SDLK_DOWN:
keys[1] = 1;
break;
case SDLK_LEFT:
keys[2] = 1;
break;
case SDLK_RIGHT:
keys[3] = 1;
break;
case SDLK_z:
keys[4] = 1;
break;
case SDLK_s:
keys[5] = 1;
break;
case SDLK_d:
keys[6] = 1;
break;
case SDLK_q:
keys[7] = 1;
break;
case SDLK_SPACE:
keys[8] = 1;
break;
case SDLK_LCTRL:
keys[9] = 1;
break;
default:
break;
}
}
else if (event.type == SDL_KEYUP)
{
switch (event.key.keysym.sym)
{
case SDLK_UP:
keys[0] = 0;
break;
case SDLK_DOWN:
keys[1] = 0;
break;
case SDLK_LEFT:
keys[2] = 0;
break;
case SDLK_RIGHT:
keys[3] = 0;
break;
case SDLK_z:
keys[4] = 0;
break;
case SDLK_s:
keys[5] = 0;
break;
case SDLK_d:
keys[6] = 0;
break;
case SDLK_q:
keys[7] = 0;
break;
case SDLK_SPACE:
keys[8] = 0;
break;
case SDLK_LCTRL:
keys[9] = 0;
break;
default:
break;
}
}
else if (event.type == SDL_QUIT)
{
exit(0);
}
}
}
bool updateCamera(bool * keys,Camera& cam, float dt) {
bool result = false;
float forward = 0;
float upward = 0;
float rightward = 0;
float inclination = acos(cam.getFront()[2]);
float azimuth = atan2(cam.getFront()[1],cam.getFront()[0]);
const float speed = 2;
const float angle_speed = 2;
if (keys[0])
{
inclination -= angle_speed * dt;
result = true;
}
if (keys[1])
{
inclination += angle_speed * dt;
result = true;
}
if (keys[2])
{
azimuth += angle_speed * dt;
result = true;
}
if (keys[3])
{
azimuth -= angle_speed * dt;
result = true;
}
if (keys[4])
{
forward += speed * dt;
result = true;
}
if (keys[5])
{
forward -= speed * dt;
result = true;
}
if (keys[6])
{
rightward += speed * dt;
result = true;
}
if (keys[7])
{
rightward -= speed * dt;
result = true;
}
if (keys[8])
{
upward += speed * dt;
result = true;
}
if (keys[9])
{
upward -= speed * dt;
result = true;
}
if (inclination > 3)
{
inclination = 3;
}
else if (inclination < 0.1)
{
inclination = 0.1;
}
Math::Vector3f translation = Math::makeVector(rightward, forward, upward);
//bool reset = translation != Math::makeVector(0.0f, 0.0f, 0.0f);
//reset = reset || inclination != 0 || azimuth != 0;
if (result) {
cam.translateLocal(translation);
Math::Vector3f direction = Math::makeVector(sin(inclination) * cos(azimuth), sin(inclination) * sin(azimuth), cos(inclination));
cam.setTarget(cam.getPosition()+direction);
}
return result;
}
|
b5d4c63aaadf404041f418aa439b8c30d8c1da11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "full_screen_opengl.h"
struct CudaRenderContext {
unsigned int height, width;
cuda::raw_ptr<Pixel> out;
};
__global__ void renderKernel(CudaRenderContext ctx) {
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
//auto threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) +
// (threadIdx.y * blockDim.x) + threadIdx.x;
int i = (ctx.height - y - 1) * ctx.width + x; // pixel index
ctx.out[i].x = x;
ctx.out[i].y = y;
ctx.out[i].color.components[0] = x * 255 / ctx.width;
ctx.out[i].color.components[1] = 0;
ctx.out[i].color.components[2] = y * 255 / ctx.height;
ctx.out[i].color.components[3] = y * 255 / ctx.height;
}
void FullScreenOpenGLScene::renderCuda() {
CudaRenderContext ctx;
ctx.width = width;
ctx.height = height;
ctx.out = vboPtr_;
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
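// note: integer division here assumes width and height are multiples of the block size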
hipLaunchKernelGGL(( renderKernel), dim3(grid), dim3(block), 0, 0, ctx);
CUDA_CALL(hipDeviceSynchronize());
} | b5d4c63aaadf404041f418aa439b8c30d8c1da11.cu | #include "full_screen_opengl.h"
struct CudaRenderContext {
unsigned int height, width;
cuda::raw_ptr<Pixel> out;
};
__global__ void renderKernel(CudaRenderContext ctx) {
auto x = blockIdx.x * blockDim.x + threadIdx.x;
auto y = blockIdx.y * blockDim.y + threadIdx.y;
//auto threadId = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y) +
// (threadIdx.y * blockDim.x) + threadIdx.x;
int i = (ctx.height - y - 1) * ctx.width + x; // pixel index
ctx.out[i].x = x;
ctx.out[i].y = y;
ctx.out[i].color.components[0] = x * 255 / ctx.width;
ctx.out[i].color.components[1] = 0;
ctx.out[i].color.components[2] = y * 255 / ctx.height;
ctx.out[i].color.components[3] = y * 255 / ctx.height;
}
void FullScreenOpenGLScene::renderCuda() {
CudaRenderContext ctx;
ctx.width = width;
ctx.height = height;
ctx.out = vboPtr_;
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
renderKernel<<<grid, block, 0>>>(ctx);
CUDA_CALL(cudaDeviceSynchronize());
} |
b0621a4181c50f8066df05c0acab837c96e0e019.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
// Due Tuesday, January 22, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(hipMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(hipMalloc((void**)&deviceOutput, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(hipMemset(deviceInput, 0, numElements*sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(hipMemcpy(deviceInput, hostInput, numElements*sizeof(float), hipMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
//@@ on the device
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(hipMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), hipMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
| b0621a4181c50f8066df05c0acab837c96e0e019.cu | // MP 5 Scan
// Given a list (lst) of length n
// Output its prefix sum = {lst[0], lst[0] + lst[1], lst[0] + lst[1] + ... + lst[n-1]}
// Due Tuesday, January 22, 2013 at 11:59 p.m. PST
#include <wb.h>
#define BLOCK_SIZE 512 //@@ You can change this
#define wbCheck(stmt) do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
return -1; \
} \
} while(0)
__global__ void scan(float * input, float * output, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
}
int main(int argc, char ** argv) {
wbArg_t args;
float * hostInput; // The input 1D list
float * hostOutput; // The output list
float * deviceInput;
float * deviceOutput;
int numElements; // number of elements in the list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput = (float *) wbImport(wbArg_getInputFile(args, 0), &numElements);
hostOutput = (float*) malloc(numElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ", numElements);
wbTime_start(GPU, "Allocating GPU memory.");
wbCheck(cudaMalloc((void**)&deviceInput, numElements*sizeof(float)));
wbCheck(cudaMalloc((void**)&deviceOutput, numElements*sizeof(float)));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Clearing output memory.");
wbCheck(cudaMemset(deviceInput, 0, numElements*sizeof(float)));
wbTime_stop(GPU, "Clearing output memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
wbCheck(cudaMemcpy(deviceInput, hostInput, numElements*sizeof(float), cudaMemcpyHostToDevice));
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Modify this to complete the functionality of the scan
//@@ on the device
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
wbCheck(cudaMemcpy(hostOutput, deviceOutput, numElements*sizeof(float), cudaMemcpyDeviceToHost));
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, numElements);
free(hostInput);
free(hostOutput);
return 0;
}
|
9d8b66d6b538254e55fe32e1290f833708f3b0b5.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/core/op_registration/op_registration.h>
#include <ATen/native/hip/Resize.cuh>
#include <ATen/native/ResizeCommon.h>
namespace at {
namespace native {
namespace {
Tensor& resize_cuda_(
Tensor& self,
IntArrayRef size,
c10::optional<MemoryFormat> optional_memory_format) {
if (self.has_names()) {
return resize_named_tensor_(self, size, optional_memory_format);
}
auto* self_ = self.unsafeGetTensorImpl();
resize_impl_cuda_(self_, size, /*strides=*/c10::nullopt);
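// if a memory format was requested, restride the resized tensor accordingly (Preserve is not supported)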
if (optional_memory_format.has_value()) {
auto memory_format =
optional_memory_format.value();
TORCH_CHECK(
memory_format != MemoryFormat::Preserve,
"Unsupported memory format",
memory_format);
self_->empty_tensor_restride(memory_format);
}
return self;
}
static auto registry = torch::RegisterOperators()
.op(torch::RegisterOperators::options()
.schema("aten::resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)")
.impl_unboxedOnlyKernel<decltype(resize_cuda_), &resize_cuda_>(DispatchKey::CUDATensorId))
;
} // namespace
} // namespace native
} // namespace at
| 9d8b66d6b538254e55fe32e1290f833708f3b0b5.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/core/op_registration/op_registration.h>
#include <ATen/native/cuda/Resize.cuh>
#include <ATen/native/ResizeCommon.h>
namespace at {
namespace native {
namespace {
Tensor& resize_cuda_(
Tensor& self,
IntArrayRef size,
c10::optional<MemoryFormat> optional_memory_format) {
if (self.has_names()) {
return resize_named_tensor_(self, size, optional_memory_format);
}
auto* self_ = self.unsafeGetTensorImpl();
resize_impl_cuda_(self_, size, /*strides=*/c10::nullopt);
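// if a memory format was requested, restride the resized tensor accordingly (Preserve is not supported)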
if (optional_memory_format.has_value()) {
auto memory_format =
optional_memory_format.value();
TORCH_CHECK(
memory_format != MemoryFormat::Preserve,
"Unsupported memory format",
memory_format);
self_->empty_tensor_restride(memory_format);
}
return self;
}
static auto registry = torch::RegisterOperators()
.op(torch::RegisterOperators::options()
.schema("aten::resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)")
.impl_unboxedOnlyKernel<decltype(resize_cuda_), &resize_cuda_>(DispatchKey::CUDATensorId))
;
} // namespace
} // namespace native
} // namespace at
|
1d9b7fb369473be61af351637b83e72fb98cc8ee.hip | // !!! This is a file automatically generated by hipify!!!
/* authors: Kostas Papagiannopoulos, RU
Arturo Cedillo, TU/e
Mathias Morbitzer, RU
Project Lola Scam Van [Low-latency scalar multiplication for VANETs]
This file is part of Project Lola Scam Van. The DIES research group, University of Twente
is free to use and modify the project for research and educational purposes.
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "finite_field_arithmetic.cuh"
#include "EC_double_jacobian.cuh"
//#include "EC_double_projective_serial.cuh"
//#include "EC_add_projective_serial.cuh"
//#include "EC_mixed_add_jacobian.cuh"
#include "show_and_move.cuh"
hipError_t cuda_kernel( uint32_t *r, uint32_t *x1, uint32_t *y1, uint32_t *z1, uint32_t *x2, uint32_t *y2, size_t size);
__global__ void kernel(uint32_t* x1,uint32_t* y1,uint32_t* z1,uint32_t* x2,uint32_t* y2)
{
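// perform 224 successive point doublings of (x1, y1, z1) in Jacobian coordinates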
for (int j=0;j<224;j++){
EC_double_jacobian_parallel(x1,y1,z1);
}
//Here we can use EC_double, EC_add to do the scalar multiplication.
//We should consider sliding window and mixed addition
}
int main()
{
const uint32_t arraySize = 8;
uint32_t x1[arraySize] = { 2, 0, 0, 0, 0, 0, 0, 0 };
uint32_t y1[arraySize] = { 3, 0, 0, 0, 0, 0, 0, 0 };
uint32_t z1[arraySize] = { 4, 0, 0, 0, 0, 0, 0, 0 };
uint32_t x2[arraySize] = { 7, 0, 0, 0, 0, 0, 0, 0 };
uint32_t y2[arraySize] = { 9 , 0, 0, 0, 0, 0, 0, 0 };
uint32_t r[arraySize] = { 0 };
hipError_t cudaStatus = cuda_kernel(r,x1,y1,z1,x2,y2,arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cuda_kernel failed!");
return 1;
}
printf("End of Kernel\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
system("PAUSE");
return 0;
}
hipError_t cuda_kernel( uint32_t *r, uint32_t *x1 , uint32_t *y1, uint32_t *z1 , uint32_t *x2 , uint32_t *y2 , size_t size)
{
uint32_t *dev_x1 = 0;
uint32_t *dev_y1 = 0;
uint32_t *dev_z1 = 0;
uint32_t *dev_x2 = 0;
uint32_t *dev_y2 = 0;
uint32_t *dev_r = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_r, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_x1, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_y1, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_z1, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_x2, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_y2, size * sizeof(uint32_t));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_x1, x1, size * sizeof(uint32_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_y1, y1, size * sizeof(uint32_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_z1, z1, size * sizeof(uint32_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_x2, x2, size * sizeof(uint32_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_y2, y2, size * sizeof(uint32_t), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
clock_t start,end;
start=clock();
// Current kernel config: 1 block, 2 threads per block. Decent parallel results with <<<16,64>>> on a GT 525M GPU
hipLaunchKernelGGL(( kernel), dim3(1), dim3(2), 0, 0, dev_x1,dev_y1,dev_z1,dev_x2,dev_y2);
cudaStatus = hipDeviceSynchronize();
end=clock();
float t=(float)(end-start)/CLOCKS_PER_SEC;
printf("time ellapsed: %f\n",t);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(r, dev_r, size * sizeof(uint32_t), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_r);
hipFree(dev_x1);
hipFree(dev_y1);
hipFree(dev_z1);
hipFree(dev_x2);
hipFree(dev_y2);
return cudaStatus;
}
| 1d9b7fb369473be61af351637b83e72fb98cc8ee.cu | /* authors: Kostas Papagiannopoulos, RU
Arturo Cedillo, TU/e
Mathias Morbitzer, RU
Project Lola Scam Van [Low-latency scalar multiplication for VANETs]
This file is part of Project Lola Scam Van. The DIES research group, University of Twente
is free to use and modify the project for research and educational purposes.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "finite_field_arithmetic.cuh"
#include "EC_double_jacobian.cuh"
//#include "EC_double_projective_serial.cuh"
//#include "EC_add_projective_serial.cuh"
//#include "EC_mixed_add_jacobian.cuh"
#include "show_and_move.cuh"
cudaError_t cuda_kernel( uint32_t *r, uint32_t *x1, uint32_t *y1, uint32_t *z1, uint32_t *x2, uint32_t *y2, size_t size);
__global__ void kernel(uint32_t* x1,uint32_t* y1,uint32_t* z1,uint32_t* x2,uint32_t* y2)
{
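// perform 224 successive point doublings of (x1, y1, z1) in Jacobian coordinates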
for (int j=0;j<224;j++){
EC_double_jacobian_parallel(x1,y1,z1);
}
//Here we can use EC_double, EC_add to do the scalar multiplication.
//We should consider sliding window and mixed addition
}
int main()
{
const uint32_t arraySize = 8;
uint32_t x1[arraySize] = { 2, 0, 0, 0, 0, 0, 0, 0 };
uint32_t y1[arraySize] = { 3, 0, 0, 0, 0, 0, 0, 0 };
uint32_t z1[arraySize] = { 4, 0, 0, 0, 0, 0, 0, 0 };
uint32_t x2[arraySize] = { 7, 0, 0, 0, 0, 0, 0, 0 };
uint32_t y2[arraySize] = { 9 , 0, 0, 0, 0, 0, 0, 0 };
uint32_t r[arraySize] = { 0 };
cudaError_t cudaStatus = cuda_kernel(r,x1,y1,z1,x2,y2,arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cuda_kernel failed!");
return 1;
}
printf("End of Kernel\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
system("PAUSE");
return 0;
}
cudaError_t cuda_kernel( uint32_t *r, uint32_t *x1 , uint32_t *y1, uint32_t *z1 , uint32_t *x2 , uint32_t *y2 , size_t size)
{
uint32_t *dev_x1 = 0;
uint32_t *dev_y1 = 0;
uint32_t *dev_z1 = 0;
uint32_t *dev_x2 = 0;
uint32_t *dev_y2 = 0;
uint32_t *dev_r = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_r, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_x1, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_y1, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_z1, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_x2, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_y2, size * sizeof(uint32_t));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_x1, x1, size * sizeof(uint32_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_y1, y1, size * sizeof(uint32_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_z1, z1, size * sizeof(uint32_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_x2, x2, size * sizeof(uint32_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_y2, y2, size * sizeof(uint32_t), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
clock_t start,end;
start=clock();
// Current kernel config: 1 block, 2 threads per block. Decent parallel results with <<<16,64>>> on a GT 525M GPU
kernel<<<1, 2>>>(dev_x1,dev_y1,dev_z1,dev_x2,dev_y2);
cudaStatus = cudaDeviceSynchronize();
end=clock();
float t=(float)(end-start)/CLOCKS_PER_SEC;
printf("time ellapsed: %f\n",t);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching kernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(r, dev_r, size * sizeof(uint32_t), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_r);
cudaFree(dev_x1);
cudaFree(dev_y1);
cudaFree(dev_z1);
cudaFree(dev_x2);
cudaFree(dev_y2);
return cudaStatus;
}
|
70b91fba7ae8105ed9ee2b4a6a76014e3a34ef8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// original C by Brad Conte, ported to CUDA by jody
#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
typedef struct{
uchar data[64];
uint datalen;
uint bitlen[2];
uint state[8];
} SHA256_CTX;
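// SHA-256 round constants: first 32 bits of the fractional parts of the cube roots of the first 64 primes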
__device__ uint k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
SHA256_CTX *cpuSHA_CTX;
SHA256_CTX *gpuSHA_CTX;
int BLOCKS = 10;
int THREADS = 500;
extern "C"
extern "C"
__device__ void sha256_transform(SHA256_CTX *ctx, uchar data[])
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
for (i=0,j=0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
for ( ; i < 64; ++i)
m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
a = ctx[q].state[0];
b = ctx[q].state[1];
c = ctx[q].state[2];
d = ctx[q].state[3];
e = ctx[q].state[4];
f = ctx[q].state[5];
g = ctx[q].state[6];
h = ctx[q].state[7];
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx[q].state[0] += a;
ctx[q].state[1] += b;
ctx[q].state[2] += c;
ctx[q].state[3] += d;
ctx[q].state[4] += e;
ctx[q].state[5] += f;
ctx[q].state[6] += g;
ctx[q].state[7] += h;
}
__global__ void sha256_final(SHA256_CTX *ctx, uchar *gpuResult)
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint i;
i = ctx[q].datalen;
// Pad whatever data is left in the buffer.
if (ctx[q].datalen < 56) {
ctx[q].data[i++] = 0x80;
while (i < 56)
ctx[q].data[i++] = 0x00;
}
else {
ctx[q].data[i++] = 0x80;
while (i < 64)
ctx[q].data[i++] = 0x00;
sha256_transform(ctx,ctx[q].data);
memset(ctx[q].data,0,56);
}
// Append to the padding the total message's length in bits and transform.
DBL_INT_ADD(ctx[q].bitlen[0],ctx[q].bitlen[1],ctx[q].datalen * 8);
ctx[q].data[63] = ctx[q].bitlen[0];
ctx[q].data[62] = ctx[q].bitlen[0] >> 8;
ctx[q].data[61] = ctx[q].bitlen[0] >> 16;
ctx[q].data[60] = ctx[q].bitlen[0] >> 24;
ctx[q].data[59] = ctx[q].bitlen[1];
ctx[q].data[58] = ctx[q].bitlen[1] >> 8;
ctx[q].data[57] = ctx[q].bitlen[1] >> 16;
ctx[q].data[56] = ctx[q].bitlen[1] >> 24;
sha256_transform(ctx,ctx[q].data);
// Since this implementation uses little endian byte ordering and SHA uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i=0; i < 4; ++i) {
gpuResult[i] = (ctx[q].state[0] >> (24-i*8)) & 0x000000ff;
gpuResult[i+4] = (ctx[q].state[1] >> (24-i*8)) & 0x000000ff;
gpuResult[i+8] = (ctx[q].state[2] >> (24-i*8)) & 0x000000ff;
gpuResult[i+12] = (ctx[q].state[3] >> (24-i*8)) & 0x000000ff;
gpuResult[i+16] = (ctx[q].state[4] >> (24-i*8)) & 0x000000ff;
gpuResult[i+20] = (ctx[q].state[5] >> (24-i*8)) & 0x000000ff;
gpuResult[i+24] = (ctx[q].state[6] >> (24-i*8)) & 0x000000ff;
gpuResult[i+28] = (ctx[q].state[7] >> (24-i*8)) & 0x000000ff;
}
/*
printf("\n");
for(int a=0; a<32; a++){
printf("%02x", hash[a]);
}
printf("\n");
*/
} | 70b91fba7ae8105ed9ee2b4a6a76014e3a34ef8c.cu | #include "includes.h"
// original C by Brad Conte, ported to CUDA by jody
#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
typedef struct{
uchar data[64];
uint datalen;
uint bitlen[2];
uint state[8];
} SHA256_CTX;
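// SHA-256 round constants: first 32 bits of the fractional parts of the cube roots of the first 64 primes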
__device__ uint k[64] = {
0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
SHA256_CTX *cpuSHA_CTX;
SHA256_CTX *gpuSHA_CTX;
int BLOCKS = 10;
int THREADS = 500;
extern "C"
extern "C"
__device__ void sha256_transform(SHA256_CTX *ctx, uchar data[])
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
for (i=0,j=0; i < 16; ++i, j += 4)
m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
for ( ; i < 64; ++i)
m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
a = ctx[q].state[0];
b = ctx[q].state[1];
c = ctx[q].state[2];
d = ctx[q].state[3];
e = ctx[q].state[4];
f = ctx[q].state[5];
g = ctx[q].state[6];
h = ctx[q].state[7];
for (i = 0; i < 64; ++i) {
t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
t2 = EP0(a) + MAJ(a,b,c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
ctx[q].state[0] += a;
ctx[q].state[1] += b;
ctx[q].state[2] += c;
ctx[q].state[3] += d;
ctx[q].state[4] += e;
ctx[q].state[5] += f;
ctx[q].state[6] += g;
ctx[q].state[7] += h;
}
__global__ void sha256_final(SHA256_CTX *ctx, uchar *gpuResult)
{
int q = blockIdx.x * blockDim.x + threadIdx.x;
uint i;
i = ctx[q].datalen;
// Pad whatever data is left in the buffer.
if (ctx[q].datalen < 56) {
ctx[q].data[i++] = 0x80;
while (i < 56)
ctx[q].data[i++] = 0x00;
}
else {
ctx[q].data[i++] = 0x80;
while (i < 64)
ctx[q].data[i++] = 0x00;
sha256_transform(ctx,ctx[q].data);
memset(ctx[q].data,0,56);
}
// Append to the padding the total message's length in bits and transform.
DBL_INT_ADD(ctx[q].bitlen[0],ctx[q].bitlen[1],ctx[q].datalen * 8);
ctx[q].data[63] = ctx[q].bitlen[0];
ctx[q].data[62] = ctx[q].bitlen[0] >> 8;
ctx[q].data[61] = ctx[q].bitlen[0] >> 16;
ctx[q].data[60] = ctx[q].bitlen[0] >> 24;
ctx[q].data[59] = ctx[q].bitlen[1];
ctx[q].data[58] = ctx[q].bitlen[1] >> 8;
ctx[q].data[57] = ctx[q].bitlen[1] >> 16;
ctx[q].data[56] = ctx[q].bitlen[1] >> 24;
sha256_transform(ctx,ctx[q].data);
// Since this implementation uses little endian byte ordering and SHA uses big endian,
// reverse all the bytes when copying the final state to the output hash.
for (i=0; i < 4; ++i) {
gpuResult[i] = (ctx[q].state[0] >> (24-i*8)) & 0x000000ff;
gpuResult[i+4] = (ctx[q].state[1] >> (24-i*8)) & 0x000000ff;
gpuResult[i+8] = (ctx[q].state[2] >> (24-i*8)) & 0x000000ff;
gpuResult[i+12] = (ctx[q].state[3] >> (24-i*8)) & 0x000000ff;
gpuResult[i+16] = (ctx[q].state[4] >> (24-i*8)) & 0x000000ff;
gpuResult[i+20] = (ctx[q].state[5] >> (24-i*8)) & 0x000000ff;
gpuResult[i+24] = (ctx[q].state[6] >> (24-i*8)) & 0x000000ff;
gpuResult[i+28] = (ctx[q].state[7] >> (24-i*8)) & 0x000000ff;
}
/*
printf("\n");
for(int a=0; a<32; a++){
printf("%02x", hash[a]);
}
printf("\n");
*/
} |
8279c140c9b423b723affecaea60125bdd177822.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float* kernel, int ksize, hipStream_t stream)
{
if (stream == 0)
cudaSafeCall( hipMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float), 0, hipMemcpyDeviceToDevice) );
else
cudaSafeCall( hipMemcpyToSymbolAsync(c_kernel, kernel, ksize * sizeof(float), 0, hipMemcpyDeviceToDevice, stream) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
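// shared-memory staging tile: PATCH_PER_BLOCK blocks of row data per thread row, plus HALO_SIZE blocks of halo on each side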
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
if (blockIdx.x > 0)
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X]);
}
else
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
}
if (blockIdx.x + 2 < gridDim.x)
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + j * BLOCK_DIM_X]);
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X]);
}
else
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
}
__syncthreads();
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
hipLaunchKernelGGL(( linearRowFilter<KSIZE, T, D>), dim3(grid), dim3(block), 0, stream, src, dst, anchor, brd);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, hipStream_t stream);
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize, stream);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<int , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
template void linearRowFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
| 8279c140c9b423b723affecaea60125bdd177822.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 1993-2011, NVIDIA Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/border_interpolate.hpp"
#include "opencv2/gpu/device/static_check.hpp"
namespace cv { namespace gpu { namespace device
{
namespace row_filter
{
#define MAX_KERNEL_SIZE 32
__constant__ float c_kernel[MAX_KERNEL_SIZE];
void loadKernel(const float* kernel, int ksize, cudaStream_t stream)
{
if (stream == 0)
cudaSafeCall( cudaMemcpyToSymbol(c_kernel, kernel, ksize * sizeof(float), 0, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpyToSymbolAsync(c_kernel, kernel, ksize * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream) );
}
template <int KSIZE, typename T, typename D, typename B>
__global__ void linearRowFilter(const PtrStepSz<T> src, PtrStep<D> dst, const int anchor, const B brd)
{
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 8;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#else
const int BLOCK_DIM_X = 32;
const int BLOCK_DIM_Y = 4;
const int PATCH_PER_BLOCK = 4;
const int HALO_SIZE = 1;
#endif
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type sum_t;
__shared__ sum_t smem[BLOCK_DIM_Y][(PATCH_PER_BLOCK + 2 * HALO_SIZE) * BLOCK_DIM_X];
const int y = blockIdx.y * BLOCK_DIM_Y + threadIdx.y;
if (y >= src.rows)
return;
const T* src_row = src.ptr(y);
const int xStart = blockIdx.x * (PATCH_PER_BLOCK * BLOCK_DIM_X) + threadIdx.x;
if (blockIdx.x > 0)
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart - (HALO_SIZE - j) * BLOCK_DIM_X]);
}
else
{
//Load left halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_low(xStart - (HALO_SIZE - j) * BLOCK_DIM_X, src_row));
}
if (blockIdx.x + 2 < gridDim.x)
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + j * BLOCK_DIM_X]);
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(src_row[xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X]);
}
else
{
//Load main data
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + j * BLOCK_DIM_X, src_row));
//Load right halo
#pragma unroll
for (int j = 0; j < HALO_SIZE; ++j)
smem[threadIdx.y][threadIdx.x + (PATCH_PER_BLOCK + HALO_SIZE) * BLOCK_DIM_X + j * BLOCK_DIM_X] = saturate_cast<sum_t>(brd.at_high(xStart + (PATCH_PER_BLOCK + j) * BLOCK_DIM_X, src_row));
}
__syncthreads();
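            // Convolution pass: each thread produces up to PATCH_PER_BLOCK output pixels for its row,
            // accumulating KSIZE taps from the shared-memory tile against the filter cached in c_kernel.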
#pragma unroll
for (int j = 0; j < PATCH_PER_BLOCK; ++j)
{
const int x = xStart + j * BLOCK_DIM_X;
if (x < src.cols)
{
sum_t sum = VecTraits<sum_t>::all(0);
#pragma unroll
for (int k = 0; k < KSIZE; ++k)
sum = sum + smem[threadIdx.y][threadIdx.x + HALO_SIZE * BLOCK_DIM_X + j * BLOCK_DIM_X - anchor + k] * c_kernel[k];
dst(y, x) = saturate_cast<D>(sum);
}
}
}
template <int KSIZE, typename T, typename D, template<typename> class B>
void linearRowFilter_caller(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream)
{
int BLOCK_DIM_X;
int BLOCK_DIM_Y;
int PATCH_PER_BLOCK;
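            // Choose the launch configuration from the device's compute capability:
            // cc >= 2.0 devices run 32x8 blocks, older devices 32x4; each thread covers PATCH_PER_BLOCK pixels per row.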
if (cc >= 20)
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 8;
PATCH_PER_BLOCK = 4;
}
else
{
BLOCK_DIM_X = 32;
BLOCK_DIM_Y = 4;
PATCH_PER_BLOCK = 4;
}
const dim3 block(BLOCK_DIM_X, BLOCK_DIM_Y);
const dim3 grid(divUp(src.cols, BLOCK_DIM_X * PATCH_PER_BLOCK), divUp(src.rows, BLOCK_DIM_Y));
B<T> brd(src.cols);
linearRowFilter<KSIZE, T, D><<<grid, block, 0, stream>>>(src, dst, anchor, brd);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, typename D>
void linearRowFilter_gpu(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<D> dst, int anchor, int cc, cudaStream_t stream);
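            // Dispatch table indexed by [border mode][kernel size]: rows are reflect101, replicate,
            // constant, reflect and wrap borders; columns 1..32 pick the compile-time KSIZE (index 0 unused).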
static const caller_t callers[5][33] =
{
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect101>,
linearRowFilter_caller< 2, T, D, BrdRowReflect101>,
linearRowFilter_caller< 3, T, D, BrdRowReflect101>,
linearRowFilter_caller< 4, T, D, BrdRowReflect101>,
linearRowFilter_caller< 5, T, D, BrdRowReflect101>,
linearRowFilter_caller< 6, T, D, BrdRowReflect101>,
linearRowFilter_caller< 7, T, D, BrdRowReflect101>,
linearRowFilter_caller< 8, T, D, BrdRowReflect101>,
linearRowFilter_caller< 9, T, D, BrdRowReflect101>,
linearRowFilter_caller<10, T, D, BrdRowReflect101>,
linearRowFilter_caller<11, T, D, BrdRowReflect101>,
linearRowFilter_caller<12, T, D, BrdRowReflect101>,
linearRowFilter_caller<13, T, D, BrdRowReflect101>,
linearRowFilter_caller<14, T, D, BrdRowReflect101>,
linearRowFilter_caller<15, T, D, BrdRowReflect101>,
linearRowFilter_caller<16, T, D, BrdRowReflect101>,
linearRowFilter_caller<17, T, D, BrdRowReflect101>,
linearRowFilter_caller<18, T, D, BrdRowReflect101>,
linearRowFilter_caller<19, T, D, BrdRowReflect101>,
linearRowFilter_caller<20, T, D, BrdRowReflect101>,
linearRowFilter_caller<21, T, D, BrdRowReflect101>,
linearRowFilter_caller<22, T, D, BrdRowReflect101>,
linearRowFilter_caller<23, T, D, BrdRowReflect101>,
linearRowFilter_caller<24, T, D, BrdRowReflect101>,
linearRowFilter_caller<25, T, D, BrdRowReflect101>,
linearRowFilter_caller<26, T, D, BrdRowReflect101>,
linearRowFilter_caller<27, T, D, BrdRowReflect101>,
linearRowFilter_caller<28, T, D, BrdRowReflect101>,
linearRowFilter_caller<29, T, D, BrdRowReflect101>,
linearRowFilter_caller<30, T, D, BrdRowReflect101>,
linearRowFilter_caller<31, T, D, BrdRowReflect101>,
linearRowFilter_caller<32, T, D, BrdRowReflect101>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReplicate>,
linearRowFilter_caller< 2, T, D, BrdRowReplicate>,
linearRowFilter_caller< 3, T, D, BrdRowReplicate>,
linearRowFilter_caller< 4, T, D, BrdRowReplicate>,
linearRowFilter_caller< 5, T, D, BrdRowReplicate>,
linearRowFilter_caller< 6, T, D, BrdRowReplicate>,
linearRowFilter_caller< 7, T, D, BrdRowReplicate>,
linearRowFilter_caller< 8, T, D, BrdRowReplicate>,
linearRowFilter_caller< 9, T, D, BrdRowReplicate>,
linearRowFilter_caller<10, T, D, BrdRowReplicate>,
linearRowFilter_caller<11, T, D, BrdRowReplicate>,
linearRowFilter_caller<12, T, D, BrdRowReplicate>,
linearRowFilter_caller<13, T, D, BrdRowReplicate>,
linearRowFilter_caller<14, T, D, BrdRowReplicate>,
linearRowFilter_caller<15, T, D, BrdRowReplicate>,
linearRowFilter_caller<16, T, D, BrdRowReplicate>,
linearRowFilter_caller<17, T, D, BrdRowReplicate>,
linearRowFilter_caller<18, T, D, BrdRowReplicate>,
linearRowFilter_caller<19, T, D, BrdRowReplicate>,
linearRowFilter_caller<20, T, D, BrdRowReplicate>,
linearRowFilter_caller<21, T, D, BrdRowReplicate>,
linearRowFilter_caller<22, T, D, BrdRowReplicate>,
linearRowFilter_caller<23, T, D, BrdRowReplicate>,
linearRowFilter_caller<24, T, D, BrdRowReplicate>,
linearRowFilter_caller<25, T, D, BrdRowReplicate>,
linearRowFilter_caller<26, T, D, BrdRowReplicate>,
linearRowFilter_caller<27, T, D, BrdRowReplicate>,
linearRowFilter_caller<28, T, D, BrdRowReplicate>,
linearRowFilter_caller<29, T, D, BrdRowReplicate>,
linearRowFilter_caller<30, T, D, BrdRowReplicate>,
linearRowFilter_caller<31, T, D, BrdRowReplicate>,
linearRowFilter_caller<32, T, D, BrdRowReplicate>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowConstant>,
linearRowFilter_caller< 2, T, D, BrdRowConstant>,
linearRowFilter_caller< 3, T, D, BrdRowConstant>,
linearRowFilter_caller< 4, T, D, BrdRowConstant>,
linearRowFilter_caller< 5, T, D, BrdRowConstant>,
linearRowFilter_caller< 6, T, D, BrdRowConstant>,
linearRowFilter_caller< 7, T, D, BrdRowConstant>,
linearRowFilter_caller< 8, T, D, BrdRowConstant>,
linearRowFilter_caller< 9, T, D, BrdRowConstant>,
linearRowFilter_caller<10, T, D, BrdRowConstant>,
linearRowFilter_caller<11, T, D, BrdRowConstant>,
linearRowFilter_caller<12, T, D, BrdRowConstant>,
linearRowFilter_caller<13, T, D, BrdRowConstant>,
linearRowFilter_caller<14, T, D, BrdRowConstant>,
linearRowFilter_caller<15, T, D, BrdRowConstant>,
linearRowFilter_caller<16, T, D, BrdRowConstant>,
linearRowFilter_caller<17, T, D, BrdRowConstant>,
linearRowFilter_caller<18, T, D, BrdRowConstant>,
linearRowFilter_caller<19, T, D, BrdRowConstant>,
linearRowFilter_caller<20, T, D, BrdRowConstant>,
linearRowFilter_caller<21, T, D, BrdRowConstant>,
linearRowFilter_caller<22, T, D, BrdRowConstant>,
linearRowFilter_caller<23, T, D, BrdRowConstant>,
linearRowFilter_caller<24, T, D, BrdRowConstant>,
linearRowFilter_caller<25, T, D, BrdRowConstant>,
linearRowFilter_caller<26, T, D, BrdRowConstant>,
linearRowFilter_caller<27, T, D, BrdRowConstant>,
linearRowFilter_caller<28, T, D, BrdRowConstant>,
linearRowFilter_caller<29, T, D, BrdRowConstant>,
linearRowFilter_caller<30, T, D, BrdRowConstant>,
linearRowFilter_caller<31, T, D, BrdRowConstant>,
linearRowFilter_caller<32, T, D, BrdRowConstant>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowReflect>,
linearRowFilter_caller< 2, T, D, BrdRowReflect>,
linearRowFilter_caller< 3, T, D, BrdRowReflect>,
linearRowFilter_caller< 4, T, D, BrdRowReflect>,
linearRowFilter_caller< 5, T, D, BrdRowReflect>,
linearRowFilter_caller< 6, T, D, BrdRowReflect>,
linearRowFilter_caller< 7, T, D, BrdRowReflect>,
linearRowFilter_caller< 8, T, D, BrdRowReflect>,
linearRowFilter_caller< 9, T, D, BrdRowReflect>,
linearRowFilter_caller<10, T, D, BrdRowReflect>,
linearRowFilter_caller<11, T, D, BrdRowReflect>,
linearRowFilter_caller<12, T, D, BrdRowReflect>,
linearRowFilter_caller<13, T, D, BrdRowReflect>,
linearRowFilter_caller<14, T, D, BrdRowReflect>,
linearRowFilter_caller<15, T, D, BrdRowReflect>,
linearRowFilter_caller<16, T, D, BrdRowReflect>,
linearRowFilter_caller<17, T, D, BrdRowReflect>,
linearRowFilter_caller<18, T, D, BrdRowReflect>,
linearRowFilter_caller<19, T, D, BrdRowReflect>,
linearRowFilter_caller<20, T, D, BrdRowReflect>,
linearRowFilter_caller<21, T, D, BrdRowReflect>,
linearRowFilter_caller<22, T, D, BrdRowReflect>,
linearRowFilter_caller<23, T, D, BrdRowReflect>,
linearRowFilter_caller<24, T, D, BrdRowReflect>,
linearRowFilter_caller<25, T, D, BrdRowReflect>,
linearRowFilter_caller<26, T, D, BrdRowReflect>,
linearRowFilter_caller<27, T, D, BrdRowReflect>,
linearRowFilter_caller<28, T, D, BrdRowReflect>,
linearRowFilter_caller<29, T, D, BrdRowReflect>,
linearRowFilter_caller<30, T, D, BrdRowReflect>,
linearRowFilter_caller<31, T, D, BrdRowReflect>,
linearRowFilter_caller<32, T, D, BrdRowReflect>
},
{
0,
linearRowFilter_caller< 1, T, D, BrdRowWrap>,
linearRowFilter_caller< 2, T, D, BrdRowWrap>,
linearRowFilter_caller< 3, T, D, BrdRowWrap>,
linearRowFilter_caller< 4, T, D, BrdRowWrap>,
linearRowFilter_caller< 5, T, D, BrdRowWrap>,
linearRowFilter_caller< 6, T, D, BrdRowWrap>,
linearRowFilter_caller< 7, T, D, BrdRowWrap>,
linearRowFilter_caller< 8, T, D, BrdRowWrap>,
linearRowFilter_caller< 9, T, D, BrdRowWrap>,
linearRowFilter_caller<10, T, D, BrdRowWrap>,
linearRowFilter_caller<11, T, D, BrdRowWrap>,
linearRowFilter_caller<12, T, D, BrdRowWrap>,
linearRowFilter_caller<13, T, D, BrdRowWrap>,
linearRowFilter_caller<14, T, D, BrdRowWrap>,
linearRowFilter_caller<15, T, D, BrdRowWrap>,
linearRowFilter_caller<16, T, D, BrdRowWrap>,
linearRowFilter_caller<17, T, D, BrdRowWrap>,
linearRowFilter_caller<18, T, D, BrdRowWrap>,
linearRowFilter_caller<19, T, D, BrdRowWrap>,
linearRowFilter_caller<20, T, D, BrdRowWrap>,
linearRowFilter_caller<21, T, D, BrdRowWrap>,
linearRowFilter_caller<22, T, D, BrdRowWrap>,
linearRowFilter_caller<23, T, D, BrdRowWrap>,
linearRowFilter_caller<24, T, D, BrdRowWrap>,
linearRowFilter_caller<25, T, D, BrdRowWrap>,
linearRowFilter_caller<26, T, D, BrdRowWrap>,
linearRowFilter_caller<27, T, D, BrdRowWrap>,
linearRowFilter_caller<28, T, D, BrdRowWrap>,
linearRowFilter_caller<29, T, D, BrdRowWrap>,
linearRowFilter_caller<30, T, D, BrdRowWrap>,
linearRowFilter_caller<31, T, D, BrdRowWrap>,
linearRowFilter_caller<32, T, D, BrdRowWrap>
}
};
loadKernel(kernel, ksize, stream);
callers[brd_type][ksize]((PtrStepSz<T>)src, (PtrStepSz<D>)dst, anchor, cc, stream);
}
template void linearRowFilter_gpu<uchar , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<uchar4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<int , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
template void linearRowFilter_gpu<float , float >(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
} // namespace row_filter
}}} // namespace cv { namespace gpu { namespace device
#endif /* CUDA_DISABLER */
|
b7ae22e453f01f0bf4c1054fff45bfc5b5e4075c.hip | // !!! This is a file automatically generated by hipify!!!
#include "flamegpu/flame_api.h"
#include "flamegpu/runtime/flamegpu_api.h"
#include "flamegpu/util/compute_capability.cuh"
#include "helpers/device_initialisation.h"
#include "gtest/gtest.h"
namespace test_cuda_simulation {
const char *MODEL_NAME = "Model";
const char *MODEL_NAME2 = "Model2";
const char *AGENT_NAME = "Agent";
const char *AGENT_NAME2 = "Agent2";
const char *FUNCTION_NAME = "Function";
const char *LAYER_NAME = "Layer";
const char VARIABLE_NAME[5] = "test"; // Have to define this in this form to use with compile time hash stuff
__device__ const char dVARIABLE_NAME[5] = "test"; // Have to define this in this form to use with compile time hash stuff
const int AGENT_COUNT = 10;
const int MULTIPLIER = 3;
__device__ const int dMULTIPLIER = 3;
int externalCounter = 0;
FLAMEGPU_AGENT_FUNCTION(DeathTestFunc, MsgNone, MsgNone) {
unsigned int x = FLAMEGPU->getVariable<unsigned int>("x");
// Agents with even value for 'x' die
if (x % 2 == 0)
return DEAD;
return ALIVE;
}
FLAMEGPU_STEP_FUNCTION(IncrementCounter) {
externalCounter++;
}
TEST(TestCUDASimulation, ApplyConfigDerivedContextCreation) {
// Simply get the result from the method provided by the helper file.
ASSERT_TRUE(getCUDASimulationContextCreationTestResult());
// Reset the device, just to be sure.
ASSERT_EQ(hipSuccess, hipDeviceReset());
}
// Test that the CUDASimulation applyConfig_derived works for multiple GPU device_id values (if available)
TEST(TestCUDASimulation, AllDeviceIdValues) {
// Get the number of devices
int device_count = 1;
if (hipSuccess != hipGetDeviceCount(&device_count) || device_count <= 0) {
// Skip the test, if no CUDA or GPUs.
return;
}
for (int i = 0; i < device_count; i++) {
// Check if the specified device is allowed to run the tests to determine if the test should throw or not. This is system dependent so must be dynamic.
bool shouldThrowCCException = !util::compute_capability::checkComputeCapability(i);
// Initialise and run a simple model on each device in the system. This test is pointless on single GPU machines.
ModelDescription m(MODEL_NAME);
m.newAgent(AGENT_NAME);
// Scoping
{
CUDASimulation c(m);
// Set the device ID
c.CUDAConfig().device_id = i;
c.SimulationConfig().steps = 1;
// Apply the config (and therefore set the device.)
if (shouldThrowCCException) {
// Should throw InvalidCUDAComputeCapability if bad compute capability.
EXPECT_THROW(c.applyConfig(), InvalidCUDAComputeCapability);
EXPECT_THROW(c.simulate(), InvalidCUDAComputeCapability);
} else {
                // Should not get any exceptions if CC is valid.
EXPECT_NO_THROW(c.applyConfig());
EXPECT_NO_THROW(c.simulate());
}
}
}
// Return to prior state for remaining tests.
ASSERT_EQ(hipSuccess, hipSetDevice(0));
}
TEST(TestSimulation, ArgParse_inputfile_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--in", "test" };
EXPECT_EQ(c.getSimulationConfig().input_file, "");
    EXPECT_THROW(c.initialise(sizeof(argv)/sizeof(char*), argv), UnsupportedFileType); // can't detect filetype
EXPECT_EQ(c.getSimulationConfig().input_file, argv[2]);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().input_file, "");
}
TEST(TestSimulation, ArgParse_inputfile_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-i", "I_DO_NOT_EXIST.xml" };
EXPECT_EQ(c.getSimulationConfig().input_file, "");
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidInputFile); // File doesn't exist
EXPECT_EQ(c.getSimulationConfig().input_file, argv[2]);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().input_file, "");
}
TEST(TestSimulation, ArgParse_steps_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--steps", "12" };
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().steps, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
}
TEST(TestSimulation, ArgParse_steps_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-s", "12" };
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().steps, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
}
TEST(TestSimulation, ArgParse_randomseed_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--random", "12" };
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().random_seed, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
}
TEST(TestSimulation, ArgParse_randomseed_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-r", "12" };
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().random_seed, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
}
TEST(TestCUDASimulation, ArgParse_device_long) {
ASSERT_EQ(hipGetLastError(), hipSuccess);
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--device", "1200" };
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
// Setting an invalid device ID is the only safe way to do this without making internal methods accessible
    // as a valid ID could select a device that we haven't built code for
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidCUDAdevice);
EXPECT_EQ(c.getCUDAConfig().device_id, 1200);
// Blank init resets value to default
ASSERT_EQ(hipGetLastError(), hipSuccess);
c.initialise(0, nullptr);
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
ASSERT_EQ(hipGetLastError(), hipSuccess);
}
TEST(TestCUDASimulation, ArgParse_device_short) {
ASSERT_EQ(hipGetLastError(), hipSuccess);
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-d", "1200" };
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
// Setting an invalid device ID is the only safe way to do this without making internal methods accessible
    // as a valid ID could select a device that we haven't built code for
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidCUDAdevice);
EXPECT_EQ(c.getCUDAConfig().device_id, 1200);
// Blank init resets value to default
ASSERT_EQ(hipGetLastError(), hipSuccess);
c.initialise(0, nullptr);
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
ASSERT_EQ(hipGetLastError(), hipSuccess);
}
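// Agent function used by the population set/get tests: multiplies each agent's "test" variable by dMULTIPLIER.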
FLAMEGPU_AGENT_FUNCTION(SetGetFn, MsgNone, MsgNone) {
int i = FLAMEGPU->getVariable<int>(dVARIABLE_NAME);
FLAMEGPU->setVariable<int>(dVARIABLE_NAME, i * dMULTIPLIER);
return ALIVE;
}
TEST(TestCUDASimulation, SetGetPopulationData) {
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
m.newLayer(LAYER_NAME).addAgentFunction(a.newFunction(FUNCTION_NAME, SetGetFn));
a.newVariable<int>(VARIABLE_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getNextInstance();
i.setVariable<int>(VARIABLE_NAME, _i);
EXPECT_THROW(i.setVariable<float>(VARIABLE_NAME, static_cast<float>(_i)), InvalidVarType);
}
CUDASimulation c(m);
c.SimulationConfig().steps = 1;
c.setPopulationData(pop);
c.simulate();
c.getPopulationData(pop);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getInstanceAt(_i);
EXPECT_EQ(i.getVariable<int>(VARIABLE_NAME), _i * MULTIPLIER);
i.setVariable<int>(VARIABLE_NAME, _i * 2);
}
c.setPopulationData(pop);
c.simulate();
c.getPopulationData(pop);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getInstanceAt(_i);
EXPECT_EQ(i.getVariable<int>(VARIABLE_NAME), _i * MULTIPLIER * 2);
EXPECT_THROW(i.getVariable<float>(VARIABLE_NAME), InvalidVarType);
}
}
TEST(TestCUDASimulation, SetGetPopulationData_InvalidCudaAgent) {
ModelDescription m2(MODEL_NAME2);
AgentDescription &a2 = m2.newAgent(AGENT_NAME2);
ModelDescription m(MODEL_NAME);
// AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a2, static_cast<unsigned int>(AGENT_COUNT));
CUDASimulation c(m);
EXPECT_THROW(c.setPopulationData(pop), InvalidCudaAgent);
EXPECT_THROW(c.getPopulationData(pop), InvalidCudaAgent);
}
TEST(TestCUDASimulation, GetAgent) {
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
m.newLayer(LAYER_NAME).addAgentFunction(a.newFunction(FUNCTION_NAME, SetGetFn));
a.newVariable<int>(VARIABLE_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getNextInstance();
i.setVariable<int>(VARIABLE_NAME, _i);
}
CUDASimulation c(m);
c.SimulationConfig().steps = 1;
c.setPopulationData(pop);
c.simulate();
AgentInterface &agent = c.getAgent(AGENT_NAME);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
int host = 0;
hipMemcpy(&host, reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, sizeof(int), hipMemcpyDeviceToHost);
EXPECT_EQ(host, _i * MULTIPLIER);
host = _i * 2;
hipMemcpy(reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, &host, sizeof(int), hipMemcpyHostToDevice);
}
c.simulate();
agent = c.getAgent(AGENT_NAME);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
int host = 0;
hipMemcpy(&host, reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, sizeof(int), hipMemcpyDeviceToHost);
EXPECT_EQ(host, _i * 2 * MULTIPLIER);
}
}
TEST(TestCUDASimulation, Step) {
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
externalCounter = 0;
c.resetStepCounter();
c.step();
EXPECT_EQ(externalCounter, 1);
EXPECT_EQ(c.getStepCounter(), 1u);
externalCounter = 0;
c.resetStepCounter();
for (unsigned int i = 0; i < 5; ++i) {
c.step();
}
EXPECT_EQ(externalCounter, 5);
EXPECT_EQ(c.getStepCounter(), 5u);
}
FLAMEGPU_AGENT_FUNCTION(add_fn, MsgNone, MsgNone) {
FLAMEGPU->setVariable<int>("i", FLAMEGPU->getVariable<int>("i") + 1);
FLAMEGPU->setVariable<int>("j", FLAMEGPU->getVariable<int>("j") + 1);
return ALIVE;
}
TEST(TestCUDASimulation, SharedAgentFunction) {
// Test that two different agents can share an agent function name/implementation
ModelDescription model("test");
auto &agent1 = model.newAgent("a1");
auto &agent2 = model.newAgent("a2");
agent1.newVariable<int>("i", 1);
agent1.newVariable<int>("j", -1);
agent2.newVariable<int>("i", -1);
agent2.newVariable<int>("j", 1);
auto &a1f = agent1.newFunction("add", add_fn);
auto &a2f = agent2.newFunction("add", add_fn);
auto &layer = model.newLayer();
layer.addAgentFunction(a1f);
layer.addAgentFunction(a2f);
CUDASimulation cudaSimulation(model);
cudaSimulation.applyConfig();
const unsigned int populationSize = 5;
AgentPopulation pop1(agent1, populationSize);
AgentPopulation pop2(agent2, populationSize);
for (unsigned int i = 0; i < populationSize; i++) {
pop1.getNextInstance();
pop2.getNextInstance();
}
cudaSimulation.setPopulationData(pop1);
cudaSimulation.setPopulationData(pop2);
const unsigned int steps = 5;
for (unsigned int i = 0; i < steps; ++i) {
cudaSimulation.step();
}
cudaSimulation.getPopulationData(pop1);
cudaSimulation.getPopulationData(pop2);
for (unsigned int i = 0; i < populationSize; i++) {
auto instance = pop1.getInstanceAt(i);
EXPECT_EQ(instance.getVariable<int>("i"), 6);
EXPECT_EQ(instance.getVariable<int>("j"), 4);
}
for (unsigned int i = 0; i < populationSize; i++) {
auto instance = pop2.getInstanceAt(i);
EXPECT_EQ(instance.getVariable<int>("i"), 4);
EXPECT_EQ(instance.getVariable<int>("j"), 6);
}
}
TEST(TestSimulation, Simulate) {
// Simulation is abstract, so test via CUDASimulation
// Depends on CUDASimulation::step()
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
externalCounter = 0;
c.resetStepCounter();
c.SimulationConfig().steps = 7;
c.simulate();
EXPECT_EQ(externalCounter, 7);
EXPECT_EQ(c.getStepCounter(), 7u);
externalCounter = 0;
c.resetStepCounter();
c.SimulationConfig().steps = 3;
c.simulate();
EXPECT_EQ(externalCounter, 3);
EXPECT_EQ(c.getStepCounter(), 3u);
}
// Show that blank init resets the vals?
TEST(TestCUDASimulation, AgentDeath) {
std::default_random_engine generator;
std::uniform_int_distribution<unsigned int> distribution(0, 12);
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
a.newVariable<unsigned int>("x");
a.newFunction("DeathFunc", DeathTestFunc).setAllowAgentDeath(true);
m.newLayer().addAgentFunction(DeathTestFunc);
CUDASimulation c(m);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
std::vector<unsigned int> expected_output;
for (unsigned int i = 0; i < AGENT_COUNT; ++i) {
auto p = pop.getNextInstance();
unsigned int rng = distribution(generator);
p.setVariable<unsigned int>("x", rng);
if (rng % 2 != 0)
expected_output.push_back(rng);
}
c.setPopulationData(pop);
c.SimulationConfig().steps = 1;
c.simulate();
c.getPopulationData(pop);
EXPECT_EQ(static_cast<size_t>(pop.getCurrentListSize()), expected_output.size());
for (unsigned int i = 0; i < pop.getCurrentListSize(); ++i) {
AgentInstance ai = pop.getInstanceAt(i);
// Check x is an expected value
EXPECT_EQ(expected_output[i], ai.getVariable<unsigned int>("x"));
}
}
// test the programmatically accessible simulation time elapsed.
TEST(TestCUDASimulation, getSimulationElapsedTime) {
// Define a simple model - doesn't need to do anything other than take some time.
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
// Try getting the timer before running simulate, which should return 0
EXPECT_EQ(c.getSimulationElapsedTime(), 0.0f);
// Call simulate to run 1 steps, which should take some length of time
c.SimulationConfig().steps = 1;
c.simulate();
EXPECT_GT(c.getSimulationElapsedTime(), 0.0f);
// Then run 10 steps, which should be longer / not the same.
float simulate1StepDuration = c.getSimulationElapsedTime();
c.SimulationConfig().steps = 10;
c.simulate();
float simulate10StepDuration = c.getSimulationElapsedTime();
EXPECT_GT(simulate10StepDuration, 0.0f);
EXPECT_NE(simulate1StepDuration, simulate10StepDuration);
}
// test that we can have 2 instances of the same ModelDescription simultaneously
TEST(TestCUDASimulation, MultipleInstances) {
// Define a simple model - doesn't need to do anything other than take some time.
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c1(m);
c1.setPopulationData(pop);
// Set population data should trigger initialiseSingletons(), which is what leads to crash if EnvManager has matching name/id
EXPECT_NO_THROW(CUDASimulation c2(m); c2.setPopulationData(pop););
}
} // namespace test_cuda_simulation
| b7ae22e453f01f0bf4c1054fff45bfc5b5e4075c.cu | #include "flamegpu/flame_api.h"
#include "flamegpu/runtime/flamegpu_api.h"
#include "flamegpu/util/compute_capability.cuh"
#include "helpers/device_initialisation.h"
#include "gtest/gtest.h"
namespace test_cuda_simulation {
const char *MODEL_NAME = "Model";
const char *MODEL_NAME2 = "Model2";
const char *AGENT_NAME = "Agent";
const char *AGENT_NAME2 = "Agent2";
const char *FUNCTION_NAME = "Function";
const char *LAYER_NAME = "Layer";
const char VARIABLE_NAME[5] = "test"; // Have to define this in this form to use with compile time hash stuff
__device__ const char dVARIABLE_NAME[5] = "test"; // Have to define this in this form to use with compile time hash stuff
const int AGENT_COUNT = 10;
const int MULTIPLIER = 3;
__device__ const int dMULTIPLIER = 3;
int externalCounter = 0;
FLAMEGPU_AGENT_FUNCTION(DeathTestFunc, MsgNone, MsgNone) {
unsigned int x = FLAMEGPU->getVariable<unsigned int>("x");
// Agents with even value for 'x' die
if (x % 2 == 0)
return DEAD;
return ALIVE;
}
FLAMEGPU_STEP_FUNCTION(IncrementCounter) {
externalCounter++;
}
TEST(TestCUDASimulation, ApplyConfigDerivedContextCreation) {
// Simply get the result from the method provided by the helper file.
ASSERT_TRUE(getCUDASimulationContextCreationTestResult());
// Reset the device, just to be sure.
ASSERT_EQ(cudaSuccess, cudaDeviceReset());
}
// Test that the CUDASimulation applyConfig_derived works for multiple GPU device_id values (if available)
TEST(TestCUDASimulation, AllDeviceIdValues) {
// Get the number of devices
int device_count = 1;
if (cudaSuccess != cudaGetDeviceCount(&device_count) || device_count <= 0) {
// Skip the test, if no CUDA or GPUs.
return;
}
for (int i = 0; i < device_count; i++) {
// Check if the specified device is allowed to run the tests to determine if the test should throw or not. This is system dependent so must be dynamic.
bool shouldThrowCCException = !util::compute_capability::checkComputeCapability(i);
// Initialise and run a simple model on each device in the system. This test is pointless on single GPU machines.
ModelDescription m(MODEL_NAME);
m.newAgent(AGENT_NAME);
// Scoping
{
CUDASimulation c(m);
// Set the device ID
c.CUDAConfig().device_id = i;
c.SimulationConfig().steps = 1;
// Apply the config (and therefore set the device.)
if (shouldThrowCCException) {
// Should throw InvalidCUDAComputeCapability if bad compute capability.
EXPECT_THROW(c.applyConfig(), InvalidCUDAComputeCapability);
EXPECT_THROW(c.simulate(), InvalidCUDAComputeCapability);
} else {
                // Should not get any exceptions if CC is valid.
EXPECT_NO_THROW(c.applyConfig());
EXPECT_NO_THROW(c.simulate());
}
}
}
// Return to prior state for remaining tests.
ASSERT_EQ(cudaSuccess, cudaSetDevice(0));
}
TEST(TestSimulation, ArgParse_inputfile_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--in", "test" };
EXPECT_EQ(c.getSimulationConfig().input_file, "");
    EXPECT_THROW(c.initialise(sizeof(argv)/sizeof(char*), argv), UnsupportedFileType); // can't detect filetype
EXPECT_EQ(c.getSimulationConfig().input_file, argv[2]);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().input_file, "");
}
TEST(TestSimulation, ArgParse_inputfile_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-i", "I_DO_NOT_EXIST.xml" };
EXPECT_EQ(c.getSimulationConfig().input_file, "");
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidInputFile); // File doesn't exist
EXPECT_EQ(c.getSimulationConfig().input_file, argv[2]);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().input_file, "");
}
TEST(TestSimulation, ArgParse_steps_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--steps", "12" };
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().steps, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
}
TEST(TestSimulation, ArgParse_steps_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-s", "12" };
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().steps, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_EQ(c.getSimulationConfig().steps, 0u);
}
TEST(TestSimulation, ArgParse_randomseed_long) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--random", "12" };
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().random_seed, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
}
TEST(TestSimulation, ArgParse_randomseed_short) {
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-r", "12" };
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
c.initialise(sizeof(argv) / sizeof(char*), argv);
EXPECT_EQ(c.getSimulationConfig().random_seed, 12u);
// Blank init resets value to default
c.initialise(0, nullptr);
EXPECT_NE(c.getSimulationConfig().random_seed, 12u);
}
TEST(TestCUDASimulation, ArgParse_device_long) {
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "--device", "1200" };
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
// Setting an invalid device ID is the only safe way to do this without making internal methods accessible
    // as a valid ID could select a device that we haven't built code for
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidCUDAdevice);
EXPECT_EQ(c.getCUDAConfig().device_id, 1200);
// Blank init resets value to default
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
c.initialise(0, nullptr);
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
TEST(TestCUDASimulation, ArgParse_device_short) {
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
ModelDescription m(MODEL_NAME);
CUDASimulation c(m);
const char *argv[3] = { "prog.exe", "-d", "1200" };
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
// Setting an invalid device ID is the only safe way to do this without making internal methods accessible
    // as a valid ID could select a device that we haven't built code for
EXPECT_THROW(c.initialise(sizeof(argv) / sizeof(char*), argv), InvalidCUDAdevice);
EXPECT_EQ(c.getCUDAConfig().device_id, 1200);
// Blank init resets value to default
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
c.initialise(0, nullptr);
EXPECT_EQ(c.getCUDAConfig().device_id, 0);
ASSERT_EQ(cudaGetLastError(), cudaSuccess);
}
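// Agent function used by the population set/get tests: multiplies each agent's "test" variable by dMULTIPLIER.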
FLAMEGPU_AGENT_FUNCTION(SetGetFn, MsgNone, MsgNone) {
int i = FLAMEGPU->getVariable<int>(dVARIABLE_NAME);
FLAMEGPU->setVariable<int>(dVARIABLE_NAME, i * dMULTIPLIER);
return ALIVE;
}
TEST(TestCUDASimulation, SetGetPopulationData) {
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
m.newLayer(LAYER_NAME).addAgentFunction(a.newFunction(FUNCTION_NAME, SetGetFn));
a.newVariable<int>(VARIABLE_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getNextInstance();
i.setVariable<int>(VARIABLE_NAME, _i);
EXPECT_THROW(i.setVariable<float>(VARIABLE_NAME, static_cast<float>(_i)), InvalidVarType);
}
CUDASimulation c(m);
c.SimulationConfig().steps = 1;
c.setPopulationData(pop);
c.simulate();
c.getPopulationData(pop);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getInstanceAt(_i);
EXPECT_EQ(i.getVariable<int>(VARIABLE_NAME), _i * MULTIPLIER);
i.setVariable<int>(VARIABLE_NAME, _i * 2);
}
c.setPopulationData(pop);
c.simulate();
c.getPopulationData(pop);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getInstanceAt(_i);
EXPECT_EQ(i.getVariable<int>(VARIABLE_NAME), _i * MULTIPLIER * 2);
EXPECT_THROW(i.getVariable<float>(VARIABLE_NAME), InvalidVarType);
}
}
TEST(TestCUDASimulation, SetGetPopulationData_InvalidCudaAgent) {
ModelDescription m2(MODEL_NAME2);
AgentDescription &a2 = m2.newAgent(AGENT_NAME2);
ModelDescription m(MODEL_NAME);
// AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a2, static_cast<unsigned int>(AGENT_COUNT));
CUDASimulation c(m);
EXPECT_THROW(c.setPopulationData(pop), InvalidCudaAgent);
EXPECT_THROW(c.getPopulationData(pop), InvalidCudaAgent);
}
TEST(TestCUDASimulation, GetAgent) {
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
m.newLayer(LAYER_NAME).addAgentFunction(a.newFunction(FUNCTION_NAME, SetGetFn));
a.newVariable<int>(VARIABLE_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
AgentInstance i = pop.getNextInstance();
i.setVariable<int>(VARIABLE_NAME, _i);
}
CUDASimulation c(m);
c.SimulationConfig().steps = 1;
c.setPopulationData(pop);
c.simulate();
AgentInterface &agent = c.getAgent(AGENT_NAME);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
int host = 0;
cudaMemcpy(&host, reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, sizeof(int), cudaMemcpyDeviceToHost);
EXPECT_EQ(host, _i * MULTIPLIER);
host = _i * 2;
cudaMemcpy(reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, &host, sizeof(int), cudaMemcpyHostToDevice);
}
c.simulate();
agent = c.getAgent(AGENT_NAME);
for (int _i = 0; _i < AGENT_COUNT; ++_i) {
int host = 0;
cudaMemcpy(&host, reinterpret_cast<int*>(agent.getStateVariablePtr(ModelData::DEFAULT_STATE, VARIABLE_NAME)) + _i, sizeof(int), cudaMemcpyDeviceToHost);
EXPECT_EQ(host, _i * 2 * MULTIPLIER);
}
}
TEST(TestCUDASimulation, Step) {
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
externalCounter = 0;
c.resetStepCounter();
c.step();
EXPECT_EQ(externalCounter, 1);
EXPECT_EQ(c.getStepCounter(), 1u);
externalCounter = 0;
c.resetStepCounter();
for (unsigned int i = 0; i < 5; ++i) {
c.step();
}
EXPECT_EQ(externalCounter, 5);
EXPECT_EQ(c.getStepCounter(), 5u);
}
FLAMEGPU_AGENT_FUNCTION(add_fn, MsgNone, MsgNone) {
FLAMEGPU->setVariable<int>("i", FLAMEGPU->getVariable<int>("i") + 1);
FLAMEGPU->setVariable<int>("j", FLAMEGPU->getVariable<int>("j") + 1);
return ALIVE;
}
TEST(TestCUDASimulation, SharedAgentFunction) {
// Test that two different agents can share an agent function name/implementation
ModelDescription model("test");
auto &agent1 = model.newAgent("a1");
auto &agent2 = model.newAgent("a2");
agent1.newVariable<int>("i", 1);
agent1.newVariable<int>("j", -1);
agent2.newVariable<int>("i", -1);
agent2.newVariable<int>("j", 1);
auto &a1f = agent1.newFunction("add", add_fn);
auto &a2f = agent2.newFunction("add", add_fn);
auto &layer = model.newLayer();
layer.addAgentFunction(a1f);
layer.addAgentFunction(a2f);
CUDASimulation cudaSimulation(model);
cudaSimulation.applyConfig();
const unsigned int populationSize = 5;
AgentPopulation pop1(agent1, populationSize);
AgentPopulation pop2(agent2, populationSize);
for (unsigned int i = 0; i < populationSize; i++) {
pop1.getNextInstance();
pop2.getNextInstance();
}
cudaSimulation.setPopulationData(pop1);
cudaSimulation.setPopulationData(pop2);
const unsigned int steps = 5;
for (unsigned int i = 0; i < steps; ++i) {
cudaSimulation.step();
}
cudaSimulation.getPopulationData(pop1);
cudaSimulation.getPopulationData(pop2);
for (unsigned int i = 0; i < populationSize; i++) {
auto instance = pop1.getInstanceAt(i);
EXPECT_EQ(instance.getVariable<int>("i"), 6);
EXPECT_EQ(instance.getVariable<int>("j"), 4);
}
for (unsigned int i = 0; i < populationSize; i++) {
auto instance = pop2.getInstanceAt(i);
EXPECT_EQ(instance.getVariable<int>("i"), 4);
EXPECT_EQ(instance.getVariable<int>("j"), 6);
}
}
TEST(TestSimulation, Simulate) {
// Simulation is abstract, so test via CUDASimulation
// Depends on CUDASimulation::step()
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
externalCounter = 0;
c.resetStepCounter();
c.SimulationConfig().steps = 7;
c.simulate();
EXPECT_EQ(externalCounter, 7);
EXPECT_EQ(c.getStepCounter(), 7u);
externalCounter = 0;
c.resetStepCounter();
c.SimulationConfig().steps = 3;
c.simulate();
EXPECT_EQ(externalCounter, 3);
EXPECT_EQ(c.getStepCounter(), 3u);
}
// Show that blank init resets the vals?
TEST(TestCUDASimulation, AgentDeath) {
std::default_random_engine generator;
std::uniform_int_distribution<unsigned int> distribution(0, 12);
// Test that step does a single step
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
a.newVariable<unsigned int>("x");
a.newFunction("DeathFunc", DeathTestFunc).setAllowAgentDeath(true);
m.newLayer().addAgentFunction(DeathTestFunc);
CUDASimulation c(m);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
std::vector<unsigned int> expected_output;
for (unsigned int i = 0; i < AGENT_COUNT; ++i) {
auto p = pop.getNextInstance();
unsigned int rng = distribution(generator);
p.setVariable<unsigned int>("x", rng);
if (rng % 2 != 0)
expected_output.push_back(rng);
}
c.setPopulationData(pop);
c.SimulationConfig().steps = 1;
c.simulate();
c.getPopulationData(pop);
EXPECT_EQ(static_cast<size_t>(pop.getCurrentListSize()), expected_output.size());
for (unsigned int i = 0; i < pop.getCurrentListSize(); ++i) {
AgentInstance ai = pop.getInstanceAt(i);
// Check x is an expected value
EXPECT_EQ(expected_output[i], ai.getVariable<unsigned int>("x"));
}
}
// test the programmatically accessible simulation time elapsed.
TEST(TestCUDASimulation, getSimulationElapsedTime) {
// Define a simple model - doesn't need to do anything other than take some time.
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c(m);
c.setPopulationData(pop);
// Try getting the timer before running simulate, which should return 0
EXPECT_EQ(c.getSimulationElapsedTime(), 0.0f);
// Call simulate to run 1 steps, which should take some length of time
c.SimulationConfig().steps = 1;
c.simulate();
EXPECT_GT(c.getSimulationElapsedTime(), 0.0f);
// Then run 10 steps, which should be longer / not the same.
float simulate1StepDuration = c.getSimulationElapsedTime();
c.SimulationConfig().steps = 10;
c.simulate();
float simulate10StepDuration = c.getSimulationElapsedTime();
EXPECT_GT(simulate10StepDuration, 0.0f);
EXPECT_NE(simulate1StepDuration, simulate10StepDuration);
}
// test that we can have 2 instances of the same ModelDescription simultaneously
TEST(TestCUDASimulation, MultipleInstances) {
// Define a simple model - doesn't need to do anything other than take some time.
ModelDescription m(MODEL_NAME);
AgentDescription &a = m.newAgent(AGENT_NAME);
AgentPopulation pop(a, static_cast<unsigned int>(AGENT_COUNT));
m.addStepFunction(IncrementCounter);
CUDASimulation c1(m);
c1.setPopulationData(pop);
// Set population data should trigger initialiseSingletons(), which is what leads to crash if EnvManager has matching name/id
EXPECT_NO_THROW(CUDASimulation c2(m); c2.setPopulationData(pop););
}
} // namespace test_cuda_simulation
|
67b6ba7a2990099697fdc3e645401e99eb569b8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/system/hip/execution_policy.h>
#include <typeinfo>
#include "time_invocation_cuda.hpp"
#include "utility.hpp"
template<class Vector>
struct inclusive_scan_functor
{
typename Vector::iterator first, last, result;
__host__ __device__
inclusive_scan_functor(Vector& vec)
: first(vec.begin()), last(vec.end()), result(vec.begin())
{}
__host__ __device__
void operator()()
{
thrust::inclusive_scan(thrust::hip::par, first, last, result);
}
};
template<class Vector>
inclusive_scan_functor<Vector> make_inclusive_scan_functor(Vector& vec)
{
return inclusive_scan_functor<Vector>(vec);
}
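// Times an in-place inclusive_scan over a device_vector of n elements of type T;
// time_function comes from the helper headers included above.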
template<class T>
double time(size_t n)
{
thrust::device_vector<T> vec(n);
auto f = make_inclusive_scan_functor(vec);
return time_function(f);
}
int main(int argc, char** argv)
{
double (*call_me)(size_t) = time<int>;
std::string type = "int";
if(argc >= 2)
{
type = argv[1];
}
size_t n = 1 << 20;
if(argc >= 3)
{
n = atoi(argv[2]);
}
if(type == "int")
{
    call_me = time<int>;
}
else if(type == "long")
{
call_me = time<uint64_t>;
}
else if(type == "float")
{
call_me = time<float>;
}
else if(type == "double")
{
call_me = time<double>;
}
else
{
throw std::runtime_error("Unrecognized type");
}
std::clog << "T: " << type << std::endl;
std::clog << "n: " << n << std::endl;
auto ms = call_me(n);
std::clog << "ms: " << ms << std::endl;
std::cout << ms;
return 0;
}
| 67b6ba7a2990099697fdc3e645401e99eb569b8e.cu | #include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/system/cuda/execution_policy.h>
#include <typeinfo>
#include "time_invocation_cuda.hpp"
#include "utility.hpp"
template<class Vector>
struct inclusive_scan_functor
{
typename Vector::iterator first, last, result;
__host__ __device__
inclusive_scan_functor(Vector& vec)
: first(vec.begin()), last(vec.end()), result(vec.begin())
{}
__host__ __device__
void operator()()
{
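// In-place inclusive prefix sum on the device; the output range overlaps the input range.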
thrust::inclusive_scan(thrust::cuda::par, first, last, result);
}
};
template<class Vector>
inclusive_scan_functor<Vector> make_inclusive_scan_functor(Vector& vec)
{
return inclusive_scan_functor<Vector>(vec);
}
template<class T>
double time(size_t n)
{
thrust::device_vector<T> vec(n);
auto f = make_inclusive_scan_functor(vec);
return time_function(f);
}
int main(int argc, char** argv)
{
double (*call_me)(size_t) = time<int>;
std::string type = "int";
if(argc >= 2)
{
type = argv[1];
}
size_t n = 1 << 20;
if(argc >= 3)
{
n = atoi(argv[2]);
}
if(type == "int")
{
call_me = time<int>;
}
else if(type == "long")
{
call_me = time<uint64_t>;
}
else if(type == "float")
{
call_me = time<float>;
}
else if(type == "double")
{
call_me = time<double>;
}
else
{
throw std::runtime_error("Unrecognized type");
}
std::clog << "T: " << type << std::endl;
std::clog << "n: " << n << std::endl;
auto ms = call_me(n);
std::clog << "ms: " << ms << std::endl;
std::cout << ms;
return 0;
}
|
db5bcae7dd4602a538825c1f3eb13f6fce07fa6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/top_k.h>
#include <MmulHelper.h>
#include <NDArrayFactory.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __device__ void _swapRows(T* matrix, Nd4jLong* shape, int theFirst, int theSecond, Nd4jLong N) {
if (theFirst != theSecond) {
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < N; i += step) {
Nd4jLong iCoord1[] = {theFirst, i};
Nd4jLong iCoord2[] = {theSecond, i};
auto iIndex1 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord1, 2);
auto iIndex2 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord2, 2);
//atomicExch(&matrix[iIndex1], matrix[iIndex2]);
T e0 = matrix[iIndex1];
T e1 = matrix[iIndex2];
matrix[iIndex1] = e1;
matrix[iIndex2] = e0;
}
}
}
// BUILD_SINGLE_TEMPLATE(template void _swapRows, (NDArray* matrix, int theFirst, int theSecond), FLOAT_TYPES);
//
// void swapRows(NDArray* matrix, int theFirst, int theSecond) {
// BUILD_SINGLE_SELECTOR(matrix->dataType(), _swapRows, (matrix, theFirst, theSecond), FLOAT_TYPES);
// }
template <typename T>
static void _invertLowerMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
}
BUILD_SINGLE_TEMPLATE(template void _invertLowerMatrix, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);
void invertLowerMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), _invertLowerMatrix, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
template <typename T>
static void _invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
}
BUILD_SINGLE_TEMPLATE(template void _invertUpperMatrix, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);
void invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), _invertUpperMatrix, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
template <typename T>
static __global__ void lupKernel(T* compound, Nd4jLong* compoundShape, T* permutation, Nd4jLong* permutationShape, Nd4jLong rowNum) {
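// In-place LU factorization with partial pivoting: for each column, pick the row with the largest
// absolute pivot, swap it into place in both the compound and permutation matrices, then eliminate below it.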
int swapCount = 0;
for(int i = blockIdx.x; i < rowNum; i += gridDim.x ) {
auto pivotValue = T(0.0);
auto pivot = -1;
for(int rowCounter = i; rowCounter < rowNum; rowCounter++ ) {
Nd4jLong rowCoord[] = {rowCounter, i};
auto rowPos = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), rowCoord, 2);
if(nd4j::math::nd4j_abs(compound[rowPos]) > pivotValue ) {
pivotValue = nd4j::math::nd4j_abs(compound[rowPos]);
pivot = rowCounter;
}
}
if( pivotValue != T(0.0) ) {
_swapRows<T>(compound, compoundShape, pivot, i, rowNum);
_swapRows<T>(permutation, permutationShape, pivot, i, rowNum);
if (pivot != i)
swapCount++;
for( int j = i + 1; j < rowNum; j++ ) {
Nd4jLong posJIbuf[] = {j, i};
Nd4jLong posIIbuf[] = {i, i};
auto posJI = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJIbuf, 2);
auto posII = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIIbuf, 2);
compound[posJI] /= compound[posII];
for( int k = i + 1; k < rowNum; k++ ) {
Nd4jLong posJKbuf[] = {j, k};
Nd4jLong posIKbuf[] = {i, k};
auto posJK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJKbuf, 2);
auto posIK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIKbuf, 2);
T arg = compound[posJI] * compound[posIK];
compound[posJK] -= arg;
}
}
}
}
}
template <typename T>
static __global__ void determinantKernel(T* compound, Nd4jLong* shape, T* result) {
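// The determinant of the factorized matrix is the product of its diagonal entries;
// each thread folds its share of the diagonal into *result with an atomic multiply.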
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
len = shape::length(shape);
}
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
Nd4jLong di[] = {i, i};
auto pos = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
math::atomics::nd4j_atomicMul(result, compound[pos]);
}
}
template <typename T>
static __global__ void determinantFullKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong* tadShape, Nd4jLong* tadOffsets) {
}
template <typename T>
static NDArray _lup(LaunchContext* context, NDArray* input, NDArray* compound, NDArray* permutation) {
NDArray determinant = NDArrayFactory::create<T>(1.f);
auto rowNum = input->rows();
auto columnNum = input->columns();
NDArray compoundMatrix = *input; // copy
NDArray permutationMatrix(input, false, input->getContext()); // has same shape as input and contiguous strides
permutationMatrix.setIdentity();
T pivotValue; // = T(0.0);
int pivot; // = -1;
int swapCount = 0;
T* compoundBuf = reinterpret_cast<T*>(compoundMatrix.specialBuffer());
T* permutationBuf = reinterpret_cast<T*>(permutationMatrix.specialBuffer());
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( lupKernel<T>), dim3(256), dim3(256), 1024, *stream, compoundBuf, compoundMatrix.specialShapeInfo(), permutationBuf, permutationMatrix.specialShapeInfo(), rowNum);
hipLaunchKernelGGL(( determinantKernel<T>), dim3(256), dim3(256), 1024, *stream, compoundBuf, compoundMatrix.specialShapeInfo(), reinterpret_cast<T*>(determinant.specialBuffer()));
// for (int e = 0; e < rowNum; e++) {
// // nd4j_printf("Compound matrix diag %i %f.\n", e, (*compoundMatrix)(e, e));
// determinant *= compoundMatrix.e<T>(e, e);
// }
if (swapCount % 2) determinant = -determinant;
if (compound != nullptr)
compound->assign(compoundMatrix);
if (permutation != nullptr)
permutation->assign(permutationMatrix);
return determinant;
}
BUILD_SINGLE_TEMPLATE(template NDArray _lup, (LaunchContext* context, NDArray* input, NDArray* output, NDArray* permutation), FLOAT_TYPES);
template <typename T>
static int _determinant(nd4j::LaunchContext* context, NDArray* input, NDArray* output) {
Nd4jLong n = input->sizeAt(-1);
Nd4jLong n2 = n * n;
std::vector<int> dims;
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
//auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
//auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, input->dataType(), input->getContext()); //, block.getWorkspace());
auto stream = context->getCudaStream();
auto inputBuf = reinterpret_cast<T*>(input->specialBuffer());
auto outputBuf = reinterpret_cast<T*>(output->specialBuffer());
dim3 launchDims(256, 256, 1024);
hipLaunchKernelGGL(( determinantFullKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, inputBuf, input->specialShapeInfo(), outputBuf, output->specialShapeInfo(), packX.specialShapeInfo(), packX.specialOffsets());
// for (int e = 0; e < output->lengthOf(); e++) {
// for (int k = e * n2, row = 0; k < (e + 1) * n2; ++k, ++row)
// matrix.p(row, input->e<T>(k));
//// output->p(e, lup_<T>(&matrix, (NDArray*)nullptr, (NDArray*)nullptr));
// }
return Status::OK();
}
BUILD_SINGLE_TEMPLATE(template int _determinant, (nd4j::LaunchContext* context, NDArray* input, NDArray* output), FLOAT_TYPES);
int determinant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _determinant, (context, input, output), FLOAT_TYPES);
}
template <typename T>
int log_abs_determinant_(NDArray* input, NDArray* output) {
return ND4J_STATUS_OK;
}
BUILD_SINGLE_TEMPLATE(template int log_abs_determinant_, (NDArray* input, NDArray* output), FLOAT_TYPES);
int log_abs_determinant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return log_abs_determinant_, (input, output), FLOAT_TYPES);
}
template <typename T>
static int _inverse(NDArray* input, NDArray* output) {
return Status::OK();
}
int inverse(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _inverse, (input, output), FLOAT_TYPES);
}
bool checkCholeskyInput(nd4j::LaunchContext * context, NDArray const* input) {
return false;
}
template <typename T>
int cholesky_(NDArray* input, NDArray* output, bool inplace) {
return Status::OK();
}
int cholesky(nd4j::LaunchContext * context, NDArray* input, NDArray* output, bool inplace) {
BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (input, output, inplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int cholesky_, (NDArray* input, NDArray* output, bool inplace), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template int _inverse, (NDArray* input, NDArray* output), FLOAT_TYPES);
int logdetFunctor(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
return 119;
}
}
}
}
| db5bcae7dd4602a538825c1f3eb13f6fce07fa6d.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/top_k.h>
#include <MmulHelper.h>
#include <NDArrayFactory.h>
#include <Status.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __device__ void _swapRows(T* matrix, Nd4jLong* shape, int theFirst, int theSecond, Nd4jLong N) {
if (theFirst != theSecond) {
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < N; i += step) {
Nd4jLong iCoord1[] = {theFirst, i};
Nd4jLong iCoord2[] = {theSecond, i};
auto iIndex1 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord1, 2);
auto iIndex2 = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), iCoord2, 2);
//atomicExch(&matrix[iIndex1], matrix[iIndex2]);
T e0 = matrix[iIndex1];
T e1 = matrix[iIndex2];
matrix[iIndex1] = e1;
matrix[iIndex2] = e0;
}
}
}
// BUILD_SINGLE_TEMPLATE(template void _swapRows, (NDArray* matrix, int theFirst, int theSecond), FLOAT_TYPES);
//
// void swapRows(NDArray* matrix, int theFirst, int theSecond) {
// BUILD_SINGLE_SELECTOR(matrix->dataType(), _swapRows, (matrix, theFirst, theSecond), FLOAT_TYPES);
// }
template <typename T>
static void _invertLowerMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
}
BUILD_SINGLE_TEMPLATE(template void _invertLowerMatrix, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);
void invertLowerMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), _invertLowerMatrix, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
template <typename T>
static void _invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
}
BUILD_SINGLE_TEMPLATE(template void _invertUpperMatrix, (NDArray* inputMatrix, NDArray* invertedMatrix);, FLOAT_TYPES);
void invertUpperMatrix(NDArray* inputMatrix, NDArray* invertedMatrix) {
BUILD_SINGLE_SELECTOR(inputMatrix->dataType(), _invertUpperMatrix, (inputMatrix, invertedMatrix), FLOAT_TYPES);
}
template <typename T>
static __global__ void lupKernel(T* compound, Nd4jLong* compoundShape, T* permutation, Nd4jLong* permutationShape, Nd4jLong rowNum) {
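// In-place LU factorization with partial pivoting: for each column, pick the row with the largest
// absolute pivot, swap it into place in both the compound and permutation matrices, then eliminate below it.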
int swapCount = 0;
for(int i = blockIdx.x; i < rowNum; i += gridDim.x ) {
auto pivotValue = T(0.0);
auto pivot = -1;
for(int rowCounter = i; rowCounter < rowNum; rowCounter++ ) {
Nd4jLong rowCoord[] = {rowCounter, i};
auto rowPos = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), rowCoord, 2);
if(nd4j::math::nd4j_abs(compound[rowPos]) > pivotValue ) {
pivotValue = nd4j::math::nd4j_abs(compound[rowPos]);
pivot = rowCounter;
}
}
if( pivotValue != T(0.0) ) {
_swapRows<T>(compound, compoundShape, pivot, i, rowNum);
_swapRows<T>(permutation, permutationShape, pivot, i, rowNum);
if (pivot != i)
swapCount++;
for( int j = i + 1; j < rowNum; j++ ) {
Nd4jLong posJIbuf[] = {j, i};
Nd4jLong posIIbuf[] = {i, i};
auto posJI = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJIbuf, 2);
auto posII = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIIbuf, 2);
compound[posJI] /= compound[posII];
for( int k = i + 1; k < rowNum; k++ ) {
Nd4jLong posJKbuf[] = {j, k};
Nd4jLong posIKbuf[] = {i, k};
auto posJK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posJKbuf, 2);
auto posIK = shape::getOffset(0, shape::shapeOf(compoundShape), shape::stride(compoundShape), posIKbuf, 2);
T arg = compound[posJI] * compound[posIK];
compound[posJK] -= arg;
}
}
}
}
}
template <typename T>
static __global__ void determinantKernel(T* compound, Nd4jLong* shape, T* result) {
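// The determinant of the factorized matrix is the product of its diagonal entries;
// each thread folds its share of the diagonal into *result with an atomic multiply.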
__shared__ Nd4jLong len;
if (threadIdx.x == 0) {
len = shape::length(shape);
}
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto i = start; i < len; i += step) {
Nd4jLong di[] = {i, i};
auto pos = shape::getOffset(0, shape::shapeOf(shape), shape::stride(shape), di, 2);
math::atomics::nd4j_atomicMul(result, compound[pos]);
}
}
template <typename T>
static __global__ void determinantFullKernel(T* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape, Nd4jLong* tadShape, Nd4jLong* tadOffsets) {
}
template <typename T>
static NDArray _lup(LaunchContext* context, NDArray* input, NDArray* compound, NDArray* permutation) {
NDArray determinant = NDArrayFactory::create<T>(1.f);
auto rowNum = input->rows();
auto columnNum = input->columns();
NDArray compoundMatrix = *input; // copy
NDArray permutationMatrix(input, false, input->getContext()); // has same shape as input and contiguous strides
permutationMatrix.setIdentity();
T pivotValue; // = T(0.0);
int pivot; // = -1;
int swapCount = 0;
T* compoundBuf = reinterpret_cast<T*>(compoundMatrix.specialBuffer());
T* permutationBuf = reinterpret_cast<T*>(permutationMatrix.specialBuffer());
auto stream = context->getCudaStream();
lupKernel<T><<<256, 256, 1024, *stream>>>(compoundBuf, compoundMatrix.specialShapeInfo(), permutationBuf, permutationMatrix.specialShapeInfo(), rowNum);
determinantKernel<T><<<256, 256, 1024, *stream>>>(compoundBuf, compoundMatrix.specialShapeInfo(), reinterpret_cast<T*>(determinant.specialBuffer()));
// for (int e = 0; e < rowNum; e++) {
// // nd4j_printf("Compound matrix diag %i %f.\n", e, (*compoundMatrix)(e, e));
// determinant *= compoundMatrix.e<T>(e, e);
// }
if (swapCount % 2) determinant = -determinant;
if (compound != nullptr)
compound->assign(compoundMatrix);
if (permutation != nullptr)
permutation->assign(permutationMatrix);
return determinant;
}
BUILD_SINGLE_TEMPLATE(template NDArray _lup, (LaunchContext* context, NDArray* input, NDArray* output, NDArray* permutation), FLOAT_TYPES);
template <typename T>
static int _determinant(nd4j::LaunchContext* context, NDArray* input, NDArray* output) {
Nd4jLong n = input->sizeAt(-1);
Nd4jLong n2 = n * n;
std::vector<int> dims;
auto packX = ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), {input->rankOf() - 2, input->rankOf() - 1});
//auto packZ = ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {output->rankOf() - 1});
//auto matrix = NDArrayFactory::create(input->ordering(), {n, n}, input->dataType(), input->getContext()); //, block.getWorkspace());
auto stream = context->getCudaStream();
auto inputBuf = reinterpret_cast<T*>(input->specialBuffer());
auto outputBuf = reinterpret_cast<T*>(output->specialBuffer());
dim3 launchDims(256, 256, 1024);
determinantFullKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(inputBuf, input->specialShapeInfo(), outputBuf, output->specialShapeInfo(), packX.specialShapeInfo(), packX.specialOffsets());
// for (int e = 0; e < output->lengthOf(); e++) {
// for (int k = e * n2, row = 0; k < (e + 1) * n2; ++k, ++row)
// matrix.p(row, input->e<T>(k));
//// output->p(e, lup_<T>(&matrix, (NDArray*)nullptr, (NDArray*)nullptr));
// }
return Status::OK();
}
BUILD_SINGLE_TEMPLATE(template int _determinant, (nd4j::LaunchContext* context, NDArray* input, NDArray* output), FLOAT_TYPES);
int determinant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _determinant, (context, input, output), FLOAT_TYPES);
}
template <typename T>
int log_abs_determinant_(NDArray* input, NDArray* output) {
return ND4J_STATUS_OK;
}
BUILD_SINGLE_TEMPLATE(template int log_abs_determinant_, (NDArray* input, NDArray* output), FLOAT_TYPES);
int log_abs_determinant(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return log_abs_determinant_, (input, output), FLOAT_TYPES);
}
template <typename T>
static int _inverse(NDArray* input, NDArray* output) {
return Status::OK();
}
int inverse(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _inverse, (input, output), FLOAT_TYPES);
}
bool checkCholeskyInput(nd4j::LaunchContext * context, NDArray const* input) {
return false;
}
template <typename T>
int cholesky_(NDArray* input, NDArray* output, bool inplace) {
return Status::OK();
}
int cholesky(nd4j::LaunchContext * context, NDArray* input, NDArray* output, bool inplace) {
BUILD_SINGLE_SELECTOR(input->dataType(), return cholesky_, (input, output, inplace), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int cholesky_, (NDArray* input, NDArray* output, bool inplace), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template int _inverse, (NDArray* input, NDArray* output), FLOAT_TYPES);
int logdetFunctor(nd4j::LaunchContext * context, NDArray* input, NDArray* output) {
return 119;
}
}
}
}
|
a930719017174cdfc59fc9ad3fb56852638ed0cd.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2020 NVIDIA Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Released under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
#include <cuda/std/cstdint>
#include <cuda/std/atomic>
// TODO: It would be great if this example could NOT depend on Thrust.
#include <thrust/pair.h>
#include <thrust/functional.h>
#include <thrust/allocate_unique.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <cassert>
#include <random>
#include <iostream>
#include <cstdio>
#include <cassert>
// TODO: This should be upstreamed and then removed.
namespace thrust {
using universal_raw_memory_resource =
thrust::system::cuda::detail::cuda_memory_resource<
thrust::system::cuda::detail::hipMallocManaged, hipFree, void*
>;
template <typename T>
using universal_allocator =
thrust::mr::stateless_resource_allocator<T, universal_raw_memory_resource>;
template <typename T>
using universal_vector = thrust::device_vector<T, universal_allocator<T>>;
} // thrust
template <
typename Key, typename Value,
typename Hash = thrust::identity<Key>,
typename KeyEqual = thrust::equal_to<Key>,
typename MemoryResource = thrust::universal_raw_memory_resource
>
struct concurrent_hash_table {
// Elements transition from state_empty -> state_reserved ->
// state_filled; no other transitions are allowed.
enum state_type {
state_empty, state_reserved, state_filled
};
using key_type = Key;
using mapped_type = Value;
using size_type = cuda::std::uint64_t;
using key_allocator = thrust::mr::stateless_resource_allocator<
key_type, MemoryResource
>;
using mapped_allocator = thrust::mr::stateless_resource_allocator<
mapped_type, MemoryResource
>;
using state_allocator = thrust::mr::stateless_resource_allocator<
cuda::std::atomic<state_type>, MemoryResource
>;
using key_iterator = typename key_allocator::pointer;
using value_iterator = typename mapped_allocator::pointer;
using state_iterator = typename state_allocator::pointer;
// This whole thing is silly and should be a lambda, or at least a private
// nested class, but alas, NVCC doesn't like that.
struct element_destroyer {
private:
size_type capacity_;
key_iterator keys_;
value_iterator values_;
state_iterator states_;
public:
__host__ __device__
element_destroyer(size_type capacity,
key_iterator keys,
value_iterator values,
state_iterator states)
: capacity_(capacity), keys_(keys), values_(values), states_(states)
{}
element_destroyer(element_destroyer const&) = default;
__host__ __device__
void operator()(size_type i) {
if (state_empty != states_[i]) {
(keys_ + i)->~key_type();
(values_ + i)->~mapped_type();
}
}
};
private:
size_type capacity_;
key_iterator keys_;
value_iterator values_;
state_iterator states_;
Hash hash_;
KeyEqual key_equal_;
public:
__host__
concurrent_hash_table(size_type capacity,
Hash hash = Hash(),
KeyEqual key_equal = KeyEqual())
: capacity_(capacity)
, keys_(key_allocator{}.allocate(capacity_))
, values_(mapped_allocator{}.allocate(capacity_))
, states_(state_allocator{}.allocate(capacity_))
, hash_(std::move(hash))
, key_equal_(std::move(key_equal))
{
thrust::uninitialized_fill(thrust::device,
states_, states_ + capacity_,
state_empty);
}
__host__
~concurrent_hash_table()
{
thrust::for_each(thrust::device,
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(capacity_),
element_destroyer(capacity_, keys_, values_, states_));
}
// TODO: Change return type to an enum with three possible values, succeeded,
// exists, and full.
template <typename UKey, typename... Args>
__host__ __device__
thrust::pair<value_iterator, bool>
try_emplace(UKey&& key, Args&&... args) {
auto index{hash_(key) % capacity_};
// Linearly probe the storage space up to `capacity_` times; if we haven't
// succeeded by then, the container is full.
for (size_type i = 0; i < capacity_; ++i) {
state_type old = states_[index].load(cuda::std::memory_order_acquire);
while (old == state_empty) {
// As long as the state of this element is empty, attempt to set it to
// reserved.
if (states_[index].compare_exchange_weak(
old, state_reserved, cuda::std::memory_order_acq_rel))
{
// We succeeded; the element is now "locked" as reserved.
new (keys_ + index) key_type(std::forward<UKey>(key));
new (values_ + index) mapped_type(std::forward<Args>(args)...);
states_[index].store(state_filled, cuda::std::memory_order_release);
return thrust::make_pair(values_ + index, true);
}
}
// If we are here, the element we are probing is not empty and we didn't
// fill it, so we need to wait for it to be filled.
while (state_filled != states_[index].load(cuda::std::memory_order_acquire))
;
// Now we know that the element we are probing has been filled by someone
// else, so we check if our key is equal to it.
if (key_equal_(keys_[index], key))
// It is, so the element already exists.
return thrust::make_pair(values_ + index, false);
// Otherwise, the element isn't a match, so move on to the next element.
index = (index + 1) % capacity_;
}
// If we are here, the container is full.
return thrust::make_pair(value_iterator{}, false);
}
__host__ __device__
mapped_type& operator[](key_type const& key) {
return (*try_emplace(key).first);
}
__host__ __device__
mapped_type& operator[](key_type&& key) {
return (*try_emplace(std::move(key)).first);
}
};
template <typename T>
struct identity_modulo {
private:
T const modulo_;
public:
__host__ __device__
identity_modulo(T modulo) : modulo_(std::move(modulo)) {}
identity_modulo(identity_modulo const&) = default;
__host__ __device__
T operator()(T i) { return i % modulo_; }
};
int main() {
{
using table = concurrent_hash_table<int, cuda::std::atomic<int>>;
auto freq = thrust::allocate_unique<table>(thrust::universal_allocator<table>{}, 8);
thrust::universal_vector<int> input = [] {
thrust::universal_vector<int> v(2048);
std::mt19937 gen(1337);
std::uniform_int_distribution<long> dis(0, 7);
thrust::generate(v.begin(), v.end(), [&] { return dis(gen); });
return v;
}();
thrust::for_each(thrust::device, input.begin(), input.end(),
[freq = freq.get()] __device__ (int i) {
(*freq)[i].fetch_add(1, cuda::std::memory_order_relaxed);
}
);
thrust::host_vector<int> gold(8);
thrust::for_each(input.begin(), input.end(), [&] (int i) { ++gold[i]; });
for (cuda::std::uint64_t i = 0; i < 8; ++i)
std::cout << "i: " << i
<< " gold: " << gold[i]
<< " observed: " << (*freq)[i] << "\n";
assert(hipSuccess == hipDeviceSynchronize());
}
{
using table = concurrent_hash_table<int, cuda::std::atomic<int>, identity_modulo<int>>;
auto freq = thrust::allocate_unique<table>(thrust::universal_allocator<table>{}, 8, identity_modulo<int>(4));
thrust::universal_vector<int> input = [] {
thrust::universal_vector<int> v(2048);
std::mt19937 gen(1337);
std::uniform_int_distribution<long> dis(0, 7);
thrust::generate(v.begin(), v.end(), [&] { return dis(gen); });
return v;
}();
thrust::for_each(thrust::device, input.begin(), input.end(),
[freq = freq.get()] __device__ (int i) {
(*freq)[i].fetch_add(1, cuda::std::memory_order_relaxed);
}
);
thrust::host_vector<int> gold(8);
thrust::for_each(input.begin(), input.end(), [&] (int i) { ++gold[i]; });
for (cuda::std::uint64_t i = 0; i < 8; ++i)
std::cout << "i: " << i
<< " gold: " << gold[i]
<< " observed: " << (*freq)[i] << "\n";
assert(hipSuccess == hipDeviceSynchronize());
}
}
| a930719017174cdfc59fc9ad3fb56852638ed0cd.cu | // Copyright (c) 2018-2020 NVIDIA Corporation
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Released under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
#include <cuda/std/cstdint>
#include <cuda/std/atomic>
// TODO: It would be great if this example could NOT depend on Thrust.
#include <thrust/pair.h>
#include <thrust/functional.h>
#include <thrust/allocate_unique.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <cassert>
#include <random>
#include <iostream>
#include <cstdio>
#include <cassert>
// TODO: This should be upstreamed and then removed.
namespace thrust {
using universal_raw_memory_resource =
thrust::system::cuda::detail::cuda_memory_resource<
thrust::system::cuda::detail::cudaMallocManaged, cudaFree, void*
>;
template <typename T>
using universal_allocator =
thrust::mr::stateless_resource_allocator<T, universal_raw_memory_resource>;
template <typename T>
using universal_vector = thrust::device_vector<T, universal_allocator<T>>;
} // thrust
template <
typename Key, typename Value,
typename Hash = thrust::identity<Key>,
typename KeyEqual = thrust::equal_to<Key>,
typename MemoryResource = thrust::universal_raw_memory_resource
>
struct concurrent_hash_table {
// Elements transition from state_empty -> state_reserved ->
// state_filled; no other transitions are allowed.
enum state_type {
state_empty, state_reserved, state_filled
};
using key_type = Key;
using mapped_type = Value;
using size_type = cuda::std::uint64_t;
using key_allocator = thrust::mr::stateless_resource_allocator<
key_type, MemoryResource
>;
using mapped_allocator = thrust::mr::stateless_resource_allocator<
mapped_type, MemoryResource
>;
using state_allocator = thrust::mr::stateless_resource_allocator<
cuda::std::atomic<state_type>, MemoryResource
>;
using key_iterator = typename key_allocator::pointer;
using value_iterator = typename mapped_allocator::pointer;
using state_iterator = typename state_allocator::pointer;
// This whole thing is silly and should be a lambda, or at least a private
// nested class, but alas, NVCC doesn't like that.
struct element_destroyer {
private:
size_type capacity_;
key_iterator keys_;
value_iterator values_;
state_iterator states_;
public:
__host__ __device__
element_destroyer(size_type capacity,
key_iterator keys,
value_iterator values,
state_iterator states)
: capacity_(capacity), keys_(keys), values_(values), states_(states)
{}
element_destroyer(element_destroyer const&) = default;
__host__ __device__
void operator()(size_type i) {
if (state_empty != states_[i]) {
(keys_ + i)->~key_type();
(values_ + i)->~mapped_type();
}
}
};
private:
size_type capacity_;
key_iterator keys_;
value_iterator values_;
state_iterator states_;
Hash hash_;
KeyEqual key_equal_;
public:
__host__
concurrent_hash_table(size_type capacity,
Hash hash = Hash(),
KeyEqual key_equal = KeyEqual())
: capacity_(capacity)
, keys_(key_allocator{}.allocate(capacity_))
, values_(mapped_allocator{}.allocate(capacity_))
, states_(state_allocator{}.allocate(capacity_))
, hash_(std::move(hash))
, key_equal_(std::move(key_equal))
{
thrust::uninitialized_fill(thrust::device,
states_, states_ + capacity_,
state_empty);
}
__host__
~concurrent_hash_table()
{
thrust::for_each(thrust::device,
thrust::counting_iterator<size_type>(0),
thrust::counting_iterator<size_type>(capacity_),
element_destroyer(capacity_, keys_, values_, states_));
}
// TODO: Change return type to an enum with three possible values, succeeded,
// exists, and full.
template <typename UKey, typename... Args>
__host__ __device__
thrust::pair<value_iterator, bool>
try_emplace(UKey&& key, Args&&... args) {
auto index{hash_(key) % capacity_};
// Linearly probe the storage space up to `capacity_` times; if we haven't
// succeeded by then, the container is full.
for (size_type i = 0; i < capacity_; ++i) {
state_type old = states_[index].load(cuda::std::memory_order_acquire);
while (old == state_empty) {
// As long as the state of this element is empty, attempt to set it to
// reserved.
if (states_[index].compare_exchange_weak(
old, state_reserved, cuda::std::memory_order_acq_rel))
{
// We succeeded; the element is now "locked" as reserved.
new (keys_ + index) key_type(std::forward<UKey>(key));
new (values_ + index) mapped_type(std::forward<Args>(args)...);
states_[index].store(state_filled, cuda::std::memory_order_release);
return thrust::make_pair(values_ + index, true);
}
}
// If we are here, the element we are probing is not empty and we didn't
// fill it, so we need to wait for it to be filled.
while (state_filled != states_[index].load(cuda::std::memory_order_acquire))
;
// Now we know that the element we are probing has been filled by someone
// else, so we check if our key is equal to it.
if (key_equal_(keys_[index], key))
// It is, so the element already exists.
return thrust::make_pair(values_ + index, false);
// Otherwise, the element isn't a match, so move on to the next element.
index = (index + 1) % capacity_;
}
// If we are here, the container is full.
return thrust::make_pair(value_iterator{}, false);
}
__host__ __device__
mapped_type& operator[](key_type const& key) {
return (*try_emplace(key).first);
}
__host__ __device__
mapped_type& operator[](key_type&& key) {
return (*try_emplace(std::move(key)).first);
}
};
template <typename T>
struct identity_modulo {
private:
T const modulo_;
public:
__host__ __device__
identity_modulo(T modulo) : modulo_(std::move(modulo)) {}
identity_modulo(identity_modulo const&) = default;
__host__ __device__
T operator()(T i) { return i % modulo_; }
};
int main() {
{
using table = concurrent_hash_table<int, cuda::std::atomic<int>>;
auto freq = thrust::allocate_unique<table>(thrust::universal_allocator<table>{}, 8);
thrust::universal_vector<int> input = [] {
thrust::universal_vector<int> v(2048);
std::mt19937 gen(1337);
std::uniform_int_distribution<long> dis(0, 7);
thrust::generate(v.begin(), v.end(), [&] { return dis(gen); });
return v;
}();
thrust::for_each(thrust::device, input.begin(), input.end(),
[freq = freq.get()] __device__ (int i) {
(*freq)[i].fetch_add(1, cuda::std::memory_order_relaxed);
}
);
thrust::host_vector<int> gold(8);
thrust::for_each(input.begin(), input.end(), [&] (int i) { ++gold[i]; });
for (cuda::std::uint64_t i = 0; i < 8; ++i)
std::cout << "i: " << i
<< " gold: " << gold[i]
<< " observed: " << (*freq)[i] << "\n";
assert(cudaSuccess == cudaDeviceSynchronize());
}
{
using table = concurrent_hash_table<int, cuda::std::atomic<int>, identity_modulo<int>>;
auto freq = thrust::allocate_unique<table>(thrust::universal_allocator<table>{}, 8, identity_modulo<int>(4));
thrust::universal_vector<int> input = [] {
thrust::universal_vector<int> v(2048);
std::mt19937 gen(1337);
std::uniform_int_distribution<long> dis(0, 7);
thrust::generate(v.begin(), v.end(), [&] { return dis(gen); });
return v;
}();
thrust::for_each(thrust::device, input.begin(), input.end(),
[freq = freq.get()] __device__ (int i) {
(*freq)[i].fetch_add(1, cuda::std::memory_order_relaxed);
}
);
thrust::host_vector<int> gold(8);
thrust::for_each(input.begin(), input.end(), [&] (int i) { ++gold[i]; });
for (cuda::std::uint64_t i = 0; i < 8; ++i)
std::cout << "i: " << i
<< " gold: " << gold[i]
<< " observed: " << (*freq)[i] << "\n";
assert(cudaSuccess == cudaDeviceSynchronize());
}
}
|
7a5e6795063c1b3ef0c1ba5f1ec7affa71a6b9fb.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <hip/hip_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
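// Only lanes 0-27 of each warp run the multiply loop below; the remaining four lanes idle,
// so the kernel exercises the FP units under partial warp divergence.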
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1 = 0;
float Value2 = 0;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Division access
if((i%32)<=27){
for(unsigned k=0; k<iterations;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
} | 7a5e6795063c1b3ef0c1ba5f1ec7affa71a6b9fb.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
// includes from project
// includes from CUDA
#include <cuda_runtime.h>
//#include <helper_math.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int iterations)
{
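// Only lanes 0-27 of each warp run the multiply loop below; the remaining four lanes idle,
// so the kernel exercises the FP units under partial warp divergence.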
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1 = 0;
float Value2 = 0;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
// Excessive Division access
if((i%32)<=27){
for(unsigned k=0; k<iterations;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
}
}
__syncthreads();
Value=Value1;
C[i]=Value+Value2;
}
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
} |
5a67a6efda383c89fab30514a9f35e6507aba2d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__
void crossEntropyCostDerivative(float *desiredOutput, unsigned int length, float *networkOutput, float* result)
{
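// Derivative of the binary cross-entropy cost with respect to the network output;
// the small constants guard against division by zero.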
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < length;
i += blockDim.x * gridDim.x)
{
result[i]=-desiredOutput[i]/(0.00001f+networkOutput[i])+(1.0f-desiredOutput[i])/(1.00001f-networkOutput[i]);
}
}
| 5a67a6efda383c89fab30514a9f35e6507aba2d2.cu | extern "C"
__global__
void crossEntropyCostDerivative(float *desiredOutput, unsigned int length, float *networkOutput, float* result)
{
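// Derivative of the binary cross-entropy cost with respect to the network output;
// the small constants guard against division by zero.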
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < length;
i += blockDim.x * gridDim.x)
{
result[i]=-desiredOutput[i]/(0.00001f+networkOutput[i])+(1.0f-desiredOutput[i])/(1.00001f-networkOutput[i]);
}
}
|
c131a3c9c61662322aaedf8135f4112b52c99de3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void VecAdd(float* A, float* B, float*
C, int N){
// Each thread computes one element of the output vector
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(int argc,char **argv) {
int N = pow(2,15);
size_t size = N * sizeof(float);
int loop;
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
FILE *arrayfile_a;
FILE *arrayfile_b;
FILE *arrayfile_c;
if (argc<3){
printf("Too few arguments.\nUsage is ./ee16b068_3.out file1.txt file2.txt ");
return 1;
}
// Initialize input vectors
arrayfile_a = fopen(argv[1], "r");
arrayfile_b = fopen(argv[2], "r");
arrayfile_c = fopen("ee16b068_3_out.txt", "w");
// Read first two arrays
printf("\nArray A (first 10 values) \n ");
for (loop = 0; loop < N; loop++)
{
fscanf(arrayfile_a, "%f", &h_A[loop]);
if (loop<10){
printf("%f ", h_A[loop]);
}
}
printf("\nArray B (first 10 values) \n ");
for (loop = 0; loop < N; loop++)
{
fscanf(arrayfile_b, "%f", &h_B[loop]);
if (loop<10){
printf("%f ", h_B[loop]);
}
}
//printf("Array A (first 10 values) \n ");
//for(loop = 0; loop < N; loop++){
//h_A[loop] = rand() % 100 + 1;
//if (loop<10){
// printf("%f ", h_A[loop]);
//}
//}
/* printf("\nArray B (first 10 values) \n ");
for(loop = 0; loop < N; loop++){
h_B[loop] = rand() % 100 + 1;
if (loop<10){
printf("%f ", h_B[loop]);
}
} */
// Allocate vectors in device memory
float* d_A; hipMalloc(&d_A, size);
float* d_B; hipMalloc(&d_B, size);
float* d_C; hipMalloc(&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size,hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size,hipMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) /threadsPerBlock;
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A,d_B, d_C, N);
// h_C contains the result in host memory
hipMemcpy(h_C, d_C, size,hipMemcpyDeviceToHost);
printf("\nArray C (first 10 outputs)\n");
for(loop = 0; loop < 10; loop++)
printf("%f ", h_C[loop]);
// Log outputs
printf("\nWritting to file ee16b068_3_out.txt as <vec a> <vec b> <vec>");
for (loop=0;loop<N;loop++){
fprintf(arrayfile_c,"%f %f %f\n",h_A[loop],h_B[loop],h_C[loop]);
}
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
fclose(arrayfile_a); fclose(arrayfile_b); fclose(arrayfile_c);
return 0;
} | c131a3c9c61662322aaedf8135f4112b52c99de3.cu | #include <stdlib.h>
#include <stdio.h>
__global__ void VecAdd(float* A, float* B, float*
C, int N){
// Host code
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
int main(int argc,char **argv) {
int N = pow(2,15);
size_t size = N * sizeof(float);
int loop;
// Allocate input vectors h_A and h_B in host memory
float* h_A = (float*)malloc(size);
float* h_B = (float*)malloc(size);
float* h_C = (float*)malloc(size);
FILE *arrayfile_a;
FILE *arrayfile_b;
FILE *arrayfile_c;
if (argc<3){
printf("Too few arguments.\nUsage is ./ee16b068_3.out file1.txt file2.txt ");
return 1;
}
// Initialize input vectors
arrayfile_a = fopen(argv[1], "r");
arrayfile_b = fopen(argv[2], "r");
arrayfile_c = fopen("ee16b068_3_out.txt", "w");
// Read first two arrays
printf("\nArray A (first 10 values) \n ");
for (loop = 0; loop < N; loop++)
{
fscanf(arrayfile_a, "%f", &h_A[loop]);
if (loop<10){
printf("%f ", h_A[loop]);
}
}
printf("\nArray B (first 10 values) \n ");
for (loop = 0; loop < N; loop++)
{
fscanf(arrayfile_b, "%f", &h_B[loop]);
if (loop<10){
printf("%f ", h_B[loop]);
}
}
//printf("Array A (first 10 values) \n ");
//for(loop = 0; loop < N; loop++){
//h_A[loop] = rand() % 100 + 1;
//if (loop<10){
// printf("%f ", h_A[loop]);
//}
//}
/* printf("\nArray B (first 10 values) \n ");
for(loop = 0; loop < N; loop++){
h_B[loop] = rand() % 100 + 1;
if (loop<10){
printf("%f ", h_B[loop]);
}
} */
// Allocate vectors in device memory
float* d_A; cudaMalloc(&d_A, size);
float* d_B; cudaMalloc(&d_B, size);
float* d_C; cudaMalloc(&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice);
// Invoke kernel
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) /threadsPerBlock;
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B, d_C, N);
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size,cudaMemcpyDeviceToHost);
printf("\nArray C (first 10 outputs)\n");
for(loop = 0; loop < 10; loop++)
printf("%f ", h_C[loop]);
// Log outputs
printf("\nWritting to file ee16b068_3_out.txt as <vec a> <vec b> <vec>");
for (loop=0;loop<N;loop++){
fprintf(arrayfile_c,"%f %f %f\n",h_A[loop],h_B[loop],h_C[loop]);
}
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
fclose(arrayfile_a); fclose(arrayfile_b); fclose(arrayfile_c);
return 0;
} |
023a4303cba14b01ceefb234ce4e26e817551328.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<real>(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
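// Contiguous tensors are cleared with a single asynchronous memset;
// non-contiguous tensors fall back to a pointwise fill of zeros.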
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<real>(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
// value due to 0-based indexing). If the nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
THLongStorage_data(size)[dim] = result_dim_size;
}
THCTensor_(resizeLegacy)(state, result, size, NULL);
THLongStorage_free(size);
// We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, inputs, numInputs) &&
THCTensor_all32BitIndexable(state, inputs, numInputs) &&
THCTensor_allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel parameter: a device-side batch of per-input metadata (data pointer, output offset, size along the cat dimension, element count)
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<real, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream->stream, data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(hipMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
hipMemcpyHostToDevice,
stream->stream));
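// Record the stream event for this pinned buffer before freeing it, so it is not
// reused until the async copy above has completed.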
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where the x dim fills half the GPU and the y dim is the number of tensors.
//This will have catting two tensors fill the entire grid, but prevents many
//threads from needlessly loading metadata if their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(hipGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if TORCH_HIP_VERSION >= 7000
hipStream_t stream = THCState_getCurrentStream(state);
#endif
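// First pass: copy_if writes the linear index of every nonzero element of self into
// column 0 of the N x num_dim output (stride num_dim). The loop further below then
// expands each linear index into per-dimension coordinates, innermost dimension first.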
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
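// Offset of the first element of the k-th diagonal: k columns to the right for k >= 0,
// |k| rows down for k < 0.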
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<real>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
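// A 1-D view of length min(n, m) with stride (stride0 + stride1) steps along the main
// diagonal of self_, so filling it with 1 yields the identity pattern.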
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->_dim() == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_ // if r_ is contiguous we can work on it directly
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(hipGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(hipGetLastError());
}
#endif
| 023a4303cba14b01ceefb234ce4e26e817551328.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMath.cu"
#else
THC_API void
THCTensor_(fill)(THCState* state, THCTensor *self_, real value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<real>(
state, self_, TensorFillOp<real>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(real) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<real>(
state, self_,
TensorFillOp<real>(ScalarConvert<int, real>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(zero)(state, r_);
}
THC_API void
THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor *input)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, r_, input));
THCTensor_(resizeAs)(state, r_, input);
THCTensor_(fill)(state, r_, ScalarConvert<int, real>::to(1));
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = THCTensor_(nDimension)(state, first);
int second_dims = THCTensor_(nDimension)(state, second);
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
THLongStorage *size;
int i, j, cohortMax;
int64_t offset;
bool hasEmptyInput = false;
THCTensor *notEmptyTensor = NULL;
// Even in the case where dimension is negative (i.e. when we want
// to cat along the last dimension), this logic still works, as the
// loop below will overwrite the value
int nDims = dimension + 1;
// cat_dimension is the actual dimension we cat along
int cat_dimension = dimension;
for (i = 0; i < numInputs; i++)
{
int inputDim = THCTensor_(nDimension)(state, inputs[i]);
hasEmptyInput |= !inputDim;
if (inputDim > 0) {
nDims = inputDim;
notEmptyTensor = inputs[i];
}
}
// If all inputs are empty tensors, return an empty tensor
if (notEmptyTensor == NULL) {
return;
}
// In the event that the user specified -1 as the concat dimension, then
// we want to pick the nDims as dimension to cat along (and thus nDims - 1 as the
// value due to 0-based indexing). If the nDims is 0 (i.e. we are catting all
// empty tensors), then we set cat_dimension to be 0
if (dimension + TH_INDEX_BASE == -1) {
cat_dimension = nDims ? (nDims - 1) : 0;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE);
size = THLongStorage_newWithSize(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (THCTensor_(nDimension)(state, tensor) == 0) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notEmptyTensor, tensor, cat_dimension);
cat_dim_size += THCTensor_(size)(state, tensor, cat_dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notEmptyTensor, dim);
if (dim == cat_dimension) {
result_dim_size = cat_dim_size;
}
THLongStorage_data(size)[dim] = result_dim_size;
}
THCTensor_(resizeLegacy)(state, result, size, NULL);
THLongStorage_free(size);
// We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
if (numInputs > 1 &&
!hasEmptyInput &&
THCTensor_(nDimension)(state, result) <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, inputs, numInputs) &&
THCTensor_all32BitIndexable(state, inputs, numInputs) &&
THCTensor_allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
real *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<real, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
CatArrInputTensor<real, unsigned int> *d_inputs;
THCudaCheck(THCudaMalloc(state, (void**) &d_inputs, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
THCStream* stream = THCState_getStream(state);
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<real, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream->stream>>>(data, d_inputs, param, cat_dimension, param.outputStride[cat_dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
CatArrInputTensor<real, unsigned int>* stackInputs = (CatArrInputTensor<real, unsigned int>*) THCudaHostAlloc(state, tensorMetadataSize);
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[i+j])
? THCTensor_(size)(state, inputs[i+j], cat_dimension)
: 1;
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(cudaMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<real, unsigned int>),
cudaMemcpyHostToDevice,
stream->stream));
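// Record the stream event for this pinned buffer before freeing it, so it is not
// reused until the async copy above has completed.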
THCudaHostRecord(state, stackInputs);
THCudaHostFree(state, stackInputs);
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where the x dim fills half the GPU and the y dim is the number of tensors.
//This will have catting two tensors fill the entire grid, but prevents many
//threads from needlessly loading metadata if their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(cudaGetLastError());
}
THCudaCheck(THCudaFree(state, d_inputs));
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
// No reason to copy when input is empty
if (!THCTensor_(nDimension)(state, inputs[j])) continue;
int64_t dimSize = cat_dimension < THCTensor_(nDimension)(state, inputs[j])
? THCTensor_(size)(state, inputs[j], cat_dimension)
: 1;
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, cat_dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int64_t N = THCTensor_(nElement)(state, self);
THCudaLongTensor_resize2d(state, tensor, N, num_dim);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim, num_dim);
#if CUDA_VERSION >= 7000
cudaStream_t stream = THCState_getCurrentStream(state);
#endif
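// First pass: copy_if writes the linear index of every nonzero element of self into
// column 0 of the N x num_dim output (stride num_dim). The loop further below then
// expands each linear index into per-dimension coordinates, innermost dimension first.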
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<real>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, self->size[dim])
);
div *= self->size[dim];
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
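// Offset of the first element of the k-th diagonal: k columns to the right for k >= 0,
// |k| rows down for k < 0.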
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THCTensor_(stride)(state, src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<real><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(eye)(THCState *state, THCTensor *self_, int64_t n, int64_t m)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
THArgCheck(n > 0, 1, "invalid argument");
if(m <= 0)
m = n;
THCTensor_(resize2d)(state, self_, n, m);
THCTensor_(zero)(state, self_);
int64_t sz = THMin(n, m);
int64_t stride = THCTensor_(stride)(state, self_, 0) +
THCTensor_(stride)(state, self_, 1);
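// A 1-D view of length min(n, m) with stride (stride0 + stride1) steps along the main
// diagonal of self_, so filling it with 1 yields the identity pattern.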
THCTensor *diag = THCTensor_(newWithStorage1d)(state, self_->storage,
self_->storageOffset, sz, stride);
THCTensor_(fill)(state, diag, ScalarConvert<int, real>::to(1));
THCTensor_(free)(state, diag);
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((src_->_dim() == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(linspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, a);
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_ // if r_ is contiguous we can work on it directly
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LinspaceOp<real> linspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, linspace_method);
if (!THCTensor_(isContiguous)(state, r_)) { // We need to move data back to r_
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(logspace)(THCState *state, THCTensor *r_, real a, real b, int64_t n) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points");
if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<real>::exp10(a));
else {
THCTensor *r = THCTensor_(isContiguous)(state, r_)
? r_
: THCTensor_(newContiguous)(state, r_);
real step = THCNumerics<real>::div(THCNumerics<real>::sub(b, a),
ScalarConvert<int64_t,real>::to(n - 1));
LogspaceOp<real> logspace_method(a, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + n, logspace_method);
if (!THCTensor_(isContiguous)(state, r_)) {
THCTensor_(freeCopyTo)(state, r, r_);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) (((xmax - xmin) / step) + 1);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(arange)(THCState* state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin))
, 2, "upper bound and larger bound inconsistent with step sign");
ptrdiff_t size = (ptrdiff_t) ceil(ScalarConvert<accreal, double>::to(xmax - xmin) / step);
if (THCTensor_(nElement)(state, r_) != size) THCTensor_(resize1d)(state, r_, size);
THCTensor *r = THCTensor_(newContiguous)(state, r_);
LinspaceOp<real,accreal> linspace_method(xmin, step);
thrust::device_ptr<real> data_(THCTensor_(data)(state, r));
thrust::tabulate(data_, data_ + size, linspace_method);
THCTensor_(freeCopyTo)(state, r, r_);
THCudaCheck(cudaGetLastError());
}
#endif
|
ff8c5916e75d0e2e1b21bf2b3d1d6aba9a0bf60a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <functions/linearReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
raft::allocate(in, len, stream);
raft::allocate(out, 1, stream);
raft::allocate(out_lasso, 1, stream);
raft::allocate(out_ridge, 1, stream);
raft::allocate(out_elasticnet, 1, stream);
raft::allocate(out_grad, n_cols, stream);
raft::allocate(out_lasso_grad, n_cols, stream);
raft::allocate(out_ridge_grad, n_cols, stream);
raft::allocate(out_elasticnet_grad, n_cols, stream);
raft::allocate(out_ref, 1, stream);
raft::allocate(out_lasso_ref, 1, stream);
raft::allocate(out_ridge_ref, 1, stream);
raft::allocate(out_elasticnet_ref, 1, stream);
raft::allocate(out_grad_ref, n_cols, stream);
raft::allocate(out_lasso_grad_ref, n_cols, stream);
raft::allocate(out_ridge_grad_ref, n_cols, stream);
raft::allocate(out_elasticnet_grad_ref, n_cols, stream);
raft::allocate(labels, params.n_rows, stream);
raft::allocate(coef, params.n_cols, stream);
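// Host-side fixtures: the 6-element input matrix (3 rows x 2 cols), 3 labels,
// 2 coefficients, and precomputed reference losses/gradients for each penalty type.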
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out,
penalty::NONE,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_grad,
penalty::NONE,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_lasso,
penalty::L1,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_lasso_grad,
penalty::L1,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_ridge,
penalty::L2,
alpha,
l1_ratio,
stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_ridge_grad,
penalty::L2,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_elasticnet,
penalty::ELASTICNET,
alpha,
l1_ratio,
stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_elasticnet_grad,
penalty::ELASTICNET,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override
{
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T* in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result)
{
ASSERT_TRUE(devArrMatch(out_ref, out, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_lasso_ref, out_lasso, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_ridge_ref, out_ridge, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_elasticnet_ref, out_elasticnet, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_grad_ref, out_grad, params.n_cols, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref,
out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref,
out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref,
out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result)
{
ASSERT_TRUE(devArrMatch(out_ref, out, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_lasso_ref, out_lasso, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_ridge_ref, out_ridge, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_elasticnet_ref, out_elasticnet, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_grad_ref, out_grad, params.n_cols, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref,
out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref,
out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref,
out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| ff8c5916e75d0e2e1b21bf2b3d1d6aba9a0bf60a.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <functions/linearReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
raft::allocate(in, len, stream);
raft::allocate(out, 1, stream);
raft::allocate(out_lasso, 1, stream);
raft::allocate(out_ridge, 1, stream);
raft::allocate(out_elasticnet, 1, stream);
raft::allocate(out_grad, n_cols, stream);
raft::allocate(out_lasso_grad, n_cols, stream);
raft::allocate(out_ridge_grad, n_cols, stream);
raft::allocate(out_elasticnet_grad, n_cols, stream);
raft::allocate(out_ref, 1, stream);
raft::allocate(out_lasso_ref, 1, stream);
raft::allocate(out_ridge_ref, 1, stream);
raft::allocate(out_elasticnet_ref, 1, stream);
raft::allocate(out_grad_ref, n_cols, stream);
raft::allocate(out_lasso_grad_ref, n_cols, stream);
raft::allocate(out_ridge_grad_ref, n_cols, stream);
raft::allocate(out_elasticnet_grad_ref, n_cols, stream);
raft::allocate(labels, params.n_rows, stream);
raft::allocate(coef, params.n_cols, stream);
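// Host-side fixtures: the 6-element input matrix (3 rows x 2 cols), 3 labels,
// 2 coefficients, and precomputed reference losses/gradients for each penalty type.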
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols, stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols, stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref, n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out,
penalty::NONE,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_grad,
penalty::NONE,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_lasso,
penalty::L1,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_lasso_grad,
penalty::L1,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_ridge,
penalty::L2,
alpha,
l1_ratio,
stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_ridge_grad,
penalty::L2,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_elasticnet,
penalty::ELASTICNET,
alpha,
l1_ratio,
stream);
linearRegLossGrads(handle,
in,
params.n_rows,
params.n_cols,
labels,
coef,
out_elasticnet_grad,
penalty::ELASTICNET,
alpha,
l1_ratio,
stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override
{
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T* in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref, *out_elasticnet_grad_ref;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result)
{
ASSERT_TRUE(devArrMatch(out_ref, out, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_lasso_ref, out_lasso, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_ridge_ref, out_ridge, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_elasticnet_ref, out_elasticnet, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_grad_ref, out_grad, params.n_cols, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref,
out_lasso_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref,
out_ridge_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref,
out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result)
{
ASSERT_TRUE(devArrMatch(out_ref, out, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_lasso_ref, out_lasso, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(
devArrMatch(out_ridge_ref, out_ridge, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_elasticnet_ref, out_elasticnet, 1, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(
out_grad_ref, out_grad, params.n_cols, raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref,
out_lasso_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref,
out_ridge_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref,
out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD, ::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
2339e2218c3b568ba723f5a98abd915c9092a10d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// 3D Oliveira 3D (we think we are the first)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
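// Attach the tree whose root has the larger index under the smaller root via atomicMin;
// if another thread relinked the root concurrently, follow the returned old value and retry.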
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
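// With 26-connectivity it suffices to union with the 13 neighbours that precede this
// voxel in scan order: 9 in the previous plane, 3 in the previous row, and the
// previous voxel in the current row.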
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
{
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_3D : public GpuLabeling3D<Connectivity3D::CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
hipDeviceSynchronize();
}
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
| 2339e2218c3b568ba723f5a98abd915c9092a10d.cu | // Copyright (c) 2020, the YACCLAB contributors, as
// shown by the AUTHORS file. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// 3D Oliveira 3D (we think we are the first)
#define BLOCK_X 4
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Returns the root index of the UFTree
__device__ unsigned Find(const int *s_buf, unsigned n) {
// Warning: do not call Find on a background pixel
unsigned label = s_buf[n];
assert(label > 0);
while (label - 1 != n) {
n = label - 1;
label = s_buf[n];
assert(label > 0);
}
return n;
}
// Merges the UFTrees of a and b, linking one root to the other
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
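// Attach the tree whose root has the larger index under the smaller root via atomicMin;
// if another thread relinked the root concurrently, follow the returned old value and retry.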
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a + 1);
done = (old == b + 1);
b = old - 1;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b + 1);
done = (old == a + 1);
a = old - 1;
}
else {
done = true;
}
} while (!done);
}
__global__ void Initialization(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (img[img_index]) {
labels[labels_index] = labels_index + 1;
}
else {
labels[labels_index] = 0;
}
}
}
__global__ void Merge(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
if (labels[labels_index]) {
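// With 26-connectivity it suffices to union with the 13 neighbours that precede this
// voxel in scan order: 9 in the previous plane, 3 in the previous row, and the
// previous voxel in the current row.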
if (z > 0) {
unsigned current_plane = labels_index - (labels.stepz / labels.elem_size);
if (y > 0) {
unsigned current_row = current_plane - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
unsigned current_row = current_plane;
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
if (y + 1 < labels.y) {
unsigned current_row = current_plane + (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
}
{
if (y > 0) {
unsigned current_row = labels_index - (labels.stepy / labels.elem_size);
if (x > 0 && labels[current_row - 1]) {
Union(labels.data, labels_index, current_row - 1);
}
if (labels[current_row]) {
Union(labels.data, labels_index, current_row);
}
if (x + 1 < labels.x && labels[current_row + 1]) {
Union(labels.data, labels_index, current_row + 1);
}
}
{
if (x > 0 && labels[labels_index - 1]) {
Union(labels.data, labels_index, labels_index - 1);
}
}
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = blockIdx.x * BLOCK_X + threadIdx.x;
unsigned y = blockIdx.y * BLOCK_Y + threadIdx.y;
unsigned z = blockIdx.z * BLOCK_Z + threadIdx.z;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
unsigned int val = labels[labels_index];
if (val) {
labels[labels_index] = Find(labels.data, labels_index) + 1;
}
}
}
}
class UF_3D : public GpuLabeling3D<Connectivity3D::CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
public:
UF_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
//cuda::PtrStep3b ptr_step_prima(d_img_labels_);
// Phase 1
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
//cuda::PtrStepSz3i ptr_step_size(d_img_labels_);
// Debug image of the first phase
//cuda::GpuMat d_local_labels;
//d_img_labels_.copyTo(d_local_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
//Mat1i local_labels(img_.size());
//d_local_labels.download(local_labels);
// Phase 2
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
// Debug image of the second phase
//cuda::GpuMat d_global_labels;
//d_img_labels_.copyTo(d_global_labels);
//PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
//Mat1i global_labels(img_.size());
//d_global_labels.download(global_labels);
// Phase 3
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
//d_img_labels_.download(img_labels_);
//Mat errors;
//bool correct = CheckLabeledVolume(img_, img_labels_, errors);
//volwrite("C:\\Users\\Stefano\\Desktop\\debug\\UF_errors", errors);
}
private:
double Alloc() {
perf_.start();
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
perf_.stop();
return perf_.last();
}
double Dealloc() {
perf_.start();
perf_.stop();
return perf_.last();
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void LocalScan() {
grid_size_ = dim3((d_img_.x + BLOCK_X - 1) / BLOCK_X, (d_img_.y + BLOCK_Y - 1) / BLOCK_Y, (d_img_.z + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
Initialization << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
cudaDeviceSynchronize();
}
void GlobalScan() {
Merge << <grid_size_, block_size_ >> > (d_img_labels_);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
double alloc_timing = Alloc();
perf_.start();
LocalScan();
GlobalScan();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
double dealloc_timing = Dealloc();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(UF_3D);
|
1c44780a6892ceebd060449c2e1892e6ede58008.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuNVSM/gradient_check.h"
template <typename ModelT>
bool GradientCheckFn<ModelT>::operator()(
ModelT* const model,
const typename ModelT::Batch& batch,
const typename ModelT::ForwardResult& result,
const typename ModelT::Gradients& gradients,
const FloatT epsilon,
const FloatT relative_error_threshold,
const std::stringstream& rng_state,
RNG* const rng) {
PROFILE_FUNCTION();
DCHECK(model->initialized());
CHECK_GE(epsilon, 0.0);
CHECK_GE(relative_error_threshold, 0.0);
DCHECK(!rng_state.eof() && rng_state.good());
// Disable verbose logging;
const int32 verbose_loglevel = FLAGS_v;
FLAGS_v = 1;
const hipStream_t stream = DefaultStream::get()->next();
// Sanity check to make sure we're getting the right RNG state.
CHECK_EQ(result.get_cost(),
model->get_cost(batch, &rng_state, rng));
bool checked = true;
for (const auto& pair : model->params_) {
const ParamIdentifier param_id = pair.first;
Parameters<FloatT>* const param = pair.second;
Storage<FloatT>* const storage =
dynamic_cast<Storage<FloatT>*>(pair.second);
CHECK_NOTNULL(storage);
for (size_t param_idx = 0; param_idx < storage->num_parameters(); ++param_idx) {
const FloatT gradient_predict =
- param->get_parameter_gradient(gradients, param_idx); // TODO(cvangysel): remove negation
// Add epsilon to weight.
storage->increment_parameter(param_idx, epsilon);
// Compute cost with epsilon added to weight.
const FloatT cost_added_epsilon = model->get_cost(
batch, &rng_state, rng);
// Subtract epsilon from weight.
storage->increment_parameter(param_idx, -2.0 * epsilon);
// Compute cost with epsilon removed from weight.
const FloatT cost_removed_epsilon = model->get_cost(
batch, &rng_state, rng);
// Restore original weight.
storage->increment_parameter(param_idx, epsilon);
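// Central finite-difference approximation: (f(w + eps) - f(w - eps)) / (2 * eps),
// compared below against the analytic gradient via the relative error.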
const FloatT gradient_approx =
(cost_added_epsilon - cost_removed_epsilon) /
(2.0 * epsilon);
const FloatT relative_error =
abs(gradient_predict - gradient_approx) /
max(abs(gradient_predict), abs(gradient_approx));
const FloatT ratio = (gradient_approx != 0.0) ?
(gradient_predict / gradient_approx) : NAN;
if (gradient_predict * gradient_approx < 0.0) {
LOG(ERROR) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " has gradient with incorrect direction "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
checked = false;
} else if (relative_error >= relative_error_threshold) {
VLOG(1) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " most likely has incorrect gradient "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
if (!std::isnan(ratio)) {
checked = false;
}
} else if ((gradient_approx != 0.0) || (gradient_predict != 0.0)) {
VLOG(2) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " has correct gradient "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
}
// TODO(cvangysel): CHECK_DOUBLE_EQ was failing here, while the gradient was checking.
// This happened after upgrading to CUDA 8; it's probably better to check for the relative error here.
CHECK_NEAR(
result.get_cost(),
model->get_cost(batch, &rng_state, rng),
1e-5);
}
}
// TODO(cvangysel): same comment as above.
// Sanity check to make sure we're getting the right RNG state.
CHECK_NEAR(
result.get_cost(),
model->get_cost(batch, &rng_state, rng),
1e-5);
// Enable verbose logging again.
FLAGS_v = verbose_loglevel;
if (!checked) {
CHECK_NOTNULL(result.get_similarity_probs());
FloatT* data = get_array(result.get_similarity_probs()->getStream(),
*result.get_similarity_probs());
std::vector<FloatT> probs(data, data + result.get_similarity_probs()->size());
delete [] data;
LOG(ERROR) << "Similarity probs: " << probs;
}
return checked;
}
// Explicit instantiations.
template class GradientCheckFn<DefaultModel>;
template class GradientCheckFn<Model<EntityEntity::Objective>>;
template class GradientCheckFn<Model<TermTerm::Objective>>;
template class GradientCheckFn<Model<TextEntityEntityEntity::Objective>>;
template class GradientCheckFn<Model<TextEntityTermTerm::Objective>>;
| 1c44780a6892ceebd060449c2e1892e6ede58008.cu | #include "cuNVSM/gradient_check.h"
template <typename ModelT>
bool GradientCheckFn<ModelT>::operator()(
ModelT* const model,
const typename ModelT::Batch& batch,
const typename ModelT::ForwardResult& result,
const typename ModelT::Gradients& gradients,
const FloatT epsilon,
const FloatT relative_error_threshold,
const std::stringstream& rng_state,
RNG* const rng) {
PROFILE_FUNCTION();
DCHECK(model->initialized());
CHECK_GE(epsilon, 0.0);
CHECK_GE(relative_error_threshold, 0.0);
DCHECK(!rng_state.eof() && rng_state.good());
// Disable verbose logging;
const int32 verbose_loglevel = FLAGS_v;
FLAGS_v = 1;
const cudaStream_t stream = DefaultStream::get()->next();
// Sanity check to make sure we're getting the right RNG state.
CHECK_EQ(result.get_cost(),
model->get_cost(batch, &rng_state, rng));
bool checked = true;
for (const auto& pair : model->params_) {
const ParamIdentifier param_id = pair.first;
Parameters<FloatT>* const param = pair.second;
Storage<FloatT>* const storage =
dynamic_cast<Storage<FloatT>*>(pair.second);
CHECK_NOTNULL(storage);
for (size_t param_idx = 0; param_idx < storage->num_parameters(); ++param_idx) {
const FloatT gradient_predict =
- param->get_parameter_gradient(gradients, param_idx); // TODO(cvangysel): remove negation
// Add epsilon to weight.
storage->increment_parameter(param_idx, epsilon);
// Compute cost with epsilon added to weight.
const FloatT cost_added_epsilon = model->get_cost(
batch, &rng_state, rng);
// Subtract epsilon from weight.
storage->increment_parameter(param_idx, -2.0 * epsilon);
// Compute cost with epsilon removed from weight.
const FloatT cost_removed_epsilon = model->get_cost(
batch, &rng_state, rng);
// Restore original weight.
storage->increment_parameter(param_idx, epsilon);
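// Central finite-difference approximation: (f(w + eps) - f(w - eps)) / (2 * eps),
// compared below against the analytic gradient via the relative error.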
const FloatT gradient_approx =
(cost_added_epsilon - cost_removed_epsilon) /
(2.0 * epsilon);
const FloatT relative_error =
abs(gradient_predict - gradient_approx) /
max(abs(gradient_predict), abs(gradient_approx));
const FloatT ratio = (gradient_approx != 0.0) ?
(gradient_predict / gradient_approx) : NAN;
if (gradient_predict * gradient_approx < 0.0) {
LOG(ERROR) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " has gradient with incorrect direction "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
checked = false;
} else if (relative_error >= relative_error_threshold) {
VLOG(1) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " most likely has incorrect gradient "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
if (!std::isnan(ratio)) {
checked = false;
}
} else if ((gradient_approx != 0.0) || (gradient_predict != 0.0)) {
VLOG(2) << "Parameter " << param_idx << " of "
<< ParamName[param_id] << " has correct gradient "
<< "(approx=" << gradient_approx << ", "
<< "predict=" << gradient_predict << ", "
<< "ratio=" << ratio << ", "
<< "relative error=" << relative_error << ").";
}
// TODO(cvangysel): CHECK_DOUBLE_EQ was failing here, while the gradient was checking.
// This happened after upgrading to CUDA 8; it's probably better to check for the relative error here.
CHECK_NEAR(
result.get_cost(),
model->get_cost(batch, &rng_state, rng),
1e-5);
}
}
// TODO(cvangysel): same comment as above.
// Sanity check to make sure we're getting the right RNG state.
CHECK_NEAR(
result.get_cost(),
model->get_cost(batch, &rng_state, rng),
1e-5);
// Enable verbose logging again.
FLAGS_v = verbose_loglevel;
if (!checked) {
CHECK_NOTNULL(result.get_similarity_probs());
FloatT* data = get_array(result.get_similarity_probs()->getStream(),
*result.get_similarity_probs());
std::vector<FloatT> probs(data, data + result.get_similarity_probs()->size());
delete [] data;
LOG(ERROR) << "Similarity probs: " << probs;
}
return checked;
}
// Explicit instantiations.
template class GradientCheckFn<DefaultModel>;
template class GradientCheckFn<Model<EntityEntity::Objective>>;
template class GradientCheckFn<Model<TermTerm::Objective>>;
template class GradientCheckFn<Model<TextEntityEntityEntity::Objective>>;
template class GradientCheckFn<Model<TextEntityTermTerm::Objective>>;
|
254dc9b1bac71b28b2942b6de86f480cc7da5030.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <marginalized/kernel.h>
#include <misc/numpy_type.h>
using namespace graphdot;
using namespace numpy_type;
${node_kernel}
${edge_kernel}
using node_t = ${node_t};
using edge_t = ${edge_t};
using graph_t = graphdot::marginalized::graph_t<node_t, edge_t>;
using scratch_t = graphdot::marginalized::block_scratch;
using job_t = graphdot::marginalized::job_t;
using solver_t = graphdot::marginalized::octile_block_solver<graph_t>;
__constant__ char shmem_bytes_per_warp[solver_t::shmem_bytes_per_warp];
extern "C" {
__global__ void graph_kernel_solver(
graph_t const * graphs,
scratch_t * scratch,
job_t * jobs,
unsigned int * i_job_global,
const unsigned n_jobs,
const float q,
const float q0,
const int lmin
) {
extern __shared__ char shmem[];
__shared__ unsigned int i_job;
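// Persistent-kernel job queue: thread 0 of each block atomically grabs the next job index;
// the block exits once all jobs are consumed.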
while (true) {
if (threadIdx.x == 0) i_job = atomicInc(i_job_global, 0xFFFFFFFF);
__syncthreads();
if (i_job >= n_jobs) break;
solver_t::compute<node_kernel, edge_kernel> (
graphs[ jobs[i_job].i ],
graphs[ jobs[i_job].j ],
scratch[ blockIdx.x ],
shmem, q, q0, lmin,
jobs[i_job].vr);
__syncthreads();
}
}
}
| 254dc9b1bac71b28b2942b6de86f480cc7da5030.cu | #include <marginalized/kernel.h>
#include <misc/numpy_type.h>
using namespace graphdot;
using namespace numpy_type;
${node_kernel}
${edge_kernel}
using node_t = ${node_t};
using edge_t = ${edge_t};
using graph_t = graphdot::marginalized::graph_t<node_t, edge_t>;
using scratch_t = graphdot::marginalized::block_scratch;
using job_t = graphdot::marginalized::job_t;
using solver_t = graphdot::marginalized::octile_block_solver<graph_t>;
__constant__ char shmem_bytes_per_warp[solver_t::shmem_bytes_per_warp];
extern "C" {
__global__ void graph_kernel_solver(
graph_t const * graphs,
scratch_t * scratch,
job_t * jobs,
unsigned int * i_job_global,
const unsigned n_jobs,
const float q,
const float q0,
const int lmin
) {
extern __shared__ char shmem[];
__shared__ unsigned int i_job;
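// Persistent-kernel job queue: thread 0 of each block atomically grabs the next job index;
// the block exits once all jobs are consumed.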
while (true) {
if (threadIdx.x == 0) i_job = atomicInc(i_job_global, 0xFFFFFFFF);
__syncthreads();
if (i_job >= n_jobs) break;
solver_t::compute<node_kernel, edge_kernel> (
graphs[ jobs[i_job].i ],
graphs[ jobs[i_job].j ],
scratch[ blockIdx.x ],
shmem, q, q0, lmin,
jobs[i_job].vr);
__syncthreads();
}
}
}
|
89bb9c74f20d5d1956aff48f5596c4b09f9416b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <math.h>
#include <iostream>
#include "libarff/arff_parser.h"
#include "libarff/arff_data.h"
using namespace std;
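// sKNN: one thread per (row, column) pair computes the Euclidean distance between the two
// instances; the diagonal is set to FLT_MAX so a point is never its own neighbour, and the
// k-nearest distance buffer is initialised to FLT_MAX.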
__global__ void sKNN(float *attr, int *val, int n_att, int n_inst, float *distance, int com, float *smallestDistance){
//calculate tid
int column = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
int tid = (blockDim.x*gridDim.x*row)+column;
if (tid < n_inst * com){
smallestDistance[tid] = FLT_MAX;
}
if (column < n_inst && row < n_inst){
float diff;
distance[row * n_inst + column] = 0; //Distance to 0 so the first one is a min distance
for(int k = 0; k < n_att; k++) // compute the distance between the two instances
{
// consider loading val j into shared memory to avoid stride problems
diff = attr[row* n_att +k] - attr[column*n_att+k];
distance[row * n_inst + column] += diff * diff;
}
distance[row * n_inst + column] = sqrt(distance[row * n_inst + column]);
if (row == column){ // when it is the same point
distance[row * n_inst + column] = FLT_MAX;
}
//for(int a = 0; a<n_inst; a++){
// for(int b = 0; b<n_inst; b++){
// if (row == a && column == b){
// printf("element (%d, %d): %f \n",a, b, distance[row * n_inst + column]);
// }
// }
//}
}
}
// End the kernel here, do the sort with thrust, and then another kernel
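// pred: each thread classifies one instance by keeping the k (com) smallest distances in a
// sorted window (insertion) and then taking a majority vote over the corresponding classes.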
__global__ void pred(int *pred, int com, int n_inst, float *distance, float *smallestDistance, int* smallestDistanceClass, int *val){
int tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if (tid < n_inst){
for(int j = 0; j < n_inst; j++){
for (int n = tid * com ; n<tid * com + com; n++)
{
if(distance[n_inst*j+tid] < smallestDistance[n]) // select the closest one
{
for (int t=tid * com + com-1; t>n; t--)
{
smallestDistance[t] = smallestDistance[t-1];
smallestDistanceClass[t] = smallestDistanceClass[t-1];
}
smallestDistance[n] = distance[n_inst*j+tid];
smallestDistanceClass[n] = val[j];
break;
}
}
}
int freq = 0;
int predict=0;
for ( int m = tid * com; m<tid * com + com; m++)
{
int tfreq = 1;
int tpredict=smallestDistanceClass[m];
for (int s = m+1 ; s< tid * com + com; s++)
{
if (tpredict==smallestDistanceClass[s])
{
tfreq++;
}
}
if (tfreq>freq)
{
predict=smallestDistanceClass[m];
freq=tfreq;
}
}
pred[tid]= predict;
}
}
int* KNN(ArffData* dataset, int com)
{
int threadperblockdim = 16;
int griddim = (dataset->num_instances() + threadperblockdim - 1) / threadperblockdim;
dim3 blocksize(threadperblockdim,threadperblockdim);
dim3 gridsize(griddim,griddim);
int n_att = dataset->num_attributes() - 1;
int n_inst = dataset->num_instances();
int *h_pred= (int*)malloc(n_inst * sizeof(int));
int *h_val= (int*)malloc(n_inst * sizeof(int));
float *h_at= (float*)malloc(n_inst * n_att * sizeof(float));
float *d_at, *d_dist;
int *d_val, *d_pred;
hipMalloc(&d_at, n_inst * n_att * sizeof(float));
hipMalloc(&d_dist, n_inst * n_inst * sizeof(float));
hipMalloc(&d_val, n_inst* sizeof(int));
hipMalloc(&d_pred, n_inst* sizeof(int));
float* smallestDistance;
int* smallestDistanceClass;
hipMalloc(&smallestDistance,n_inst * com * sizeof(float));
hipMalloc(&smallestDistanceClass,n_inst * com * sizeof(int));
for (int i = 0; i<n_inst; i++){
h_val[i] = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
for( int k = 0; k < n_att; k++){
h_at[i*n_att+k] = dataset->get_instance(i)->get(k)->operator float();
}
}
hipMemcpy(d_val,h_val, n_inst* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_at,h_at, n_att * n_inst* sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sKNN), dim3(gridsize) , dim3(blocksize), 0, 0, d_at, d_val, n_att, n_inst, d_dist, com, smallestDistance);
int threadperblock = 256;
int blocks = (dataset->num_instances() + threadperblock - 1) / threadperblock;
hipLaunchKernelGGL(( pred), dim3(blocks) ,dim3(threadperblock), 0, 0, d_pred, com, n_inst, d_dist, smallestDistance, smallestDistanceClass, d_val);
hipMemcpy(h_pred, d_pred, n_inst* sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_at);
hipFree(d_dist);
hipFree(d_val);
hipFree(d_pred);
hipFree(smallestDistance);
hipFree(smallestDistanceClass);
return h_pred;
}
int* computeConfusionMatrix(int* predictions, ArffData* dataset)
{
int* confusionMatrix = (int*)calloc(dataset->num_classes() * dataset->num_classes(), sizeof(int)); // matrix of size numberClasses x numberClasses
for(int i = 0; i < dataset->num_instances(); i++) // for each instance compare the true class and predicted class
{
int trueClass = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
int predictedClass = predictions[i];
confusionMatrix[trueClass*dataset->num_classes() + predictedClass]++;
}
return confusionMatrix;
}
float computeAccuracy(int* confusionMatrix, ArffData* dataset)
{
int successfulPredictions = 0;
for(int i = 0; i < dataset->num_classes(); i++)
{
successfulPredictions += confusionMatrix[i*dataset->num_classes() + i]; // elements in the diagonal are correct predictions
}
return successfulPredictions / (float) dataset->num_instances();
}
int main(int argc, char *argv[])
{
if(argc != 3)
{
cout << "Usage: ./main datasets/datasetFile.arff" << endl;
cout << "Usage: k value" << endl;
exit(0);
}
ArffParser parser(argv[1]);
ArffData *dataset = parser.parse();
struct timespec start, end;
int k;
sscanf(argv[2], "%d", &k);
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int* predictions = KNN(dataset,k);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
int* confusionMatrix = computeConfusionMatrix(predictions, dataset);
float accuracy = computeAccuracy(confusionMatrix, dataset);
printf("The KNN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy);
}
| 89bb9c74f20d5d1956aff48f5596c4b09f9416b5.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <float.h>
#include <math.h>
#include <iostream>
#include "libarff/arff_parser.h"
#include "libarff/arff_data.h"
using namespace std;
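// sKNN: one thread per (row, column) pair computes the Euclidean distance between the two
// instances; the diagonal is set to FLT_MAX so a point is never its own neighbour, and the
// k-nearest distance buffer is initialised to FLT_MAX.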
__global__ void sKNN(float *attr, int *val, int n_att, int n_inst, float *distance, int com, float *smallestDistance){
//calculate tid
int column = (blockDim.x * blockIdx.x) + threadIdx.x;
int row = (blockDim.y * blockIdx.y) + threadIdx.y;
int tid = (blockDim.x*gridDim.x*row)+column;
if (tid < n_inst * com){
smallestDistance[tid] = FLT_MAX;
}
if (column < n_inst && row < n_inst){
float diff;
distance[row * n_inst + column] = 0; //Distance to 0 so the first one is a min distance
for(int k = 0; k < n_att; k++) // compute the distance between the two instances
{
// consider loading val j into shared memory to avoid stride problems
diff = attr[row* n_att +k] - attr[column*n_att+k];
distance[row * n_inst + column] += diff * diff;
}
distance[row * n_inst + column] = sqrt(distance[row * n_inst + column]);
if (row == column){ // when it is the same point
distance[row * n_inst + column] = FLT_MAX;
}
//for(int a = 0; a<n_inst; a++){
// for(int b = 0; b<n_inst; b++){
// if (row == a && column == b){
// printf("element (%d, %d): %f \n",a, b, distance[row * n_inst + column]);
// }
// }
//}
}
}
// End the kernel here, do the sort with thrust, and then another kernel
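// pred: each thread classifies one instance by keeping the k (com) smallest distances in a
// sorted window (insertion) and then taking a majority vote over the corresponding classes.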
__global__ void pred(int *pred, int com, int n_inst, float *distance, float *smallestDistance, int* smallestDistanceClass, int *val){
int tid = (blockDim.x * blockIdx.x) + threadIdx.x;
if (tid < n_inst){
for(int j = 0; j < n_inst; j++){
for (int n = tid * com ; n<tid * com + com; n++)
{
if(distance[n_inst*j+tid] < smallestDistance[n]) // select the closest one
{
for (int t=tid * com + com-1; t>n; t--)
{
smallestDistance[t] = smallestDistance[t-1];
smallestDistanceClass[t] = smallestDistanceClass[t-1];
}
smallestDistance[n] = distance[n_inst*j+tid];
smallestDistanceClass[n] = val[j];
break;
}
}
}
int freq = 0;
int predict=0;
for ( int m = tid * com; m<tid * com + com; m++)
{
int tfreq = 1;
int tpredict=smallestDistanceClass[m];
for (int s = m+1 ; s< tid * com + com; s++)
{
if (tpredict==smallestDistanceClass[s])
{
tfreq++;
}
}
if (tfreq>freq)
{
predict=smallestDistanceClass[m];
freq=tfreq;
}
}
pred[tid]= predict;
}
}
int* KNN(ArffData* dataset, int com)
{
int threadperblockdim = 16;
int griddim = (dataset->num_instances() + threadperblockdim - 1) / threadperblockdim;
dim3 blocksize(threadperblockdim,threadperblockdim);
dim3 gridsize(griddim,griddim);
int n_att = dataset->num_attributes() - 1;
int n_inst = dataset->num_instances();
int *h_pred= (int*)malloc(n_inst * sizeof(int));
int *h_val= (int*)malloc(n_inst * sizeof(int));
float *h_at= (float*)malloc(n_inst * n_att * sizeof(float));
float *d_at, *d_dist;
int *d_val, *d_pred;
cudaMalloc(&d_at, n_inst * n_att * sizeof(float));
cudaMalloc(&d_dist, n_inst * n_inst * sizeof(float));
cudaMalloc(&d_val, n_inst* sizeof(int));
cudaMalloc(&d_pred, n_inst* sizeof(int));
float* smallestDistance;
int* smallestDistanceClass;
cudaMalloc(&smallestDistance,n_inst * com * sizeof(float));
cudaMalloc(&smallestDistanceClass,n_inst * com * sizeof(int));
for (int i = 0; i<n_inst; i++){
h_val[i] = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
for( int k = 0; k < n_att; k++){
h_at[i*n_att+k] = dataset->get_instance(i)->get(k)->operator float();
}
}
cudaMemcpy(d_val,h_val, n_inst* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_at,h_at, n_att * n_inst* sizeof(float), cudaMemcpyHostToDevice);
sKNN<<<gridsize , blocksize>>>(d_at, d_val, n_att, n_inst, d_dist, com, smallestDistance);
int threadperblock = 256;
int blocks = (dataset->num_instances() + threadperblock - 1) / threadperblock;
pred<<<blocks ,threadperblock>>>(d_pred, com, n_inst, d_dist, smallestDistance, smallestDistanceClass, d_val);
cudaMemcpy(h_pred, d_pred, n_inst* sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_at);
cudaFree(d_dist);
cudaFree(d_val);
cudaFree(d_pred);
cudaFree(smallestDistance);
cudaFree(smallestDistanceClass);
return h_pred;
}
int* computeConfusionMatrix(int* predictions, ArffData* dataset)
{
int* confusionMatrix = (int*)calloc(dataset->num_classes() * dataset->num_classes(), sizeof(int)); // matrix of size numberClasses x numberClasses
for(int i = 0; i < dataset->num_instances(); i++) // for each instance compare the true class and predicted class
{
int trueClass = dataset->get_instance(i)->get(dataset->num_attributes() - 1)->operator int32();
int predictedClass = predictions[i];
confusionMatrix[trueClass*dataset->num_classes() + predictedClass]++;
}
return confusionMatrix;
}
float computeAccuracy(int* confusionMatrix, ArffData* dataset)
{
int successfulPredictions = 0;
for(int i = 0; i < dataset->num_classes(); i++)
{
successfulPredictions += confusionMatrix[i*dataset->num_classes() + i]; // elements in the diagonal are correct predictions
}
return successfulPredictions / (float) dataset->num_instances();
}
int main(int argc, char *argv[])
{
if(argc != 3)
{
cout << "Usage: ./main datasets/datasetFile.arff" << endl;
cout << "Usage: k value" << endl;
exit(0);
}
ArffParser parser(argv[1]);
ArffData *dataset = parser.parse();
struct timespec start, end;
int k;
sscanf(argv[2], "%d", &k);
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
int* predictions = KNN(dataset,k);
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
uint64_t diff = (1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec) / 1e6;
int* confusionMatrix = computeConfusionMatrix(predictions, dataset);
float accuracy = computeAccuracy(confusionMatrix, dataset);
printf("The KNN classifier for %lu instances required %llu ms CPU time, accuracy was %.4f\n", dataset->num_instances(), (long long unsigned int) diff, accuracy);
}
|
69511bbbf53ad427f87f3a0be917a8f03eed6519.hip | // !!! This is a file automatically generated by hipify!!!
#include "FCLayer.h"
//#include <hip/hip_runtime.h>
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#include "math_functions.h"
#include <cmath>
#define GPU_WARP_DISPATCHERS 2
#define GPU_WARP_SIZE 32
void print_d_var2(float *d_v, int r, int c, bool print_elem = true) {
std::cout << "*****************************" << std::endl;
float *h_v = (float *)malloc(sizeof(float) * r * c);
hipMemcpy(h_v, d_v, sizeof(float) * r * c, hipMemcpyDeviceToHost);
float mini = h_v[0], maxi = h_v[0];
int mini_idx = 0, maxi_idx = 0;
float sum = 0.0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
if (print_elem)
printf("%f\t", h_v[j + i * c]);
if (h_v[j + i * c] < mini) {
mini = h_v[j + i * c];
mini_idx = j + i * c;
}
if (h_v[j + i * c] > maxi) {
maxi = h_v[j + i * c];
maxi_idx = j + i * c;
}
sum += h_v[j + i * c];
}
if (print_elem)
std::cout << std::endl;
}
std::cout << "Shape = (" << r << ", " << c << ")" << std::endl;
std::cout << "Minimum at index " << mini_idx << " = " << mini << std::endl;
std::cout << "Maximum at index " << maxi_idx << " = " << maxi << std::endl;
std::cout << "Average of all elements = " << sum / (r * c) << std::endl;
free(h_v);
}
int my_ceilf_division_FCLayer(float a, float b) {
return 1 + ((a - 1) / b);
}
__global__ void FloatGPUMemset_GPUKernel(float *d_array,
int array_size, float val) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_array[idx % array_size] = val;
}
//__global__ void ReAlignMemory_ShiftRight_GPUKernel(float *d_mat,
// int total_size,
// int elem_size,
// int num_elems) {
// int compact_size = total_size - num_elems;
// int read_idx = (threadIdx.x % compact_size);
// int write_idx = (read_idx + ceil((float) read_idx / elem_size));
// if (read_idx % elem_size == 0)
// write_idx++;
// float val_to_copy = d_mat[read_idx];
// __syncthreads();
// d_mat[write_idx] = val_to_copy;
//}
//__global__ void ReAlignMemory_ShiftLeft_GPUKernel(float *d_data,
// int total_size,
// int rows,
// int cols) {
// int compact_size = total_size - rows;
// int write_idx = (threadIdx.x % compact_size);
// int targ_cols = cols - 1;
// int read_idx = write_idx + ceil((float)write_idx / targ_cols);
// if (write_idx % targ_cols == 0)
// read_idx++;
// float val_to_copy = d_data[read_idx];
// __syncthreads();
// d_data[write_idx] = val_to_copy;
//}
__global__ void FillOnes_GPUKernel(float *d_data, int elem_size,
int num_batches) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % (num_batches + 1);
d_data[idx * elem_size] = 1.0f;
}
__global__ void InitIdentityMatrix_GPUKernel(float *d_mat, int d_mat_side) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % (d_mat_side * d_mat_side);
d_mat[idx] = (idx % (d_mat_side + 1)) == 0 ? 1.0f : 0.0f;
}
__global__ void WeightMatrixRegularizeElemWise_GPUKernel(float *d_mat_in,
int d_mat_cols,
float reg_inp_scalar,
int d_mat_size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < d_mat_size && idx >= d_mat_cols)
//d_mat_in[idx] -= (reg_inp_scalar * d_mat_in[idx] * d_mat_in[idx]);
d_mat_in[idx] *= reg_inp_scalar;
}
__global__ void ElemwiseGradCompute_GPUKernel(float *d_data,
float *d_out_minus_labels,
float *d_elem_grads,
int input_batch_size,
int input_neurons,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% (input_batch_size * (input_neurons + 1) * output_neurons);
int idx_y = idx / output_neurons;
int d_data_idx_T_y = idx_y / input_batch_size;
int d_data_idx_T_x = idx_y - (d_data_idx_T_y * input_batch_size);
d_elem_grads[idx] = d_data[d_data_idx_T_y
+ d_data_idx_T_x * (input_neurons + 1)]
* d_out_minus_labels[idx - d_data_idx_T_y
* input_batch_size
* output_neurons];
}
__global__ void
ComputeGradientsFromElemGrads_GPUKernel(float *d_elem_grads,
float *d_softmax_gradients,
float learning_rate,
float momentum,
int input_batch_size,
int input_neurons,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% ((input_neurons + 1) * output_neurons);
int idx_y = idx / output_neurons;
int idx_x = idx - idx_y * output_neurons;
int idx_elem_grads_y = idx_y * input_batch_size;
float sum = 0.0f;
for (int i = 0; i < input_batch_size; i++) {
sum += d_elem_grads[idx_x + (idx_elem_grads_y + i) * output_neurons];
}
d_softmax_gradients[idx] = (learning_rate / input_batch_size) * sum
- momentum * d_softmax_gradients[idx];
}
__global__ void ComputeSoftmaxLoss_GPUKernel(float *d_out, float *d_labels,
float *d_out_minus_labels,
float coeff,
int input_batch_size,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% (input_batch_size * output_neurons);
//d_out_minus_labels[idx] = coeff * (d_out[idx] - d_labels[idx]);
if (d_labels[idx] == 1)
d_out_minus_labels[idx] = coeff * (1.0f - d_out[idx]);
else
d_out_minus_labels[idx] = coeff * d_out[idx];
}
__global__ void ReluBackprop_GPUKernel(float *d_backprop_derivatives,
float *d_out_xw_act,
float *d_fwd_layer_derivatives,
float relu_clip,
int derivative_matrix_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % derivative_matrix_size;
d_fwd_layer_derivatives[idx] = d_out_xw_act[idx] > relu_clip
? d_backprop_derivatives[idx] : relu_clip;
}
__global__ void SigmoidBackprop_GPUKernel(float *d_backprop_derivatives,
float *d_out_xw_act,
float *d_fwd_layer_derivatives,
int derivative_matrix_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % derivative_matrix_size;
d_fwd_layer_derivatives[idx] = d_out_xw_act[idx]
* (1.0f - d_out_xw_act[idx])
* d_backprop_derivatives[idx];
}
__global__ void ReplaceVal_GPUKernel(float *d_mat, int total_size,
float val, float replace_val) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
if (d_mat[idx] == val)
d_mat[idx] = replace_val;
}
__global__ void SubtractElemwise_GPUKernel(float *d_mat, float delta,
int total_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x);
d_mat[idx] -= (delta * (idx < total_size));
}
__global__ void Replace2Vals_GPUKernel(float *d_mat, int total_size,
float val0, float val1,
float replace_val0, float replace_val1) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
if (d_mat[idx] < val0)
d_mat[idx] = replace_val0;
else if (d_mat[idx] > val1)
d_mat[idx] = replace_val1;
}
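// Maps the linear thread index to (i, j) in the strictly lower-triangular part of the matrix
// via the inverse triangular-number formula; those elements are staged in d_helper because the
// in-place right shift would otherwise overwrite them before they are read.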
__global__ void ShiftRight_PopulateHelper_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int rows, int cols) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
int i = floor(0.5f * (sqrt((float)1 + 8 * idx) - 1.0f)) + 1;
int j = idx - i * (i - 1) / 2;
int read_idx = j + i * cols;
d_helper[idx] = d_mat[read_idx];
}
__global__ void ReAlignMemory_ShiftRight_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int cols,
int thread_chunk_size) {
extern __shared__ float read_vals[];
int shared_mem_idx, read_idx, read_idx_row, write_idx;
int row_linear_idx = blockIdx.x * cols;
int read_idx_base = row_linear_idx
+ (threadIdx.x * thread_chunk_size) % cols;
int row_last_linear_idx = row_linear_idx + cols;
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx;
if (read_idx >= read_idx_row * (1 + cols)) {
read_vals[shared_mem_idx] = d_mat[read_idx];
}
else {
read_vals[shared_mem_idx] = d_helper[read_idx - cols * read_idx_row
+ (read_idx_row - 1)
* read_idx_row / 2];
}
}
__syncthreads();
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
write_idx = (read_idx + ceil((float)read_idx / cols)) + !(read_idx % cols);
d_mat[write_idx] = read_vals[read_idx - row_linear_idx];
if ((write_idx - 1) % (cols + 1) == 0) {
d_mat[write_idx - 1] = 1.0f;
}
}
}
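// Expands a (rows x cols) matrix in place to (rows x (cols + 1)): every row is shifted one slot
// to the right and a 1.0f is written at column 0 of each row; d_helper stages the
// lower-triangular elements that the shift would overwrite.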
void ReAlignMemory_ShiftRight(float *d_mat, float *d_helper,
int rows, int cols, int max_threadblock_size) {
int org_size = rows * cols;
int reqd_threads = rows * (rows - 1) / 2;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
if (threadblock_size > max_threadblock_size)
threadblock_size = max_threadblock_size;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
int thread_chunk_size = my_ceilf_division_FCLayer(cols, max_threadblock_size);
ShiftRight_PopulateHelper_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat, d_helper, org_size,
rows, cols);
reqd_threads = my_ceilf_division_FCLayer(cols, thread_chunk_size);
threadblock_size = my_ceilf_division_FCLayer(reqd_threads, GPU_WARP_SIZE)
* GPU_WARP_SIZE;
ReAlignMemory_ShiftRight_GPUKernel << < rows,
threadblock_size,
sizeof(float) * cols >> >
(d_mat, d_helper,
org_size, cols,
thread_chunk_size);
}
__global__ void ShiftLeft_PopulateHelper_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int rows, int cols) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
int i = floor(0.5f * (sqrt((float)1 + 8 * idx) - 1.0f));
int j = cols - (idx - i * (i - 1) / 2) - 1 + i;
int read_idx = j + i * cols;
d_helper[idx] = d_mat[read_idx];
}
__global__ void ReAlignMemory_ShiftLeft_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int cols,
int thread_chunk_size) {
extern __shared__ float read_vals[];
int shared_mem_idx, read_idx, read_idx_row, write_idx;
int row_linear_idx = blockIdx.x * cols;
int rows = total_size / cols;
int read_idx_base = row_linear_idx
+ (threadIdx.x * thread_chunk_size) % cols + 1;
int read_idx_lateral;
int row_last_linear_idx = row_linear_idx + cols;
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx - 1;
if (read_idx < ((read_idx_row + 1) * (cols - 1))
|| blockIdx.x == (rows - 1)) {
read_vals[shared_mem_idx] = d_mat[read_idx];
}
else {
read_idx_lateral = row_linear_idx + cols - shared_mem_idx - 2;
read_vals[shared_mem_idx] = d_helper[read_idx_lateral
- cols * read_idx_row
+ (read_idx_row - 1)
* read_idx_row / 2 + read_idx_row];
}
}
__syncthreads();
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx - 1;
write_idx = row_linear_idx + shared_mem_idx - read_idx_row;
d_mat[write_idx] = read_vals[shared_mem_idx];
}
}
void ReAlignMemory_ShiftLeft(float *d_mat, float *d_helper,
int rows, int cols, int max_threadblock_size) {
int org_size = rows * cols;
int reqd_threads = rows * (rows - 1) / 2;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
if (threadblock_size > max_threadblock_size)
threadblock_size = max_threadblock_size;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
int thread_chunk_size = my_ceilf_division_FCLayer((cols - 1), max_threadblock_size);
ShiftLeft_PopulateHelper_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat, d_helper, org_size,
rows, cols);
reqd_threads = my_ceilf_division_FCLayer((cols - 1), thread_chunk_size);
threadblock_size = my_ceilf_division_FCLayer(reqd_threads, GPU_WARP_SIZE)
* GPU_WARP_SIZE;
ReAlignMemory_ShiftLeft_GPUKernel << < rows,
threadblock_size,
sizeof(float) * (cols - 1) >> >
(d_mat, d_helper,
org_size, cols,
thread_chunk_size);
}
void ReAlignMemory_ShiftLeft_CPU(float *d_mat, int rows, int cols) {
int sz = rows * (cols - 1);
float *tmp0 = (float *)malloc(sizeof(float) * rows * cols);
float *tmp1 = (float *)malloc(sizeof(float) * sz);
hipMemcpy(tmp0, d_mat, sizeof(float) * rows * cols, hipMemcpyDeviceToHost);
for (int i = 0; i < rows; i++) {
for (int j = 1; j < cols; j++) {
tmp1[(j - 1) + i * (cols - 1)] = tmp0[j + i * cols];
}
}
hipMemcpy(d_mat, tmp1, sizeof(float) * sz, hipMemcpyHostToDevice);
free(tmp0);
free(tmp1);
}
void SubtractElemwise(float *d_mat, float delta, int mat_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(mat_size, threadblock_size);
SubtractElemwise_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, delta, mat_size);
}
void FloatGPUMemset(float *d_array, int array_size, float val) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(array_size, threadblock_size);
FloatGPUMemset_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_array, array_size, val);
}
void ReplaceVal(float *d_mat, int total_size, float val, float replace_val) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(total_size, threadblock_size);
ReplaceVal_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, total_size, val, replace_val);
}
void Replace2Vals(float *d_mat, int total_size, float val0, float val1,
float replace_val0, float replace_val1) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(total_size, threadblock_size);
Replace2Vals_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, total_size, val0, val1,
replace_val0, replace_val1);
}
void FillOnes(float *d_data, int batch_size, int elem_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(batch_size, threadblock_size);
FillOnes_GPUKernel << < num_threadblocks,
threadblock_size >> > (d_data, elem_size + 1,
batch_size);
}
void InitIdentityMatrix(float *d_mat, int side) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer((side * side), threadblock_size);
InitIdentityMatrix_GPUKernel << < num_threadblocks,
threadblock_size >> > (d_mat, side);
}
void WeightMatrixRegularizeElemWise(float *d_mat_in, int d_mat_cols,
float reg_inp_scalar, int d_mat_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(d_mat_size, threadblock_size);
WeightMatrixRegularizeElemWise_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat_in, d_mat_cols,
reg_inp_scalar, d_mat_size);
}
void ElemwiseGradCompute(float *d_data, float *d_out_minus_labels,
float *d_elem_grads, int input_batch_size,
int input_neurons, int output_neurons) {
int reqd_threads = (input_batch_size * (input_neurons + 1))
* output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ElemwiseGradCompute_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_data, d_out_minus_labels, d_elem_grads,
input_batch_size, input_neurons,
output_neurons);
}
void ComputeGradientsFromElemGrads(float *d_elem_grads,
float *d_softmax_gradients,
float learning_rate, float momentum,
int input_batch_size, int input_neurons,
int output_neurons) {
int reqd_threads = (input_neurons + 1) * output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ComputeGradientsFromElemGrads_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_elem_grads,
d_softmax_gradients,
learning_rate, momentum,
input_batch_size, input_neurons,
output_neurons);
}
void ComputeSoftmaxLoss(float *d_out, float *d_labels,
float *d_out_minus_labels, float coeff,
int input_batch_size, int output_neurons) {
int reqd_threads = input_batch_size * output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ComputeSoftmaxLoss_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_out, d_labels,
d_out_minus_labels, coeff,
input_batch_size, output_neurons);
}
void ReluBackprop(float *d_backprop_derivatives, float *d_out_xw_act,
float *d_fwd_layer_derivatives, float relu_clip,
int derivative_matrix_size) {
int reqd_threads = derivative_matrix_size;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ReluBackprop_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_backprop_derivatives, d_out_xw_act,
d_fwd_layer_derivatives, relu_clip,
derivative_matrix_size);
}
void SigmoidBackprop(float *d_backprop_derivatives, float *d_out_xw_act,
float *d_fwd_layer_derivatives,
int derivative_matrix_size) {
int reqd_threads = derivative_matrix_size;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
SigmoidBackprop_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_backprop_derivatives, d_out_xw_act,
d_fwd_layer_derivatives,
derivative_matrix_size);
}
//void ComputePrevLayerDerivsFromElemGrads_efficient(float *d_elem_grads,
// float *d_prev_layer_derivatives,
// int input_batch_size,
// int input_neurons,
// int output_neurons) {
// int num_threadblocks = (input_neurons + 1) * input_batch_size;
// int sum_stride = 2;
// int threadblock_size = std::ceilf(std::ceilf((float)output_neurons
// / sum_stride)
// / GPU_WARP_SIZE) * GPU_WARP_SIZE;
// ComputePrevLayerDerivsFromElemGrads_efficient_GPUKernel <<< num_threadblocks,
// threadblock_size >>>
// (d_elem_grads,
// d_prev_layer_derivatives,
// input_batch_size,
// input_neurons,
// output_neurons, sum_stride);
//}
//void ComputePrevLayerDerivsFromElemGrads(float *d_elem_grads,
// float *d_prev_layer_derivatives,
// int input_batch_size,
// int input_neurons,
// int output_neurons) {
// int reqd_threads = (input_neurons + 1) * input_batch_size;
// int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
// int num_threadblocks = std::ceilf((float)reqd_threads / threadblock_size);
// ComputePrevLayerDerivsFromElemGrads_GPUKernel <<< num_threadblocks,
// threadblock_size >>>
// (d_elem_grads,
// d_prev_layer_derivatives,
// input_batch_size,
// input_neurons,
// output_neurons);
//} | 69511bbbf53ad427f87f3a0be917a8f03eed6519.cu | #include "FCLayer.h"
//#include <cuda.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include "math_functions.h"
#include <cmath>
#define GPU_WARP_DISPATCHERS 2
#define GPU_WARP_SIZE 32
void print_d_var2(float *d_v, int r, int c, bool print_elem = true) {
std::cout << "*****************************" << std::endl;
float *h_v = (float *)malloc(sizeof(float) * r * c);
cudaMemcpy(h_v, d_v, sizeof(float) * r * c, cudaMemcpyDeviceToHost);
float mini = h_v[0], maxi = h_v[0];
int mini_idx = 0, maxi_idx = 0;
float sum = 0.0;
for (int i = 0; i < r; i++) {
for (int j = 0; j < c; j++) {
if (print_elem)
printf("%f\t", h_v[j + i * c]);
if (h_v[j + i * c] < mini) {
mini = h_v[j + i * c];
mini_idx = j + i * c;
}
if (h_v[j + i * c] > maxi) {
maxi = h_v[j + i * c];
maxi_idx = j + i * c;
}
sum += h_v[j + i * c];
}
if (print_elem)
std::cout << std::endl;
}
std::cout << "Shape = (" << r << ", " << c << ")" << std::endl;
std::cout << "Minimum at index " << mini_idx << " = " << mini << std::endl;
std::cout << "Maximum at index " << maxi_idx << " = " << maxi << std::endl;
std::cout << "Average of all elements = " << sum / (r * c) << std::endl;
free(h_v);
}
int my_ceilf_division_FCLayer(float a, float b) {
return 1 + ((a - 1) / b);
}
__global__ void FloatGPUMemset_GPUKernel(float *d_array,
int array_size, float val) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_array[idx % array_size] = val;
}
//__global__ void ReAlignMemory_ShiftRight_GPUKernel(float *d_mat,
// int total_size,
// int elem_size,
// int num_elems) {
// int compact_size = total_size - num_elems;
// int read_idx = (threadIdx.x % compact_size);
// int write_idx = (read_idx + ceil((float) read_idx / elem_size));
// if (read_idx % elem_size == 0)
// write_idx++;
// float val_to_copy = d_mat[read_idx];
// __syncthreads();
// d_mat[write_idx] = val_to_copy;
//}
//__global__ void ReAlignMemory_ShiftLeft_GPUKernel(float *d_data,
// int total_size,
// int rows,
// int cols) {
// int compact_size = total_size - rows;
// int write_idx = (threadIdx.x % compact_size);
// int targ_cols = cols - 1;
// int read_idx = write_idx + ceil((float)write_idx / targ_cols);
// if (write_idx % targ_cols == 0)
// read_idx++;
// float val_to_copy = d_data[read_idx];
// __syncthreads();
// d_data[write_idx] = val_to_copy;
//}
__global__ void FillOnes_GPUKernel(float *d_data, int elem_size,
int num_batches) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % (num_batches + 1);
d_data[idx * elem_size] = 1.0f;
}
__global__ void InitIdentityMatrix_GPUKernel(float *d_mat, int d_mat_side) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % (d_mat_side * d_mat_side);
d_mat[idx] = (idx % (d_mat_side + 1)) == 0 ? 1.0f : 0.0f;
}
__global__ void WeightMatrixRegularizeElemWise_GPUKernel(float *d_mat_in,
int d_mat_cols,
float reg_inp_scalar,
int d_mat_size) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < d_mat_size && idx >= d_mat_cols)
//d_mat_in[idx] -= (reg_inp_scalar * d_mat_in[idx] * d_mat_in[idx]);
d_mat_in[idx] *= reg_inp_scalar;
}
__global__ void ElemwiseGradCompute_GPUKernel(float *d_data,
float *d_out_minus_labels,
float *d_elem_grads,
int input_batch_size,
int input_neurons,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% (input_batch_size * (input_neurons + 1) * output_neurons);
int idx_y = idx / output_neurons;
int d_data_idx_T_y = idx_y / input_batch_size;
int d_data_idx_T_x = idx_y - (d_data_idx_T_y * input_batch_size);
d_elem_grads[idx] = d_data[d_data_idx_T_y
+ d_data_idx_T_x * (input_neurons + 1)]
* d_out_minus_labels[idx - d_data_idx_T_y
* input_batch_size
* output_neurons];
}
__global__ void
ComputeGradientsFromElemGrads_GPUKernel(float *d_elem_grads,
float *d_softmax_gradients,
float learning_rate,
float momentum,
int input_batch_size,
int input_neurons,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% ((input_neurons + 1) * output_neurons);
int idx_y = idx / output_neurons;
int idx_x = idx - idx_y * output_neurons;
int idx_elem_grads_y = idx_y * input_batch_size;
float sum = 0.0f;
for (int i = 0; i < input_batch_size; i++) {
sum += d_elem_grads[idx_x + (idx_elem_grads_y + i) * output_neurons];
}
d_softmax_gradients[idx] = (learning_rate / input_batch_size) * sum
- momentum * d_softmax_gradients[idx];
}
__global__ void ComputeSoftmaxLoss_GPUKernel(float *d_out, float *d_labels,
float *d_out_minus_labels,
float coeff,
int input_batch_size,
int output_neurons) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x)
% (input_batch_size * output_neurons);
//d_out_minus_labels[idx] = coeff * (d_out[idx] - d_labels[idx]);
if (d_labels[idx] == 1)
d_out_minus_labels[idx] = coeff * (1.0f - d_out[idx]);
else
d_out_minus_labels[idx] = coeff * d_out[idx];
}
__global__ void ReluBackprop_GPUKernel(float *d_backprop_derivatives,
float *d_out_xw_act,
float *d_fwd_layer_derivatives,
float relu_clip,
int derivative_matrix_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % derivative_matrix_size;
d_fwd_layer_derivatives[idx] = d_out_xw_act[idx] > relu_clip
? d_backprop_derivatives[idx] : relu_clip;
}
__global__ void SigmoidBackprop_GPUKernel(float *d_backprop_derivatives,
float *d_out_xw_act,
float *d_fwd_layer_derivatives,
int derivative_matrix_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % derivative_matrix_size;
d_fwd_layer_derivatives[idx] = d_out_xw_act[idx]
* (1.0f - d_out_xw_act[idx])
* d_backprop_derivatives[idx];
}
__global__ void ReplaceVal_GPUKernel(float *d_mat, int total_size,
float val, float replace_val) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
if (d_mat[idx] == val)
d_mat[idx] = replace_val;
}
__global__ void SubtractElemwise_GPUKernel(float *d_mat, float delta,
int total_size) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x);
d_mat[idx] -= (delta * (idx < total_size));
}
__global__ void Replace2Vals_GPUKernel(float *d_mat, int total_size,
float val0, float val1,
float replace_val0, float replace_val1) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
if (d_mat[idx] < val0)
d_mat[idx] = replace_val0;
else if (d_mat[idx] > val1)
d_mat[idx] = replace_val1;
}
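// Maps the linear thread index to (i, j) in the strictly lower-triangular part of the matrix
// via the inverse triangular-number formula; those elements are staged in d_helper because the
// in-place right shift would otherwise overwrite them before they are read.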
__global__ void ShiftRight_PopulateHelper_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int rows, int cols) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
int i = floor(0.5f * (sqrt((float)1 + 8 * idx) - 1.0f)) + 1;
int j = idx - i * (i - 1) / 2;
int read_idx = j + i * cols;
d_helper[idx] = d_mat[read_idx];
}
__global__ void ReAlignMemory_ShiftRight_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int cols,
int thread_chunk_size) {
extern __shared__ float read_vals[];
int shared_mem_idx, read_idx, read_idx_row, write_idx;
int row_linear_idx = blockIdx.x * cols;
int read_idx_base = row_linear_idx
+ (threadIdx.x * thread_chunk_size) % cols;
int row_last_linear_idx = row_linear_idx + cols;
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx;
if (read_idx >= read_idx_row * (1 + cols)) {
read_vals[shared_mem_idx] = d_mat[read_idx];
}
else {
read_vals[shared_mem_idx] = d_helper[read_idx - cols * read_idx_row
+ (read_idx_row - 1)
* read_idx_row / 2];
}
}
__syncthreads();
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
write_idx = (read_idx + ceil((float)read_idx / cols)) + !(read_idx % cols);
d_mat[write_idx] = read_vals[read_idx - row_linear_idx];
if ((write_idx - 1) % (cols + 1) == 0) {
d_mat[write_idx - 1] = 1.0f;
}
}
}
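// Expands a (rows x cols) matrix in place to (rows x (cols + 1)): every row is shifted one slot
// to the right and a 1.0f is written at column 0 of each row; d_helper stages the
// lower-triangular elements that the shift would overwrite.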
void ReAlignMemory_ShiftRight(float *d_mat, float *d_helper,
int rows, int cols, int max_threadblock_size) {
int org_size = rows * cols;
int reqd_threads = rows * (rows - 1) / 2;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
if (threadblock_size > max_threadblock_size)
threadblock_size = max_threadblock_size;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
int thread_chunk_size = my_ceilf_division_FCLayer(cols, max_threadblock_size);
ShiftRight_PopulateHelper_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat, d_helper, org_size,
rows, cols);
reqd_threads = my_ceilf_division_FCLayer(cols, thread_chunk_size);
threadblock_size = my_ceilf_division_FCLayer(reqd_threads, GPU_WARP_SIZE)
* GPU_WARP_SIZE;
ReAlignMemory_ShiftRight_GPUKernel << < rows,
threadblock_size,
sizeof(float) * cols >> >
(d_mat, d_helper,
org_size, cols,
thread_chunk_size);
}
__global__ void ShiftLeft_PopulateHelper_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int rows, int cols) {
int idx = (blockDim.x * blockIdx.x + threadIdx.x) % total_size;
int i = floor(0.5f * (sqrt((float)1 + 8 * idx) - 1.0f));
int j = cols - (idx - i * (i - 1) / 2) - 1 + i;
int read_idx = j + i * cols;
d_helper[idx] = d_mat[read_idx];
}
__global__ void ReAlignMemory_ShiftLeft_GPUKernel(float *d_mat,
float *d_helper,
int total_size,
int cols,
int thread_chunk_size) {
extern __shared__ float read_vals[];
int shared_mem_idx, read_idx, read_idx_row, write_idx;
int row_linear_idx = blockIdx.x * cols;
int rows = total_size / cols;
int read_idx_base = row_linear_idx
+ (threadIdx.x * thread_chunk_size) % cols + 1;
int read_idx_lateral;
int row_last_linear_idx = row_linear_idx + cols;
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx - 1;
if (read_idx < ((read_idx_row + 1) * (cols - 1))
|| blockIdx.x == (rows - 1)) {
read_vals[shared_mem_idx] = d_mat[read_idx];
}
else {
read_idx_lateral = row_linear_idx + cols - shared_mem_idx - 2;
read_vals[shared_mem_idx] = d_helper[read_idx_lateral
- cols * read_idx_row
+ (read_idx_row - 1)
* read_idx_row / 2 + read_idx_row];
}
}
__syncthreads();
for (read_idx = read_idx_base; read_idx < row_last_linear_idx;
read_idx++) {
read_idx_row = read_idx / cols;
shared_mem_idx = read_idx - row_linear_idx - 1;
write_idx = row_linear_idx + shared_mem_idx - read_idx_row;
d_mat[write_idx] = read_vals[shared_mem_idx];
}
}
void ReAlignMemory_ShiftLeft(float *d_mat, float *d_helper,
int rows, int cols, int max_threadblock_size) {
int org_size = rows * cols;
int reqd_threads = rows * (rows - 1) / 2;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
if (threadblock_size > max_threadblock_size)
threadblock_size = max_threadblock_size;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
int thread_chunk_size = my_ceilf_division_FCLayer((cols - 1), max_threadblock_size);
ShiftLeft_PopulateHelper_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat, d_helper, org_size,
rows, cols);
reqd_threads = my_ceilf_division_FCLayer((cols - 1), thread_chunk_size);
threadblock_size = my_ceilf_division_FCLayer(reqd_threads, GPU_WARP_SIZE)
* GPU_WARP_SIZE;
ReAlignMemory_ShiftLeft_GPUKernel << < rows,
threadblock_size,
sizeof(float) * (cols - 1) >> >
(d_mat, d_helper,
org_size, cols,
thread_chunk_size);
}
void ReAlignMemory_ShiftLeft_CPU(float *d_mat, int rows, int cols) {
int sz = rows * (cols - 1);
float *tmp0 = (float *)malloc(sizeof(float) * rows * cols);
float *tmp1 = (float *)malloc(sizeof(float) * sz);
cudaMemcpy(tmp0, d_mat, sizeof(float) * rows * cols, cudaMemcpyDeviceToHost);
for (int i = 0; i < rows; i++) {
for (int j = 1; j < cols; j++) {
tmp1[(j - 1) + i * (cols - 1)] = tmp0[j + i * cols];
}
}
cudaMemcpy(d_mat, tmp1, sizeof(float) * sz, cudaMemcpyHostToDevice);
free(tmp0);
free(tmp1);
}
void SubtractElemwise(float *d_mat, float delta, int mat_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(mat_size, threadblock_size);
SubtractElemwise_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, delta, mat_size);
}
void FloatGPUMemset(float *d_array, int array_size, float val) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(array_size, threadblock_size);
FloatGPUMemset_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_array, array_size, val);
}
void ReplaceVal(float *d_mat, int total_size, float val, float replace_val) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(total_size, threadblock_size);
ReplaceVal_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, total_size, val, replace_val);
}
void Replace2Vals(float *d_mat, int total_size, float val0, float val1,
float replace_val0, float replace_val1) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(total_size, threadblock_size);
Replace2Vals_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_mat, total_size, val0, val1,
replace_val0, replace_val1);
}
void FillOnes(float *d_data, int batch_size, int elem_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(batch_size, threadblock_size);
FillOnes_GPUKernel << < num_threadblocks,
threadblock_size >> > (d_data, elem_size + 1,
batch_size);
}
void InitIdentityMatrix(float *d_mat, int side) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer((side * side), threadblock_size);
InitIdentityMatrix_GPUKernel << < num_threadblocks,
threadblock_size >> > (d_mat, side);
}
void WeightMatrixRegularizeElemWise(float *d_mat_in, int d_mat_cols,
float reg_inp_scalar, int d_mat_size) {
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(d_mat_size, threadblock_size);
WeightMatrixRegularizeElemWise_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_mat_in, d_mat_cols,
reg_inp_scalar, d_mat_size);
}
void ElemwiseGradCompute(float *d_data, float *d_out_minus_labels,
float *d_elem_grads, int input_batch_size,
int input_neurons, int output_neurons) {
int reqd_threads = (input_batch_size * (input_neurons + 1))
* output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ElemwiseGradCompute_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_data, d_out_minus_labels, d_elem_grads,
input_batch_size, input_neurons,
output_neurons);
}
void ComputeGradientsFromElemGrads(float *d_elem_grads,
float *d_softmax_gradients,
float learning_rate, float momentum,
int input_batch_size, int input_neurons,
int output_neurons) {
int reqd_threads = (input_neurons + 1) * output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ComputeGradientsFromElemGrads_GPUKernel << < num_threadblocks,
threadblock_size >> >
(d_elem_grads,
d_softmax_gradients,
learning_rate, momentum,
input_batch_size, input_neurons,
output_neurons);
}
void ComputeSoftmaxLoss(float *d_out, float *d_labels,
float *d_out_minus_labels, float coeff,
int input_batch_size, int output_neurons) {
int reqd_threads = input_batch_size * output_neurons;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ComputeSoftmaxLoss_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_out, d_labels,
d_out_minus_labels, coeff,
input_batch_size, output_neurons);
}
void ReluBackprop(float *d_backprop_derivatives, float *d_out_xw_act,
float *d_fwd_layer_derivatives, float relu_clip,
int derivative_matrix_size) {
int reqd_threads = derivative_matrix_size;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
ReluBackprop_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_backprop_derivatives, d_out_xw_act,
d_fwd_layer_derivatives, relu_clip,
derivative_matrix_size);
}
void SigmoidBackprop(float *d_backprop_derivatives, float *d_out_xw_act,
float *d_fwd_layer_derivatives,
int derivative_matrix_size) {
int reqd_threads = derivative_matrix_size;
int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
int num_threadblocks = my_ceilf_division_FCLayer(reqd_threads, threadblock_size);
SigmoidBackprop_GPUKernel << < num_threadblocks, threadblock_size >> >
(d_backprop_derivatives, d_out_xw_act,
d_fwd_layer_derivatives,
derivative_matrix_size);
}
//void ComputePrevLayerDerivsFromElemGrads_efficient(float *d_elem_grads,
// float *d_prev_layer_derivatives,
// int input_batch_size,
// int input_neurons,
// int output_neurons) {
// int num_threadblocks = (input_neurons + 1) * input_batch_size;
// int sum_stride = 2;
// int threadblock_size = std::ceilf(std::ceilf((float)output_neurons
// / sum_stride)
// / GPU_WARP_SIZE) * GPU_WARP_SIZE;
// ComputePrevLayerDerivsFromElemGrads_efficient_GPUKernel <<< num_threadblocks,
// threadblock_size >>>
// (d_elem_grads,
// d_prev_layer_derivatives,
// input_batch_size,
// input_neurons,
// output_neurons, sum_stride);
//}
//void ComputePrevLayerDerivsFromElemGrads(float *d_elem_grads,
// float *d_prev_layer_derivatives,
// int input_batch_size,
// int input_neurons,
// int output_neurons) {
// int reqd_threads = (input_neurons + 1) * input_batch_size;
// int threadblock_size = GPU_WARP_SIZE * GPU_WARP_DISPATCHERS * 2;
// int num_threadblocks = std::ceilf((float)reqd_threads / threadblock_size);
// ComputePrevLayerDerivsFromElemGrads_GPUKernel <<< num_threadblocks,
// threadblock_size >>>
// (d_elem_grads,
// d_prev_layer_derivatives,
// input_batch_size,
// input_neurons,
// output_neurons);
//} |
c062389dbf102e7cf7f41e1a634724c0bd4c780c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include "coef_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for generating geometric factors
*/
void cuda_coef_generate_geo(void *G11, void *G12, void *G13,
void *G22, void *G23, void *G33,
void *drdx, void *drdy, void *drdz,
void *dsdx, void *dsdy, void *dsdz,
void *dtdx, void *dtdy, void *dtdz,
void *jacinv, void *w3, int *nel,
int *lx, int *gdim) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks((*nel), 1, 1);
const hipStream_t stream = (hipStream_t) glb_cmd_queue;
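/* GEO_CASE instantiates the templated kernel for a compile-time polynomial
   order LX; the switch below dispatches on the runtime value *lx. */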
#define GEO_CASE(LX) \
case LX: \
hipLaunchKernelGGL(( coef_generate_geo_kernel<real, LX, 1024>) \
, dim3(nblcks), dim3(nthrds), 0, stream, \
(real *) G11, (real *) G12, (real *) G13, \
(real *) G22, (real *) G23, (real *) G33, \
(real *) drdx, (real *) drdy, (real *) drdz, \
(real *) dsdx, (real *) dsdy, (real *) dsdz, \
(real *) dtdx, (real *) dtdy, (real *) dtdz, \
(real *) jacinv, (real *) w3, *gdim); \
CUDA_CHECK(hipGetLastError()); \
break
switch(*lx) {
GEO_CASE(2);
GEO_CASE(3);
GEO_CASE(4);
GEO_CASE(5);
GEO_CASE(6);
GEO_CASE(7);
GEO_CASE(8);
GEO_CASE(9);
GEO_CASE(10);
GEO_CASE(11);
GEO_CASE(12);
GEO_CASE(13);
GEO_CASE(14);
GEO_CASE(15);
GEO_CASE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
/**
* Fortran wrapper for generating geometric factors
*/
void cuda_coef_generate_dxyzdrst(void *drdx, void *drdy, void *drdz,
void *dsdx, void *dsdy, void *dsdz,
void *dtdx, void *dtdy, void *dtdz,
void *dxdr, void *dydr, void *dzdr,
void *dxds, void *dyds, void *dzds,
void *dxdt, void *dydt, void *dzdt,
void *dx, void *dy, void *dz,
void *x, void *y, void *z,
void *jacinv, void *jac,
int *lx, int *nel) {
const int n = (*nel) * (*lx) * (*lx) * (*lx);
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks_dxyz((*nel), 1, 1);
const dim3 nblcks_drst((n + 1024 - 1)/ 1024, 1, 1);
const hipStream_t stream = (hipStream_t) glb_cmd_queue;
#define DXYZDRST_CASE(LX) \
case LX: \
hipLaunchKernelGGL(( coef_generate_dxyz_kernel<real, LX, 1024>) \
, dim3(nblcks_dxyz), dim3(nthrds), 0, stream, \
(real *) dxdr, (real *) dydr, (real *) dzdr, \
(real *) dxds, (real *) dyds, (real *) dzds, \
(real *) dxdt, (real *) dydt, (real *) dzdt, \
(real *) dx, (real *) dy, (real *) dz, \
(real *) x, (real *) y, (real *) z); \
CUDA_CHECK(hipGetLastError()); \
break
switch(*lx) {
DXYZDRST_CASE(2);
DXYZDRST_CASE(3);
DXYZDRST_CASE(4);
DXYZDRST_CASE(5);
DXYZDRST_CASE(6);
DXYZDRST_CASE(7);
DXYZDRST_CASE(8);
DXYZDRST_CASE(9);
DXYZDRST_CASE(10);
DXYZDRST_CASE(11);
DXYZDRST_CASE(12);
DXYZDRST_CASE(13);
DXYZDRST_CASE(14);
DXYZDRST_CASE(15);
DXYZDRST_CASE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
hipLaunchKernelGGL(( coef_generate_drst_kernel<real>)
, dim3(nblcks_drst), dim3(nthrds), 0, stream,
(real *) jac, (real *) jacinv,
(real *) drdx, (real *) drdy, (real *) drdz,
(real *) dsdx, (real *) dsdy, (real *) dsdz,
(real *) dtdx, (real *) dtdy, (real *) dtdz,
(real *) dxdr, (real *) dydr, (real *) dzdr,
(real *) dxds, (real *) dyds, (real *) dzds,
(real *) dxdt, (real *) dydt, (real *) dzdt, n);
CUDA_CHECK(hipGetLastError());
}
}
| c062389dbf102e7cf7f41e1a634724c0bd4c780c.cu | /*
Copyright (c) 2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include "coef_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for generating geometric factors
*/
void cuda_coef_generate_geo(void *G11, void *G12, void *G13,
void *G22, void *G23, void *G33,
void *drdx, void *drdy, void *drdz,
void *dsdx, void *dsdy, void *dsdz,
void *dtdx, void *dtdy, void *dtdz,
void *jacinv, void *w3, int *nel,
int *lx, int *gdim) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks((*nel), 1, 1);
const cudaStream_t stream = (cudaStream_t) glb_cmd_queue;
#define GEO_CASE(LX) \
case LX: \
coef_generate_geo_kernel<real, LX, 1024> \
<<<nblcks, nthrds, 0, stream>>> \
((real *) G11, (real *) G12, (real *) G13, \
(real *) G22, (real *) G23, (real *) G33, \
(real *) drdx, (real *) drdy, (real *) drdz, \
(real *) dsdx, (real *) dsdy, (real *) dsdz, \
(real *) dtdx, (real *) dtdy, (real *) dtdz, \
(real *) jacinv, (real *) w3, *gdim); \
CUDA_CHECK(cudaGetLastError()); \
break
switch(*lx) {
GEO_CASE(2);
GEO_CASE(3);
GEO_CASE(4);
GEO_CASE(5);
GEO_CASE(6);
GEO_CASE(7);
GEO_CASE(8);
GEO_CASE(9);
GEO_CASE(10);
GEO_CASE(11);
GEO_CASE(12);
GEO_CASE(13);
GEO_CASE(14);
GEO_CASE(15);
GEO_CASE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
}
/**
* Fortran wrapper for generating geometric factors
*/
void cuda_coef_generate_dxyzdrst(void *drdx, void *drdy, void *drdz,
void *dsdx, void *dsdy, void *dsdz,
void *dtdx, void *dtdy, void *dtdz,
void *dxdr, void *dydr, void *dzdr,
void *dxds, void *dyds, void *dzds,
void *dxdt, void *dydt, void *dzdt,
void *dx, void *dy, void *dz,
void *x, void *y, void *z,
void *jacinv, void *jac,
int *lx, int *nel) {
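/* n counts every grid point: (*nel) elements with lx^3 points each. The dxyz
   kernel uses one thread block per element, while the drst kernel uses one
   thread per point. */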
const int n = (*nel) * (*lx) * (*lx) * (*lx);
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks_dxyz((*nel), 1, 1);
const dim3 nblcks_drst((n + 1024 - 1)/ 1024, 1, 1);
const cudaStream_t stream = (cudaStream_t) glb_cmd_queue;
#define DXYZDRST_CASE(LX) \
case LX: \
coef_generate_dxyz_kernel<real, LX, 1024> \
<<<nblcks_dxyz, nthrds, 0, stream>>> \
((real *) dxdr, (real *) dydr, (real *) dzdr, \
(real *) dxds, (real *) dyds, (real *) dzds, \
(real *) dxdt, (real *) dydt, (real *) dzdt, \
(real *) dx, (real *) dy, (real *) dz, \
(real *) x, (real *) y, (real *) z); \
CUDA_CHECK(cudaGetLastError()); \
break
switch(*lx) {
DXYZDRST_CASE(2);
DXYZDRST_CASE(3);
DXYZDRST_CASE(4);
DXYZDRST_CASE(5);
DXYZDRST_CASE(6);
DXYZDRST_CASE(7);
DXYZDRST_CASE(8);
DXYZDRST_CASE(9);
DXYZDRST_CASE(10);
DXYZDRST_CASE(11);
DXYZDRST_CASE(12);
DXYZDRST_CASE(13);
DXYZDRST_CASE(14);
DXYZDRST_CASE(15);
DXYZDRST_CASE(16);
default:
{
fprintf(stderr, __FILE__ ": size not supported: %d\n", *lx);
exit(1);
}
}
coef_generate_drst_kernel<real>
<<<nblcks_drst, nthrds, 0, stream>>>
((real *) jac, (real *) jacinv,
(real *) drdx, (real *) drdy, (real *) drdz,
(real *) dsdx, (real *) dsdy, (real *) dsdz,
(real *) dtdx, (real *) dtdy, (real *) dtdz,
(real *) dxdr, (real *) dydr, (real *) dzdr,
(real *) dxds, (real *) dyds, (real *) dzds,
(real *) dxdt, (real *) dydt, (real *) dzdt, n);
CUDA_CHECK(cudaGetLastError());
}
}
|
336e64cbdd96c2b9a75ffb36adf6dc388efce8b0.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdint>
#include <cstdlib>
#include <algorithm>
#include "SyncedMemory.h"
#include "pgm.h"
#include "lab3.h"
using namespace std;
#define CHECK {\
auto e = hipDeviceSynchronize();\
if (e != hipSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
if (argc != 7) {
printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]);
abort();
}
bool sucb, suct, sucm;
int wb, hb, cb, wt, ht, ct, wm, hm, cm;
auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]);
auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]);
auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]);
if (not (sucb and suct and sucm)) {
puts("Something wrong with reading the input image files.");
abort();
}
if (wt != wm or ht != hm) {
puts("The mask and target image must have the same size.");
abort();
}
if (cm != 1) {
puts("The mask image must be mono-colored.");
abort();
}
if (cb != 3 or ct != 3) {
puts("The background and target image must be colored.");
abort();
}
const int oy = atoi(argv[4]), ox = atoi(argv[5]);
const int SIZEB = wb*hb*3;
const int SIZET = wt*ht*3;
const int SIZEM = wm*hm;
MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB);
auto background_s = background.CreateSync(SIZEB);
auto target_s = target.CreateSync(SIZET);
auto mask_s = mask.CreateSync(SIZEM);
auto output_s = output.CreateSync(SIZEB);
float *background_cpu = background_s.get_cpu_wo();
float *target_cpu = target_s.get_cpu_wo();
float *mask_cpu = mask_s.get_cpu_wo();
copy(imgb.get(), imgb.get()+SIZEB, background_cpu);
copy(imgt.get(), imgt.get()+SIZET, target_cpu);
copy(imgm.get(), imgm.get()+SIZEM, mask_cpu);
PoissonImageCloning(
background_s.get_gpu_ro(),
target_s.get_gpu_ro(),
mask_s.get_gpu_ro(),
output_s.get_gpu_wo(),
wb, hb, wt, ht, oy, ox
);
unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]);
const float *o_cpu = output_s.get_cpu_ro();
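// Round to nearest and clamp each output sample to the 0-255 byte range before writing.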
transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); });
WritePPM(o.get(), wb, hb, argv[6]);
return 0;
} | 336e64cbdd96c2b9a75ffb36adf6dc388efce8b0.cu | #include <cstdio>
#include <cstdint>
#include <cstdlib>
#include <algorithm>
#include "SyncedMemory.h"
#include "pgm.h"
#include "lab3.h"
using namespace std;
#define CHECK {\
auto e = cudaDeviceSynchronize();\
if (e != cudaSuccess) {\
printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\
abort();\
}\
}
int main(int argc, char **argv)
{
if (argc != 7) {
printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output>\n", argv[0]);
abort();
}
bool sucb, suct, sucm;
int wb, hb, cb, wt, ht, ct, wm, hm, cm;
auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]);
auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]);
auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]);
if (not (sucb and suct and sucm)) {
puts("Something wrong with reading the input image files.");
abort();
}
if (wt != wm or ht != hm) {
puts("The mask and target image must have the same size.");
abort();
}
if (cm != 1) {
puts("The mask image must be mono-colored.");
abort();
}
if (cb != 3 or ct != 3) {
puts("The background and target image must be colored.");
abort();
}
const int oy = atoi(argv[4]), ox = atoi(argv[5]);
const int SIZEB = wb*hb*3;
const int SIZET = wt*ht*3;
const int SIZEM = wm*hm;
MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB);
auto background_s = background.CreateSync(SIZEB);
auto target_s = target.CreateSync(SIZET);
auto mask_s = mask.CreateSync(SIZEM);
auto output_s = output.CreateSync(SIZEB);
float *background_cpu = background_s.get_cpu_wo();
float *target_cpu = target_s.get_cpu_wo();
float *mask_cpu = mask_s.get_cpu_wo();
copy(imgb.get(), imgb.get()+SIZEB, background_cpu);
copy(imgt.get(), imgt.get()+SIZET, target_cpu);
copy(imgm.get(), imgm.get()+SIZEM, mask_cpu);
PoissonImageCloning(
background_s.get_gpu_ro(),
target_s.get_gpu_ro(),
mask_s.get_gpu_ro(),
output_s.get_gpu_wo(),
wb, hb, wt, ht, oy, ox
);
unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]);
const float *o_cpu = output_s.get_cpu_ro();
transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); });
WritePPM(o.get(), wb, hb, argv[6]);
return 0;
} |
205df864744d76e1fea69f97ccfd487b8f6832ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
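// The routines below approximate sin(x) with its Taylor series
// x - x^3/3! + x^5/5! - ..., adding TERMS correction terms after the initial x.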
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
__global__ void sine(float * d_gpu_result, float * d_in)
{
//obtain the global thread index (one thread per array element)
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//N is a compile-time constant, so it can be used directly in device code
if (idx >= N) return;
//obtain value in memory that relates to that thread index
float value = d_in[idx];
//sine serial code
float numer = d_in[idx] * d_in[idx] * d_in[idx];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= d_in[idx] * d_in[idx];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
d_gpu_result[idx] = value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
char *e = (char*) hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//Preparing and running my kernel
float * d_in;
float *h_gpu_result = (float*)malloc(N*sizeof(float));
float * d_gpu_result;
//setting aside memory
hipMalloc((void **) &d_in, N*sizeof(float));
hipMalloc((void **) &d_gpu_result, N*sizeof(float));
//starting timer
long long GPU_start_time = start_timer();
//copying input memory from cpu to gpu
hipMemcpy(d_in, h_input, N * sizeof(float), hipMemcpyHostToDevice);
//running the threads
hipLaunchKernelGGL(( sine), dim3((N + 255) / 256), dim3(256), 0, 0, d_gpu_result, d_in);
//copying the output memory from the gpu to the cpu
hipMemcpy(h_gpu_result, d_gpu_result, N*sizeof(float), hipMemcpyDeviceToHost);
//stopping the timer
long long GPU_time = stop_timer(GPU_start_time, "\nGPU Run Time");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
return 0;
}
| 205df864744d76e1fea69f97ccfd487b8f6832ca.cu | //
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch
// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>
// problem size (vector length) N
static const int N = 12345678;
// Number of terms to use when approximating sine
static const int TERMS = 6;
// kernel function (CPU - Do not modify)
void sine_serial(float *input, float *output)
{
int i;
for (i=0; i<N; i++) {
float value = input[i];
float numer = input[i] * input[i] * input[i];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= input[i] * input[i];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
output[i] = value;
}
}
// kernel function (CUDA device)
__global__ void sine(float * d_gpu_result, float * d_in)
{
//obtain the global thread index (one thread per array element)
int idx = threadIdx.x + blockIdx.x * blockDim.x;
//N is a compile-time constant, so it can be used directly in device code
if (idx >= N) return;
//obtain value in memory that relates to that thread index
float value = d_in[idx];
//sine serial code
float numer = d_in[idx] * d_in[idx] * d_in[idx];
int denom = 6; // 3!
int sign = -1;
for (int j=1; j<=TERMS;j++)
{
value += sign * numer / denom;
numer *= d_in[idx] * d_in[idx];
denom *= (2*j+2) * (2*j+3);
sign *= -1;
}
d_gpu_result[idx] = value;
}
// BEGIN: timing and error checking routines (do not modify)
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
std::cout << std::setprecision(5);
std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
return end_time - start_time;
}
void checkErrors(const char label[])
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaThreadSynchronize();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
char *e = (char*) cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
}
}
// END: timing and error checking routines (do not modify)
int main (int argc, char **argv)
{
//BEGIN: CPU implementation (do not modify)
float *h_cpu_result = (float*)malloc(N*sizeof(float));
float *h_input = (float*)malloc(N*sizeof(float));
//Initialize data on CPU
int i;
for (i=0; i<N; i++)
{
h_input[i] = 0.1f * i;
}
//Execute and time the CPU version
long long CPU_start_time = start_timer();
sine_serial(h_input, h_cpu_result);
long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
//END: CPU implementation (do not modify)
//Preparing and running my kernel
float * d_in;
float *h_gpu_result = (float*)malloc(N*sizeof(float));
float * d_gpu_result;
//setting aside memory
cudaMalloc((void **) &d_in, N*sizeof(float));
cudaMalloc((void **) &d_gpu_result, N*sizeof(float));
//starting timer
long long GPU_start_time = start_timer();
//copying input memory from cpu to gpu
cudaMemcpy(d_in, h_input, N * sizeof(float), cudaMemcpyHostToDevice);
//running the threads
sine<<<(N + 255) / 256, 256>>>(d_gpu_result, d_in);
//copying the output memory from the gpu to the cpu
cudaMemcpy(h_gpu_result, d_gpu_result, N*sizeof(float), cudaMemcpyDeviceToHost);
//stopping the timer
long long GPU_time = stop_timer(GPU_start_time, "\nGPU Run Time");
// Checking to make sure the CPU and GPU results match - Do not modify
int errorCount = 0;
for (i=0; i<N; i++)
{
if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
errorCount = errorCount + 1;
}
if (errorCount > 0)
printf("Result comparison failed.\n");
else
printf("Result comparison passed.\n");
// Cleaning up memory
free(h_input);
free(h_cpu_result);
free(h_gpu_result);
return 0;
}
|
KernelInvoker.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KernelInvoker.cuh"
#include <iostream>
#include <stdexcept>
extern int* h_no_sensors;
extern int* h_no_hits;
extern int* h_sensor_Zs;
extern int* h_sensor_hitStarts;
extern int* h_sensor_hitNums;
extern unsigned int* h_hit_IDs;
extern float* h_hit_Xs;
extern float* h_hit_Ys;
extern float* h_hit_Zs;
hipError_t invokeParallelSearch(
const int startingEvent,
const int eventsToProcess,
const std::vector<const Data*> & input,
std::vector<Data> & output) {
// DEBUG << "Input pointer: "
// << std::hex << "0x" << (long long int) &(input[0])
// << std::dec << std::endl;
const Data* startingEvent_input = input[startingEvent];
setHPointersFromInput(
startingEvent_input->data(),
startingEvent_input->size());
std::map<int, int> zhit_to_module;
if (logger::ll.verbosityLevel > 0) {
// map to convert from z of hit to module
for(int i = 0; i < *h_no_sensors; ++i) {
const int z = h_sensor_Zs[i];
zhit_to_module[z] = i;
}
// Some hits z may not correspond to a sensor's,
// but be close enough
for(int i=0; i<*h_no_hits; ++i) {
const int z = h_hit_Zs[i];
if (zhit_to_module.find(z) == zhit_to_module.end()) {
const int sensor = findClosestModule(z, zhit_to_module);
zhit_to_module[z] = sensor;
}
}
}
// int* h_prevs, *h_nexts;
// Histo histo;
Track* dev_tracks;
char* dev_input;
int* dev_tracks_to_follow;
bool* dev_hit_used;
int* dev_atomicsStorage;
Track* dev_tracklets;
int* dev_weak_tracks;
int* dev_event_offsets;
int* dev_hit_offsets;
float* dev_best_fits;
int* dev_hit_candidates;
int* dev_hit_h2_candidates;
// Choose which GPU to run on, change this on a multi-GPU system.
const int device_number = 0;
cudaCheck(hipSetDevice(device_number));
#if USE_SHARED_FOR_HITS
cudaCheck(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
#else
cudaCheck(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
#endif
hipDeviceProp_t* device_properties = (hipDeviceProp_t*) malloc(sizeof(hipDeviceProp_t));
hipGetDeviceProperties(device_properties, 0);
// Some startup settings
dim3 numBlocks(eventsToProcess);
dim3 numThreads(NUMTHREADS_X, 2);
hipFuncSetCacheConfig(searchByTriplet, hipFuncCachePreferShared);
// Allocate memory
// Prepare event offset and hit offset
std::vector<int> event_offsets;
std::vector<int> hit_offsets;
int acc_size = 0, acc_hits = 0;
for (int i=0; i<eventsToProcess; ++i) {
EventBeginning* event = (EventBeginning*) &(*(input[startingEvent + i]))[0];
const int event_size = input[startingEvent + i]->size();
event_offsets.push_back(acc_size);
hit_offsets.push_back(acc_hits);
acc_size += event_size;
acc_hits += event->numberOfHits;
}
// Allocate CPU buffers
const int atomic_space = NUM_ATOMICS + 1;
int* atomics = (int*) malloc(eventsToProcess * atomic_space * sizeof(int));
int* hit_candidates = (int*) malloc(2 * acc_hits * sizeof(int));
// Allocate GPU buffers
cudaCheck(hipMalloc((void**)&dev_tracks, eventsToProcess * MAX_TRACKS * sizeof(Track)));
cudaCheck(hipMalloc((void**)&dev_tracklets, acc_hits * sizeof(Track)));
cudaCheck(hipMalloc((void**)&dev_weak_tracks, acc_hits * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_tracks_to_follow, eventsToProcess * TTF_MODULO * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_atomicsStorage, eventsToProcess * atomic_space * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_event_offsets, event_offsets.size() * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_hit_offsets, hit_offsets.size() * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_hit_used, acc_hits * sizeof(bool)));
cudaCheck(hipMalloc((void**)&dev_input, acc_size));
cudaCheck(hipMalloc((void**)&dev_best_fits, eventsToProcess * numThreads.x * MAX_NUMTHREADS_Y * sizeof(float)));
cudaCheck(hipMalloc((void**)&dev_hit_candidates, 2 * acc_hits * sizeof(int)));
cudaCheck(hipMalloc((void**)&dev_hit_h2_candidates, 2 * acc_hits * sizeof(int)));
// Copy stuff from host memory to GPU buffers
cudaCheck(hipMemcpy(dev_event_offsets, &event_offsets[0], event_offsets.size() * sizeof(int), hipMemcpyHostToDevice));
cudaCheck(hipMemcpy(dev_hit_offsets, &hit_offsets[0], hit_offsets.size() * sizeof(int), hipMemcpyHostToDevice));
acc_size = 0;
for (int i=0; i<eventsToProcess; ++i) {
cudaCheck(hipMemcpy(&dev_input[acc_size], &(*(input[startingEvent + i]))[0], input[startingEvent + i]->size(), hipMemcpyHostToDevice));
acc_size += input[startingEvent + i]->size();
}
// Adding timing
// Timing calculation
unsigned int niterations = 1;
unsigned int nexperiments = 1;
std::vector<std::vector<float>> time_values {nexperiments};
std::vector<std::map<std::string, float>> mresults {nexperiments};
// std::vector<std::string> exp_names {nexperiments};
DEBUG << "Now, on your " << device_properties->name << ": searchByTriplet with " << eventsToProcess << " event" << (eventsToProcess>1 ? "s" : "") << std::endl
<< " " << nexperiments << " experiments, " << niterations << " iterations" << std::endl;
for (auto i=0; i<nexperiments; ++i) {
DEBUG << i << ": " << std::flush;
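// when more than one experiment is run, experiment i uses i+1 threads in the y-dimension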
if (nexperiments!=1) numThreads.y = i+1;
for (auto j=0; j<niterations; ++j) {
// Initialize what we need
cudaCheck(hipMemset(dev_hit_used, false, acc_hits * sizeof(bool)));
cudaCheck(hipMemset(dev_atomicsStorage, 0, eventsToProcess * atomic_space * sizeof(int)));
cudaCheck(hipMemset(dev_hit_candidates, -1, 2 * acc_hits * sizeof(int)));
cudaCheck(hipMemset(dev_hit_h2_candidates, -1, 2 * acc_hits * sizeof(int)));
// Just for debugging purposes
cudaCheck(hipMemset(dev_tracks, 0, eventsToProcess * MAX_TRACKS * sizeof(Track)));
cudaCheck(hipMemset(dev_tracklets, 0, acc_hits * sizeof(Track)));
cudaCheck(hipMemset(dev_tracks_to_follow, 0, eventsToProcess * TTF_MODULO * sizeof(int)));
// searchByTriplet
hipEvent_t start_searchByTriplet, stop_searchByTriplet;
float t0;
hipEventCreate(&start_searchByTriplet);
hipEventCreate(&stop_searchByTriplet);
hipEventRecord(start_searchByTriplet, 0 );
// Dynamic allocation - , 3 * numThreads.x * sizeof(float)
hipLaunchKernelGGL(( searchByTriplet), dim3(numBlocks), dim3(numThreads), 0, 0, dev_tracks, (const char*) dev_input,
dev_tracks_to_follow, dev_hit_used, dev_atomicsStorage, dev_tracklets,
dev_weak_tracks, dev_event_offsets, dev_hit_offsets, dev_best_fits,
dev_hit_candidates, dev_hit_h2_candidates);
hipEventRecord( stop_searchByTriplet, 0 );
hipEventSynchronize( stop_searchByTriplet );
hipEventElapsedTime( &t0, start_searchByTriplet, stop_searchByTriplet );
hipEventDestroy( start_searchByTriplet );
hipEventDestroy( stop_searchByTriplet );
cudaCheck( hipPeekAtLastError() );
time_values[i].push_back(t0);
DEBUG << "." << std::flush;
}
DEBUG << std::endl;
}
// Get results
DEBUG << "Number of tracks found per event:" << std::endl << " ";
cudaCheck(hipMemcpy(atomics, dev_atomicsStorage, eventsToProcess * atomic_space * sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < eventsToProcess; ++i) {
const int numberOfTracks = atomics[i];
DEBUG << numberOfTracks << ", ";
output[startingEvent + i].resize(numberOfTracks * sizeof(Track));
cudaCheck(hipMemcpy(&(output[startingEvent + i])[0], &dev_tracks[i * MAX_TRACKS], numberOfTracks * sizeof(Track), hipMemcpyDeviceToHost));
}
DEBUG << std::endl;
// cudaCheck(hipMemcpy(hit_candidates, dev_hit_candidates, 2 * acc_hits * sizeof(int), hipMemcpyDeviceToHost));
// std::ofstream hc0("hit_candidates.0");
// std::ofstream hc1("hit_candidates.1");
// for (int i=0; i<hit_offsets[1] * 2; ++i) hc0 << hit_candidates[i] << std::endl;
// for (int i=hit_offsets[1] * 2; i<acc_hits * 2; ++i) hc1 << hit_candidates[i] << std::endl;
// hc0.close();
// hc1.close();
// Print solution tracks of event 0
if (PRINT_SOLUTION) {
const int numberOfTracks = output[0].size() / sizeof(Track);
Track* tracks_in_solution = (Track*) &(output[0])[0];
if (logger::ll.verbosityLevel > 0) {
for(int i=0; i<numberOfTracks; ++i) {
printTrack(tracks_in_solution, i, zhit_to_module);
}
}
}
DEBUG << std::endl << "Time averages:" << std::endl;
for (auto i=0; i<nexperiments; ++i) {
mresults[i] = calcResults(time_values[i]);
DEBUG << " nthreads (" << NUMTHREADS_X << ", " << (nexperiments==1 ? numThreads.y : i+1) << "): " << mresults[i]["mean"]
<< " ms (std dev " << mresults[i]["deviation"] << ")" << std::endl;
}
free(atomics);
return hipSuccess;
}
/**
* Prints tracks
* Track #n, length <length>:
* <ID> module <module>, x <x>, y <y>, z <z>
*
* @param tracks
* @param trackNumber
*/
void printTrack(const Track * tracks, const int trackNumber, const std::map<int, int>& zhit_to_module) {
const Track t = tracks[trackNumber];
DEBUG << "Track #" << trackNumber << ", length " << (int) t.hitsNum << std::endl;
for(int i=0; i<t.hitsNum; ++i) {
const int hitNumber = t.hits[i];
const unsigned int id = h_hit_IDs[hitNumber];
const float x = h_hit_Xs[hitNumber];
const float y = h_hit_Ys[hitNumber];
const float z = h_hit_Zs[hitNumber];
const int module = zhit_to_module.at((int) z);
DEBUG << " " << std::setw(8) << id << " (" << hitNumber << ")"
<< " module " << std::setw(2) << module
<< ", x " << std::setw(6) << x
<< ", y " << std::setw(6) << y
<< ", z " << std::setw(6) << z << std::endl;
}
DEBUG << std::endl;
}
/**
* The z of the hit may not correspond to any z in the sensors.
* @param z
* @param zhit_to_module
* @return sensor number
*/
int findClosestModule(const int z, const std::map<int, int>& zhit_to_module) {
if (zhit_to_module.find(z) != zhit_to_module.end())
return zhit_to_module.at(z);
int error = 0;
while(true) {
error++;
const int lowerAttempt = z - error;
const int higherAttempt = z + error;
if (zhit_to_module.find(lowerAttempt) != zhit_to_module.end()) {
return zhit_to_module.at(lowerAttempt);
}
if (zhit_to_module.find(higherAttempt) != zhit_to_module.end()) {
return zhit_to_module.at(higherAttempt);
}
}
}
void printOutAllSensorHits(int* prevs, int* nexts) {
DEBUG << "All valid sensor hits: " << std::endl;
for(int i=0; i<h_no_sensors[0]; ++i) {
for(int j=0; j<h_sensor_hitNums[i]; ++j) {
int hit = h_sensor_hitStarts[i] + j;
if(nexts[hit] != -1) {
DEBUG << hit << ", " << nexts[hit] << std::endl;
}
}
}
}
void printOutSensorHits(int sensorNumber, int* prevs, int* nexts) {
for(int i=0; i<h_sensor_hitNums[sensorNumber]; ++i) {
int hstart = h_sensor_hitStarts[sensorNumber];
DEBUG << hstart + i << ": " << prevs[hstart + i] << ", " << nexts[hstart + i] << std::endl;
}
}
void printInfo(int numberOfSensors, int numberOfHits) {
numberOfSensors = numberOfSensors>52 ? 52 : numberOfSensors;
DEBUG << "Read info:" << std::endl
<< " no sensors: " << h_no_sensors[0] << std::endl
<< " no hits: " << h_no_hits[0] << std::endl
<< numberOfSensors << " sensors: " << std::endl;
for (int i=0; i<numberOfSensors; ++i) {
DEBUG << " Zs: " << h_sensor_Zs[i] << std::endl
<< " hitStarts: " << h_sensor_hitStarts[i] << std::endl
<< " hitNums: " << h_sensor_hitNums[i] << std::endl << std::endl;
}
DEBUG << numberOfHits << " hits: " << std::endl;
for (int i=0; i<numberOfHits; ++i) {
DEBUG << " hit_id: " << h_hit_IDs[i] << std::endl
<< " hit_X: " << h_hit_Xs[i] << std::endl
<< " hit_Y: " << h_hit_Ys[i] << std::endl
<< " hit_Z: " << h_hit_Zs[i] << std::endl << std::endl;
}
}
void getMaxNumberOfHits(char*& input, int& maxHits) {
int* l_no_sensors = (int*) &input[0];
int* l_no_hits = (int*) (l_no_sensors + 1);
int* l_sensor_Zs = (int*) (l_no_hits + 1);
int* l_sensor_hitStarts = (int*) (l_sensor_Zs + l_no_sensors[0]);
int* l_sensor_hitNums = (int*) (l_sensor_hitStarts + l_no_sensors[0]);
maxHits = 0;
for(int i=0; i<l_no_sensors[0]; ++i) {
if(l_sensor_hitNums[i] > maxHits)
maxHits = l_sensor_hitNums[i];
}
}
| KernelInvoker.cu | #include "KernelInvoker.cuh"
#include <iostream>
#include <stdexcept>
extern int* h_no_sensors;
extern int* h_no_hits;
extern int* h_sensor_Zs;
extern int* h_sensor_hitStarts;
extern int* h_sensor_hitNums;
extern unsigned int* h_hit_IDs;
extern float* h_hit_Xs;
extern float* h_hit_Ys;
extern float* h_hit_Zs;
cudaError_t invokeParallelSearch(
const int startingEvent,
const int eventsToProcess,
const std::vector<const Data*> & input,
std::vector<Data> & output) {
// DEBUG << "Input pointer: "
// << std::hex << "0x" << (long long int) &(input[0])
// << std::dec << std::endl;
const Data* startingEvent_input = input[startingEvent];
setHPointersFromInput(
startingEvent_input->data(),
startingEvent_input->size());
std::map<int, int> zhit_to_module;
if (logger::ll.verbosityLevel > 0) {
// map to convert from z of hit to module
for(int i = 0; i < *h_no_sensors; ++i) {
const int z = h_sensor_Zs[i];
zhit_to_module[z] = i;
}
// Some hits z may not correspond to a sensor's,
// but be close enough
for(int i=0; i<*h_no_hits; ++i) {
const int z = h_hit_Zs[i];
if (zhit_to_module.find(z) == zhit_to_module.end()) {
const int sensor = findClosestModule(z, zhit_to_module);
zhit_to_module[z] = sensor;
}
}
}
// int* h_prevs, *h_nexts;
// Histo histo;
Track* dev_tracks;
char* dev_input;
int* dev_tracks_to_follow;
bool* dev_hit_used;
int* dev_atomicsStorage;
Track* dev_tracklets;
int* dev_weak_tracks;
int* dev_event_offsets;
int* dev_hit_offsets;
float* dev_best_fits;
int* dev_hit_candidates;
int* dev_hit_h2_candidates;
// Choose which GPU to run on, change this on a multi-GPU system.
const int device_number = 0;
cudaCheck(cudaSetDevice(device_number));
#if USE_SHARED_FOR_HITS
cudaCheck(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
#else
cudaCheck(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
#endif
cudaDeviceProp* device_properties = (cudaDeviceProp*) malloc(sizeof(cudaDeviceProp));
cudaGetDeviceProperties(device_properties, 0);
// Some startup settings
dim3 numBlocks(eventsToProcess);
dim3 numThreads(NUMTHREADS_X, 2);
cudaFuncSetCacheConfig(searchByTriplet, cudaFuncCachePreferShared);
// Allocate memory
// Prepare event offset and hit offset
std::vector<int> event_offsets;
std::vector<int> hit_offsets;
int acc_size = 0, acc_hits = 0;
for (int i=0; i<eventsToProcess; ++i) {
EventBeginning* event = (EventBeginning*) &(*(input[startingEvent + i]))[0];
const int event_size = input[startingEvent + i]->size();
event_offsets.push_back(acc_size);
hit_offsets.push_back(acc_hits);
acc_size += event_size;
acc_hits += event->numberOfHits;
}
// Allocate CPU buffers
const int atomic_space = NUM_ATOMICS + 1;
int* atomics = (int*) malloc(eventsToProcess * atomic_space * sizeof(int));
int* hit_candidates = (int*) malloc(2 * acc_hits * sizeof(int));
// Allocate GPU buffers
cudaCheck(cudaMalloc((void**)&dev_tracks, eventsToProcess * MAX_TRACKS * sizeof(Track)));
cudaCheck(cudaMalloc((void**)&dev_tracklets, acc_hits * sizeof(Track)));
cudaCheck(cudaMalloc((void**)&dev_weak_tracks, acc_hits * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_tracks_to_follow, eventsToProcess * TTF_MODULO * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_atomicsStorage, eventsToProcess * atomic_space * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_event_offsets, event_offsets.size() * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_hit_offsets, hit_offsets.size() * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_hit_used, acc_hits * sizeof(bool)));
cudaCheck(cudaMalloc((void**)&dev_input, acc_size));
cudaCheck(cudaMalloc((void**)&dev_best_fits, eventsToProcess * numThreads.x * MAX_NUMTHREADS_Y * sizeof(float)));
cudaCheck(cudaMalloc((void**)&dev_hit_candidates, 2 * acc_hits * sizeof(int)));
cudaCheck(cudaMalloc((void**)&dev_hit_h2_candidates, 2 * acc_hits * sizeof(int)));
// Copy stuff from host memory to GPU buffers
cudaCheck(cudaMemcpy(dev_event_offsets, &event_offsets[0], event_offsets.size() * sizeof(int), cudaMemcpyHostToDevice));
cudaCheck(cudaMemcpy(dev_hit_offsets, &hit_offsets[0], hit_offsets.size() * sizeof(int), cudaMemcpyHostToDevice));
acc_size = 0;
for (int i=0; i<eventsToProcess; ++i) {
cudaCheck(cudaMemcpy(&dev_input[acc_size], &(*(input[startingEvent + i]))[0], input[startingEvent + i]->size(), cudaMemcpyHostToDevice));
acc_size += input[startingEvent + i]->size();
}
// Adding timing
// Timing calculation
unsigned int niterations = 1;
unsigned int nexperiments = 1;
std::vector<std::vector<float>> time_values {nexperiments};
std::vector<std::map<std::string, float>> mresults {nexperiments};
// std::vector<std::string> exp_names {nexperiments};
DEBUG << "Now, on your " << device_properties->name << ": searchByTriplet with " << eventsToProcess << " event" << (eventsToProcess>1 ? "s" : "") << std::endl
<< " " << nexperiments << " experiments, " << niterations << " iterations" << std::endl;
for (auto i=0; i<nexperiments; ++i) {
DEBUG << i << ": " << std::flush;
if (nexperiments!=1) numThreads.y = i+1;
for (auto j=0; j<niterations; ++j) {
// Initialize what we need
cudaCheck(cudaMemset(dev_hit_used, false, acc_hits * sizeof(bool)));
cudaCheck(cudaMemset(dev_atomicsStorage, 0, eventsToProcess * atomic_space * sizeof(int)));
cudaCheck(cudaMemset(dev_hit_candidates, -1, 2 * acc_hits * sizeof(int)));
cudaCheck(cudaMemset(dev_hit_h2_candidates, -1, 2 * acc_hits * sizeof(int)));
// Just for debugging purposes
cudaCheck(cudaMemset(dev_tracks, 0, eventsToProcess * MAX_TRACKS * sizeof(Track)));
cudaCheck(cudaMemset(dev_tracklets, 0, acc_hits * sizeof(Track)));
cudaCheck(cudaMemset(dev_tracks_to_follow, 0, eventsToProcess * TTF_MODULO * sizeof(int)));
// searchByTriplet
cudaEvent_t start_searchByTriplet, stop_searchByTriplet;
float t0;
cudaEventCreate(&start_searchByTriplet);
cudaEventCreate(&stop_searchByTriplet);
cudaEventRecord(start_searchByTriplet, 0 );
// Dynamic allocation - , 3 * numThreads.x * sizeof(float)
searchByTriplet<<<numBlocks, numThreads>>>(dev_tracks, (const char*) dev_input,
dev_tracks_to_follow, dev_hit_used, dev_atomicsStorage, dev_tracklets,
dev_weak_tracks, dev_event_offsets, dev_hit_offsets, dev_best_fits,
dev_hit_candidates, dev_hit_h2_candidates);
cudaEventRecord( stop_searchByTriplet, 0 );
cudaEventSynchronize( stop_searchByTriplet );
cudaEventElapsedTime( &t0, start_searchByTriplet, stop_searchByTriplet );
cudaEventDestroy( start_searchByTriplet );
cudaEventDestroy( stop_searchByTriplet );
cudaCheck( cudaPeekAtLastError() );
time_values[i].push_back(t0);
DEBUG << "." << std::flush;
}
DEBUG << std::endl;
}
// Get results
DEBUG << "Number of tracks found per event:" << std::endl << " ";
cudaCheck(cudaMemcpy(atomics, dev_atomicsStorage, eventsToProcess * atomic_space * sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < eventsToProcess; ++i) {
const int numberOfTracks = atomics[i];
DEBUG << numberOfTracks << ", ";
output[startingEvent + i].resize(numberOfTracks * sizeof(Track));
cudaCheck(cudaMemcpy(&(output[startingEvent + i])[0], &dev_tracks[i * MAX_TRACKS], numberOfTracks * sizeof(Track), cudaMemcpyDeviceToHost));
}
DEBUG << std::endl;
// cudaCheck(cudaMemcpy(hit_candidates, dev_hit_candidates, 2 * acc_hits * sizeof(int), cudaMemcpyDeviceToHost));
// std::ofstream hc0("hit_candidates.0");
// std::ofstream hc1("hit_candidates.1");
// for (int i=0; i<hit_offsets[1] * 2; ++i) hc0 << hit_candidates[i] << std::endl;
// for (int i=hit_offsets[1] * 2; i<acc_hits * 2; ++i) hc1 << hit_candidates[i] << std::endl;
// hc0.close();
// hc1.close();
// Print solution tracks of event 0
if (PRINT_SOLUTION) {
const int numberOfTracks = output[0].size() / sizeof(Track);
Track* tracks_in_solution = (Track*) &(output[0])[0];
if (logger::ll.verbosityLevel > 0) {
for(int i=0; i<numberOfTracks; ++i) {
printTrack(tracks_in_solution, i, zhit_to_module);
}
}
}
DEBUG << std::endl << "Time averages:" << std::endl;
for (auto i=0; i<nexperiments; ++i) {
mresults[i] = calcResults(time_values[i]);
DEBUG << " nthreads (" << NUMTHREADS_X << ", " << (nexperiments==1 ? numThreads.y : i+1) << "): " << mresults[i]["mean"]
<< " ms (std dev " << mresults[i]["deviation"] << ")" << std::endl;
}
free(atomics);
return cudaSuccess;
}
/**
* Prints tracks
* Track #n, length <length>:
* <ID> module <module>, x <x>, y <y>, z <z>
*
* @param tracks
* @param trackNumber
*/
void printTrack(const Track * tracks, const int trackNumber, const std::map<int, int>& zhit_to_module) {
const Track t = tracks[trackNumber];
DEBUG << "Track #" << trackNumber << ", length " << (int) t.hitsNum << std::endl;
for(int i=0; i<t.hitsNum; ++i) {
const int hitNumber = t.hits[i];
const unsigned int id = h_hit_IDs[hitNumber];
const float x = h_hit_Xs[hitNumber];
const float y = h_hit_Ys[hitNumber];
const float z = h_hit_Zs[hitNumber];
const int module = zhit_to_module.at((int) z);
DEBUG << " " << std::setw(8) << id << " (" << hitNumber << ")"
<< " module " << std::setw(2) << module
<< ", x " << std::setw(6) << x
<< ", y " << std::setw(6) << y
<< ", z " << std::setw(6) << z << std::endl;
}
DEBUG << std::endl;
}
/**
* The z of the hit may not correspond to any z in the sensors.
* @param z
* @param zhit_to_module
* @return sensor number
*/
int findClosestModule(const int z, const std::map<int, int>& zhit_to_module) {
if (zhit_to_module.find(z) != zhit_to_module.end())
return zhit_to_module.at(z);
int error = 0;
while(true) {
error++;
const int lowerAttempt = z - error;
const int higherAttempt = z + error;
if (zhit_to_module.find(lowerAttempt) != zhit_to_module.end()) {
return zhit_to_module.at(lowerAttempt);
}
if (zhit_to_module.find(higherAttempt) != zhit_to_module.end()) {
return zhit_to_module.at(higherAttempt);
}
}
}
void printOutAllSensorHits(int* prevs, int* nexts) {
DEBUG << "All valid sensor hits: " << std::endl;
for(int i=0; i<h_no_sensors[0]; ++i) {
for(int j=0; j<h_sensor_hitNums[i]; ++j) {
int hit = h_sensor_hitStarts[i] + j;
if(nexts[hit] != -1) {
DEBUG << hit << ", " << nexts[hit] << std::endl;
}
}
}
}
void printOutSensorHits(int sensorNumber, int* prevs, int* nexts) {
for(int i=0; i<h_sensor_hitNums[sensorNumber]; ++i) {
int hstart = h_sensor_hitStarts[sensorNumber];
DEBUG << hstart + i << ": " << prevs[hstart + i] << ", " << nexts[hstart + i] << std::endl;
}
}
void printInfo(int numberOfSensors, int numberOfHits) {
numberOfSensors = numberOfSensors>52 ? 52 : numberOfSensors;
DEBUG << "Read info:" << std::endl
<< " no sensors: " << h_no_sensors[0] << std::endl
<< " no hits: " << h_no_hits[0] << std::endl
<< numberOfSensors << " sensors: " << std::endl;
for (int i=0; i<numberOfSensors; ++i) {
DEBUG << " Zs: " << h_sensor_Zs[i] << std::endl
<< " hitStarts: " << h_sensor_hitStarts[i] << std::endl
<< " hitNums: " << h_sensor_hitNums[i] << std::endl << std::endl;
}
DEBUG << numberOfHits << " hits: " << std::endl;
for (int i=0; i<numberOfHits; ++i) {
DEBUG << " hit_id: " << h_hit_IDs[i] << std::endl
<< " hit_X: " << h_hit_Xs[i] << std::endl
<< " hit_Y: " << h_hit_Ys[i] << std::endl
<< " hit_Z: " << h_hit_Zs[i] << std::endl << std::endl;
}
}
void getMaxNumberOfHits(char*& input, int& maxHits) {
int* l_no_sensors = (int*) &input[0];
int* l_no_hits = (int*) (l_no_sensors + 1);
int* l_sensor_Zs = (int*) (l_no_hits + 1);
int* l_sensor_hitStarts = (int*) (l_sensor_Zs + l_no_sensors[0]);
int* l_sensor_hitNums = (int*) (l_sensor_hitStarts + l_no_sensors[0]);
maxHits = 0;
for(int i=0; i<l_no_sensors[0]; ++i) {
if(l_sensor_hitNums[i] > maxHits)
maxHits = l_sensor_hitNums[i];
}
}
|
b543099f60542696b9f6d6c65098201c8d5115ad.hip | // !!! This is a file automatically generated by hipify!!!
// incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
int main(int argc, char *argv[])
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, N = atoi(argv[1]);
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
hipMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = atoi(argv[2]);
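// ceiling division: one extra block when N is not a multiple of blockSize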
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, a_d, N);
// Retrieve result from device and store in b_h
hipMemcpy(b_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost);
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); hipFree(a_d);
}
| b543099f60542696b9f6d6c65098201c8d5115ad.cu | // incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
void incrementArrayOnHost(float *a, int N)
{
int i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, int N)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx]+1.f;
}
int main(int argc, char *argv[])
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
int i, N = atoi(argv[1]);
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
cudaMalloc((void **) &a_d, size);
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
int blockSize = atoi(argv[2]);
int nBlocks = N/blockSize + (N%blockSize == 0?0:1);
// Part 2 of 2. Call incrementArrayOnDevice kernel
incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, N);
// Retrieve result from device and store in b_h
cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); cudaFree(a_d);
}
|
38308573f79957dafdce5b9df0ff2506bddfb7d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <petscsys.h>
#include <assert.h>
#include "ex52_gpu.h"
#if (SPATIAL_DIM_0 == 2)
__device__ vecType f1_laplacian_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType l = {kappa*gradU[comp].x, kappa*gradU[comp].y};
return l;
}
__device__ vecType f1_elasticity_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType f1;
switch(comp) {
case 0:
f1.x = 0.5*(gradU[0].x + gradU[0].x);
f1.y = 0.5*(gradU[0].y + gradU[1].x);
break;
case 1:
f1.x = 0.5*(gradU[1].x + gradU[0].y);
f1.y = 0.5*(gradU[1].y + gradU[1].y);
}
return f1;
}
#elif (SPATIAL_DIM_0 == 3)
__device__ vecType f1_laplacian_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType l = {kappa*gradU[comp].x, kappa*gradU[comp].y, kappa*gradU[comp].z};
return l;
}
__device__ vecType f1_elasticity_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType f1;
switch(comp) {
case 0:
f1.x = 0.5*(gradU[0].x + gradU[0].x);
f1.y = 0.5*(gradU[0].y + gradU[1].x);
f1.z = 0.5*(gradU[0].z + gradU[2].x);
break;
case 1:
f1.x = 0.5*(gradU[1].x + gradU[0].y);
f1.y = 0.5*(gradU[1].y + gradU[1].y);
f1.z = 0.5*(gradU[1].z + gradU[2].y);
break;
case 2:
f1.x = 0.5*(gradU[2].x + gradU[0].z);
f1.y = 0.5*(gradU[2].y + gradU[1].z);
f1.z = 0.5*(gradU[2].z + gradU[2].z);
}
return f1;
}
#else
#error "Invalid spatial dimension"
#endif
// dim Number of spatial dimensions: 2
// N_b Number of basis functions: generated
// N_{bt} Number of total basis functions: N_b * N_{comp}
// N_q Number of quadrature points: generated
// N_{bs} Number of block cells LCM(N_b, N_q)
// N_{bst} Number of block cell components LCM(N_{bt}, N_q)
// N_{bl} Number of concurrent blocks generated
// N_t Number of threads: N_{bl} * N_{bs}
// N_{cbc} Number of concurrent basis cells: N_{bl} * N_q
// N_{cqc} Number of concurrent quadrature cells: N_{bl} * N_b
// N_{sbc} Number of serial basis cells: N_{bs} / N_q
// N_{sqc} Number of serial quadrature cells: N_{bs} / N_b
// N_{cb} Number of serial cell batches: input
// N_c Number of total cells: N_{cb}*N_{t}/N_{comp}
__global__ void integrateElementCoefQuadrature(int N_cb, realType *coefficients, realType *physCoefficients, realType *jacobianInverses, realType *jacobianDeterminants, realType *elemVec) {
#include "ex52_gpu_inline.h"
const int dim = SPATIAL_DIM_0;
const int N_b = numBasisFunctions_0; // The number of basis functions
const int N_comp = numBasisComponents_0; // The number of basis function components
const int N_bt = N_b*N_comp; // The total number of scalar basis functions
const int N_q = numQuadraturePoints_0; // The number of quadrature points
const int N_bst = N_bt*N_q; // The block size, LCM(N_b*N_comp, N_q), Notice that a block is not processed simultaneously
const int N_t = N_bst*N_bl; // The number of threads, N_bst * N_bl
const int N_bc = N_t/N_comp; // The number of cells per batch (N_b*N_q*N_bl)
const int N_c = N_cb * N_bc;
const int N_sbc = N_bst / (N_q * N_comp);
const int N_sqc = N_bst / N_bt;
/* Calculated indices */
const int tidx = threadIdx.x + blockDim.x*threadIdx.y;
const int blidx = tidx / N_bst; // Block number for this thread
const int bidx = tidx % N_bt; // Basis function mapped to this thread
const int cidx = tidx % N_comp; // Basis component mapped to this thread
const int qidx = tidx % N_q; // Quadrature point mapped to this thread
const int blbidx = tidx % N_q + blidx*N_q; // Cell mapped to this thread in the basis phase
const int blqidx = tidx % N_b + blidx*N_b; // Cell mapped to this thread in the quadrature phase
const int gidx = blockIdx.y*gridDim.x + blockIdx.x;
const int Goffset = gidx*N_c;
const int Coffset = gidx*N_c*N_bt;
const int Poffset = gidx*N_c*N_q;
const int Eoffset = gidx*N_c*N_bt;
/* Quadrature data */
realType w; // $w_q$, Quadrature weight at $x_q$
//__shared__ realType phi_i[N_bt*N_q]; // $\phi_i(x_q)$, Value of the basis function $i$ at $x_q$
__shared__ vecType phiDer_i[N_bt*N_q]; // $\frac{\partial\phi_i(x_q)}{\partial x_d}$, Value of the derivative of basis function $i$ in direction $x_d$ at $x_q$
/* Geometric data */
__shared__ realType detJ[N_t]; // $|J(x_q)|$, Jacobian determinant at $x_q$
__shared__ realType invJ[N_t*dim*dim]; // $J^{-1}(x_q)$, Jacobian inverse at $x_q$
/* FEM data */
__shared__ realType u_i[N_t*N_bt]; // Coefficients $u_i$ of the field $u|_{\mathcal{T}} = \sum_i u_i \phi_i$
/* Physical coefficient data */
realType kappa; // Physical coefficient $\kappa$ in the equation
/* Intermediate calculations */
//__shared__ realType f_0[N_t*N_sqc]; // $f_0(u(x_q), \nabla u(x_q)) |J(x_q)| w_q$
__shared__ vecType f_1[N_t*N_sqc]; // $f_1(u(x_q), \nabla u(x_q)) |J(x_q)| w_q$
/* Output data */
realType e_i; // Coefficient $e_i$ of the residual
/* These should be generated inline */
/* Load quadrature weights */
w = weights_0[qidx];
/* Load basis tabulation \phi_i for this cell */
if (tidx < N_bt*N_q) {
// phi_i[tidx] = Basis_0[tidx];
phiDer_i[tidx] = BasisDerivatives_0[tidx];
}
for(int batch = 0; batch < N_cb; ++batch) {
/* Load geometry */
detJ[tidx] = jacobianDeterminants[Goffset+batch*N_bc+tidx];
for(int n = 0; n < dim*dim; ++n) {
const int offset = n*N_t;
invJ[offset+tidx] = jacobianInverses[(Goffset+batch*N_bc)*dim*dim+offset+tidx];
}
/* Load coefficients u_i for this cell */
for(int n = 0; n < N_bt; ++n) {
const int offset = n*N_t;
u_i[offset+tidx] = coefficients[Coffset+batch*N_t*N_b+offset+tidx];
}
/* Load physical coefficient for this cell */
kappa = physCoefficients[Poffset+batch*N_t*N_q+tidx];
/* Map coefficients to values at quadrature points */
for(int c = 0; c < N_sqc; ++c) {
realType u[N_comp]; // $u(x_q)$, Value of the field at $x_q$
vecType gradU[N_comp]; // $\nabla u(x_q)$, Value of the field gradient at $x_q$
// vecType x = {0.0, 0.0}; // Quadrature point $x_q$
const int cell = c*N_bl*N_b + blqidx;
const int fidx = (cell*N_q + qidx)*N_comp + cidx;
for(int comp = 0; comp < N_comp; ++comp) {
//u[comp] = 0.0;
#if SPATIAL_DIM_0 == 2
gradU[comp].x = 0.0; gradU[comp].y = 0.0;
#elif SPATIAL_DIM_0 == 3
gradU[comp].x = 0.0; gradU[comp].y = 0.0; gradU[comp].z = 0.0;
#endif
}
/* Get field and derivatives at this quadrature point */
for(int i = 0; i < N_b; ++i) {
for(int comp = 0; comp < N_comp; ++comp) {
const int b = i*N_comp+comp;
const int pidx = qidx*N_bt + b;
const int uidx = cell*N_bt + b;
vecType realSpaceDer;
// u[comp] += u_i[uidx]*phi_i[qidx*N_bt+bbidx];
#if SPATIAL_DIM_0 == 2
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y;
gradU[comp].x += u_i[uidx]*realSpaceDer.x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y;
gradU[comp].y += u_i[uidx]*realSpaceDer.y;
#elif SPATIAL_DIM_0 == 3
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+0]*phiDer_i[pidx].z;
gradU[comp].x += u_i[uidx]*realSpaceDer.x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+1]*phiDer_i[pidx].z;
gradU[comp].y += u_i[uidx]*realSpaceDer.y;
realSpaceDer.z = invJ[cell*dim*dim+0*dim+2]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+2]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+2]*phiDer_i[pidx].z;
gradU[comp].z += u_i[uidx]*realSpaceDer.z;
#endif
}
}
/* Process values at quadrature points */
f_1[fidx] = f1_coef_func(u, gradU, kappa, cidx);
#if SPATIAL_DIM_0 == 2
f_1[fidx].x *= detJ[cell]*w; f_1[fidx].y *= detJ[cell]*w;
#elif SPATIAL_DIM_0 == 3
f_1[fidx].x *= detJ[cell]*w; f_1[fidx].y *= detJ[cell]*w; f_1[fidx].z *= detJ[cell]*w;
#endif
}
/* ==== TRANSPOSE THREADS ==== */
__syncthreads();
/* Map values at quadrature points to coefficients */
for(int c = 0; c < N_sbc; ++c) {
const int cell = c*N_bl*N_q + blbidx;
e_i = 0.0;
for(int q = 0; q < N_q; ++q) {
const int pidx = q*N_bt + bidx;
const int fidx = (cell*N_q + q)*N_comp + cidx;
vecType realSpaceDer;
// e_i += phi_i[pidx]*f_0[fidx];
#if SPATIAL_DIM_0 == 2
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y;
e_i += realSpaceDer.x*f_1[fidx].x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y;
e_i += realSpaceDer.y*f_1[fidx].y;
#elif SPATIAL_DIM_0 == 3
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+0]*phiDer_i[pidx].z;
e_i += realSpaceDer.x*f_1[fidx].x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+1]*phiDer_i[pidx].z;
e_i += realSpaceDer.y*f_1[fidx].y;
realSpaceDer.z = invJ[cell*dim*dim+0*dim+2]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+2]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+2]*phiDer_i[pidx].z;
e_i += realSpaceDer.z*f_1[fidx].z;
#endif
}
#if 0
// Check f_1
{
const int q = 0;
const int i = bidx/N_comp;
// Prints f1[0].x, f1[1].x, f1[0].y, f1[1].y
switch(i) {
case 0:
e_i = f_1[(cell*N_q+q)*N_comp+cidx].x;break;
case 1:
e_i = f_1[(cell*N_q+q)*N_comp+cidx].y;break;
//case 2:
//e_i = f_1[(cell*N_q+q)*N_comp+cidx].z;break;
default:
e_i = 0.0;
}
}
// Check that u_i is being used correctly
//e_i = u_i[cell*N_bt+bidx];
e_i = detJ[cell];
//e_i = coefficients[Coffset+(batch*N_sbc+c)*N_t+tidx];
//e_i = Coffset+(batch*N_sbc+c)*N_t+tidx;
//e_i = cell*N_bt+bidx;
#endif
/* Write element vector for N_{cbc} cells at a time */
elemVec[Eoffset+(batch*N_sbc+c)*N_t+tidx] = e_i;
}
/* ==== Could do one write per batch ==== */
}
return;
}
// Calculate a conforming thread grid for N kernels
#undef __FUNCT__
#define __FUNCT__ "calculateGridCoef"
PetscErrorCode calculateGridCoef(const int N, const int blockSize, unsigned int& x, unsigned int& y, unsigned int& z)
{
PetscFunctionBegin;
z = 1;
if (N % blockSize) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid block size %d for %d elements", blockSize, N);
const int Nblocks = N/blockSize;
for(x = (int) (sqrt(Nblocks) + 0.5); x > 0; --x) {
y = Nblocks/x;
if (x*y == Nblocks) break;
}
if (x*y != Nblocks) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Could not find partition for %d with block size %d", N, blockSize);
PetscFunctionReturn(0);
}
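/* A hedged worked example of the factorization above (numbers invented, not taken
   from any run configuration): for N = 1536 cells and blockSize = 96 we get
   Nblocks = 16, the search starts at x = (int)(sqrt(16) + 0.5) = 4 and accepts
   x = 4, y = 4 immediately, so the caller launches a 4 x 4 x 1 grid. */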
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "IntegrateElementCoefBatchGPU"
/*
IntegrateElementCoefBatchGPU - Produces element vectors from input element solution and geometric information via quadrature
Input Parameters:
+ Ne - The total number of cells, Nchunk * Ncb * Nbc
. Ncb - The number of serial cell batches
. Nbc - The number of cells per batch
. Nbl - The number of concurrent cells blocks per thread block
. coefficients - An array of the solution vector for each cell
. physCoefficients - An array of the physical coefficient values at quadrature points for each cell
. jacobianInverses - An array of the inverse Jacobian for each cell
. jacobianDeterminants - An array of the Jacobian determinant for each cell
. event - A PetscEvent, used to log flops
- debug - A flag for debugging information
Output Parameter:
. elemVec - An array of the element vectors for each cell
*/
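/*
  Hedged usage sketch (caller side; the variable names below are invented for
  illustration and are not part of the actual driver code):
    ierr = IntegrateElementCoefBatchGPU(Ne, Ncb, Nbc, N_bl, u, kappa,
                                        invJ, detJ, elemVec,
                                        integrationEvent, 0);CHKERRQ(ierr);
  where Ne = Nchunk*Ncb*Nbc and the arrays are packed cell by cell as described
  in the parameter list above.
*/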
PetscErrorCode IntegrateElementCoefBatchGPU(PetscInt Ne, PetscInt Ncb, PetscInt Nbc, PetscInt Nbl, const PetscScalar coefficients[], const PetscScalar physCoefficients[],
const PetscReal jacobianInverses[], const PetscReal jacobianDeterminants[], PetscScalar elemVec[],
PetscLogEvent event, PetscInt debug) {
#include "ex52_gpu_inline.h"
const int dim = SPATIAL_DIM_0;
const int N_b = numBasisFunctions_0; // The number of basis functions
const int N_comp = numBasisComponents_0; // The number of basis function components
const int N_bt = N_b*N_comp; // The total number of scalar basis functions
const int N_q = numQuadraturePoints_0; // The number of quadrature points
  const int N_bst = N_bt*N_q; // The block size, LCM(N_bt, N_q), Notice that a block is not processed simultaneously
const int N_t = N_bst*N_bl; // The number of threads, N_bst * N_bl
realType *d_coefficients;
realType *d_physCoefficients;
realType *d_jacobianInverses;
realType *d_jacobianDeterminants;
realType *d_elemVec;
PetscErrorCode ierr;
PetscFunctionBegin;
  if (Nbl != N_bl) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Inconsistent block size %d should be %d", Nbl, N_bl);
if (Nbc*N_comp != N_t) SETERRQ3(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of threads %d should be %d * %d", N_t, Nbc, N_comp);
if (!Ne) {
PetscStageLog stageLog;
PetscEventPerfLog eventLog = PETSC_NULL;
PetscInt stage;
ierr = PetscLogGetStageLog(&stageLog);CHKERRQ(ierr);
ierr = PetscStageLogGetCurrent(stageLog, &stage);CHKERRQ(ierr);
ierr = PetscStageLogGetEventPerfLog(stageLog, stage, &eventLog);CHKERRQ(ierr);
/* Log performance info */
eventLog->eventInfo[event].count++;
eventLog->eventInfo[event].time += 0.0;
eventLog->eventInfo[event].flops += 0;
PetscFunctionReturn(0);
}
// Marshalling
ierr = hipMalloc((void**) &d_coefficients, Ne*N_bt * sizeof(realType));CHKERRQ(ierr);
  ierr = hipMalloc((void**) &d_physCoefficients, Ne*N_q * sizeof(realType));CHKERRQ(ierr); /* one value per quadrature point, matching the copies below */
ierr = hipMalloc((void**) &d_jacobianInverses, Ne*dim*dim * sizeof(realType));CHKERRQ(ierr);
ierr = hipMalloc((void**) &d_jacobianDeterminants, Ne * sizeof(realType));CHKERRQ(ierr);
ierr = hipMalloc((void**) &d_elemVec, Ne*N_bt * sizeof(realType));CHKERRQ(ierr);
if (sizeof(PetscReal) == sizeof(realType)) {
ierr = hipMemcpy(d_coefficients, coefficients, Ne*N_bt * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_physCoefficients, physCoefficients, Ne*N_q * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_jacobianInverses, jacobianInverses, Ne*dim*dim * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_jacobianDeterminants, jacobianDeterminants, Ne * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
} else {
realType *c, *pc, *jI, *jD;
PetscInt i;
ierr = PetscMalloc4(Ne*N_bt,realType,&c,Ne*N_q,realType,&pc,Ne*dim*dim,realType,&jI,Ne,realType,&jD);CHKERRQ(ierr);
for(i = 0; i < Ne*N_bt; ++i) {c[i] = coefficients[i];}
for(i = 0; i < Ne*N_q; ++i) {pc[i] = physCoefficients[i];}
for(i = 0; i < Ne*dim*dim; ++i) {jI[i] = jacobianInverses[i];}
for(i = 0; i < Ne; ++i) {jD[i] = jacobianDeterminants[i];}
ierr = hipMemcpy(d_coefficients, c, Ne*N_bt * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_physCoefficients, pc, Ne*N_q * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_jacobianInverses, jI, Ne*dim*dim * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = hipMemcpy(d_jacobianDeterminants, jD, Ne * sizeof(realType), hipMemcpyHostToDevice);CHKERRQ(ierr);
ierr = PetscFree4(c,pc,jI,jD);CHKERRQ(ierr);
}
// Kernel launch
unsigned int x, y, z;
ierr = calculateGridCoef(Ne, Ncb*Nbc, x, y, z);CHKERRQ(ierr);
dim3 grid(x, y, z);
dim3 block(Nbc*N_comp, 1, 1);
hipEvent_t start, stop;
float msElapsedTime;
ierr = hipEventCreate(&start);CHKERRQ(ierr);
ierr = hipEventCreate(&stop);CHKERRQ(ierr);
//if (debug) {
ierr = PetscPrintf(PETSC_COMM_SELF, "GPU layout grid(%d,%d,%d) block(%d,%d,%d) with %d batches\n",
grid.x, grid.y, grid.z, block.x, block.y, block.z, Ncb);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF, " N_t: %d, N_cb: %d\n", N_t, Ncb);
//}
ierr = hipEventRecord(start, 0);CHKERRQ(ierr);
hipLaunchKernelGGL(( integrateElementCoefQuadrature), dim3(grid), dim3(block), 0, 0, Ncb, d_coefficients, d_physCoefficients, d_jacobianInverses, d_jacobianDeterminants, d_elemVec);
ierr = hipEventRecord(stop, 0);CHKERRQ(ierr);
ierr = hipEventSynchronize(stop);CHKERRQ(ierr);
ierr = hipEventElapsedTime(&msElapsedTime, start, stop);CHKERRQ(ierr);
ierr = hipEventDestroy(start);CHKERRQ(ierr);
ierr = hipEventDestroy(stop);CHKERRQ(ierr);
// Marshalling
if (sizeof(PetscReal) == sizeof(realType)) {
ierr = hipMemcpy(elemVec, d_elemVec, Ne*N_bt * sizeof(realType), hipMemcpyDeviceToHost);CHKERRQ(ierr);
} else {
realType *eV;
PetscInt i;
ierr = PetscMalloc(Ne*N_bt * sizeof(realType), &eV);CHKERRQ(ierr);
ierr = hipMemcpy(eV, d_elemVec, Ne*N_bt * sizeof(realType), hipMemcpyDeviceToHost);CHKERRQ(ierr);
for(i = 0; i < Ne*N_bt; ++i) {elemVec[i] = eV[i];}
ierr = PetscFree(eV);CHKERRQ(ierr);
}
  ierr = hipFree(d_coefficients);CHKERRQ(ierr);
  ierr = hipFree(d_physCoefficients);CHKERRQ(ierr);
  ierr = hipFree(d_jacobianInverses);CHKERRQ(ierr);
  ierr = hipFree(d_jacobianDeterminants);CHKERRQ(ierr);
  ierr = hipFree(d_elemVec);CHKERRQ(ierr);
{
PetscStageLog stageLog;
PetscEventPerfLog eventLog = PETSC_NULL;
PetscInt stage;
ierr = PetscLogGetStageLog(&stageLog);CHKERRQ(ierr);
ierr = PetscStageLogGetCurrent(stageLog, &stage);CHKERRQ(ierr);
ierr = PetscStageLogGetEventPerfLog(stageLog, stage, &eventLog);CHKERRQ(ierr);
/* Log performance info */
eventLog->eventInfo[event].count++;
eventLog->eventInfo[event].time += msElapsedTime*1.0e-3;
eventLog->eventInfo[event].flops += (((2+(2+2*dim)*dim)*N_comp*N_b+(2+2)*dim*N_comp)*N_q + (2+2*dim)*dim*N_q*N_comp*N_b)*Ne;
}
PetscFunctionReturn(0);
}
EXTERN_C_END
| 38308573f79957dafdce5b9df0ff2506bddfb7d4.cu | #include <petscsys.h>
#include <assert.h>
#include "ex52_gpu.h"
#if (SPATIAL_DIM_0 == 2)
__device__ vecType f1_laplacian_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType l = {kappa*gradU[comp].x, kappa*gradU[comp].y};
return l;
}
__device__ vecType f1_elasticity_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType f1;
switch(comp) {
case 0:
f1.x = 0.5*(gradU[0].x + gradU[0].x);
f1.y = 0.5*(gradU[0].y + gradU[1].x);
break;
case 1:
f1.x = 0.5*(gradU[1].x + gradU[0].y);
f1.y = 0.5*(gradU[1].y + gradU[1].y);
}
return f1;
}
#elif (SPATIAL_DIM_0 == 3)
__device__ vecType f1_laplacian_coef(realType u[], vecType gradU[], realType kappa, int comp) {
vecType l = {kappa*gradU[comp].x, kappa*gradU[comp].y, kappa*gradU[comp].z};
return l;
}
__device__ vecType f1_elasticity_coef(realType u[], vecType gradU[], int comp) {
vecType f1;
switch(comp) {
case 0:
f1.x = 0.5*(gradU[0].x + gradU[0].x);
f1.y = 0.5*(gradU[0].y + gradU[1].x);
f1.z = 0.5*(gradU[0].z + gradU[2].x);
break;
case 1:
f1.x = 0.5*(gradU[1].x + gradU[0].y);
f1.y = 0.5*(gradU[1].y + gradU[1].y);
f1.z = 0.5*(gradU[1].z + gradU[2].y);
break;
case 2:
f1.x = 0.5*(gradU[2].x + gradU[0].z);
f1.y = 0.5*(gradU[2].y + gradU[1].z);
f1.z = 0.5*(gradU[2].z + gradU[2].z);
}
return f1;
}
#else
#error "Invalid spatial dimension"
#endif
// dim Number of spatial dimensions: 2
// N_b Number of basis functions: generated
// N_{bt} Number of total basis functions: N_b * N_{comp}
// N_q Number of quadrature points: generated
// N_{bs} Number of block cells LCM(N_b, N_q)
// N_{bst} Number of block cell components LCM(N_{bt}, N_q)
// N_{bl} Number of concurrent blocks generated
// N_t Number of threads: N_{bl} * N_{bs}
// N_{cbc} Number of concurrent basis cells: N_{bl} * N_q
// N_{cqc} Number of concurrent quadrature cells: N_{bl} * N_b
// N_{sbc} Number of serial basis cells: N_{bs} / N_q
// N_{sqc} Number of serial quadrature cells: N_{bs} / N_b
// N_{cb} Number of serial cell batches: input
// N_c Number of total cells: N_{cb}*N_{t}/N_{comp}
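// Hedged worked example (hypothetical values, not the generated ones): for a
// 2-D scalar element with N_b = 3, N_comp = 1 and N_q = 2, the definitions
// below give N_bt = 3, N_bst = 6 and, with N_bl = 1, a thread block of
// N_t = 6 threads handling N_bc = 6 cells per batch, with N_sbc = 3 serial
// basis cells and N_sqc = 2 serial quadrature cells.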
__global__ void integrateElementCoefQuadrature(int N_cb, realType *coefficients, realType *physCoefficients, realType *jacobianInverses, realType *jacobianDeterminants, realType *elemVec) {
#include "ex52_gpu_inline.h"
const int dim = SPATIAL_DIM_0;
const int N_b = numBasisFunctions_0; // The number of basis functions
const int N_comp = numBasisComponents_0; // The number of basis function components
const int N_bt = N_b*N_comp; // The total number of scalar basis functions
const int N_q = numQuadraturePoints_0; // The number of quadrature points
const int N_bst = N_bt*N_q; // The block size, LCM(N_b*N_comp, N_q), Notice that a block is not processed simultaneously
const int N_t = N_bst*N_bl; // The number of threads, N_bst * N_bl
const int N_bc = N_t/N_comp; // The number of cells per batch (N_b*N_q*N_bl)
const int N_c = N_cb * N_bc;
const int N_sbc = N_bst / (N_q * N_comp);
const int N_sqc = N_bst / N_bt;
/* Calculated indices */
const int tidx = threadIdx.x + blockDim.x*threadIdx.y;
const int blidx = tidx / N_bst; // Block number for this thread
const int bidx = tidx % N_bt; // Basis function mapped to this thread
const int cidx = tidx % N_comp; // Basis component mapped to this thread
const int qidx = tidx % N_q; // Quadrature point mapped to this thread
const int blbidx = tidx % N_q + blidx*N_q; // Cell mapped to this thread in the basis phase
const int blqidx = tidx % N_b + blidx*N_b; // Cell mapped to this thread in the quadrature phase
const int gidx = blockIdx.y*gridDim.x + blockIdx.x;
const int Goffset = gidx*N_c;
const int Coffset = gidx*N_c*N_bt;
const int Poffset = gidx*N_c*N_q;
const int Eoffset = gidx*N_c*N_bt;
/* Quadrature data */
realType w; // $w_q$, Quadrature weight at $x_q$
//__shared__ realType phi_i[N_bt*N_q]; // $\phi_i(x_q)$, Value of the basis function $i$ at $x_q$
__shared__ vecType phiDer_i[N_bt*N_q]; // $\frac{\partial\phi_i(x_q)}{\partial x_d}$, Value of the derivative of basis function $i$ in direction $x_d$ at $x_q$
/* Geometric data */
__shared__ realType detJ[N_t]; // $|J(x_q)|$, Jacobian determinant at $x_q$
__shared__ realType invJ[N_t*dim*dim]; // $J^{-1}(x_q)$, Jacobian inverse at $x_q$
/* FEM data */
__shared__ realType u_i[N_t*N_bt]; // Coefficients $u_i$ of the field $u|_{\mathcal{T}} = \sum_i u_i \phi_i$
/* Physical coefficient data */
realType kappa; // Physical coefficient $\kappa$ in the equation
/* Intermediate calculations */
//__shared__ realType f_0[N_t*N_sqc]; // $f_0(u(x_q), \nabla u(x_q)) |J(x_q)| w_q$
__shared__ vecType f_1[N_t*N_sqc]; // $f_1(u(x_q), \nabla u(x_q)) |J(x_q)| w_q$
/* Output data */
realType e_i; // Coefficient $e_i$ of the residual
/* These should be generated inline */
/* Load quadrature weights */
w = weights_0[qidx];
/* Load basis tabulation \phi_i for this cell */
if (tidx < N_bt*N_q) {
// phi_i[tidx] = Basis_0[tidx];
phiDer_i[tidx] = BasisDerivatives_0[tidx];
}
for(int batch = 0; batch < N_cb; ++batch) {
/* Load geometry */
detJ[tidx] = jacobianDeterminants[Goffset+batch*N_bc+tidx];
for(int n = 0; n < dim*dim; ++n) {
const int offset = n*N_t;
invJ[offset+tidx] = jacobianInverses[(Goffset+batch*N_bc)*dim*dim+offset+tidx];
}
/* Load coefficients u_i for this cell */
for(int n = 0; n < N_bt; ++n) {
const int offset = n*N_t;
u_i[offset+tidx] = coefficients[Coffset+batch*N_t*N_b+offset+tidx];
}
/* Load physical coefficient for this cell */
kappa = physCoefficients[Poffset+batch*N_t*N_q+tidx];
/* Map coefficients to values at quadrature points */
for(int c = 0; c < N_sqc; ++c) {
realType u[N_comp]; // $u(x_q)$, Value of the field at $x_q$
vecType gradU[N_comp]; // $\nabla u(x_q)$, Value of the field gradient at $x_q$
// vecType x = {0.0, 0.0}; // Quadrature point $x_q$
const int cell = c*N_bl*N_b + blqidx;
const int fidx = (cell*N_q + qidx)*N_comp + cidx;
for(int comp = 0; comp < N_comp; ++comp) {
//u[comp] = 0.0;
#if SPATIAL_DIM_0 == 2
gradU[comp].x = 0.0; gradU[comp].y = 0.0;
#elif SPATIAL_DIM_0 == 3
gradU[comp].x = 0.0; gradU[comp].y = 0.0; gradU[comp].z = 0.0;
#endif
}
/* Get field and derivatives at this quadrature point */
for(int i = 0; i < N_b; ++i) {
for(int comp = 0; comp < N_comp; ++comp) {
const int b = i*N_comp+comp;
const int pidx = qidx*N_bt + b;
const int uidx = cell*N_bt + b;
vecType realSpaceDer;
// u[comp] += u_i[uidx]*phi_i[qidx*N_bt+bbidx];
#if SPATIAL_DIM_0 == 2
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y;
gradU[comp].x += u_i[uidx]*realSpaceDer.x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y;
gradU[comp].y += u_i[uidx]*realSpaceDer.y;
#elif SPATIAL_DIM_0 == 3
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+0]*phiDer_i[pidx].z;
gradU[comp].x += u_i[uidx]*realSpaceDer.x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+1]*phiDer_i[pidx].z;
gradU[comp].y += u_i[uidx]*realSpaceDer.y;
realSpaceDer.z = invJ[cell*dim*dim+0*dim+2]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+2]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+2]*phiDer_i[pidx].z;
gradU[comp].z += u_i[uidx]*realSpaceDer.z;
#endif
}
}
/* Process values at quadrature points */
f_1[fidx] = f1_coef_func(u, gradU, kappa, cidx);
#if SPATIAL_DIM_0 == 2
f_1[fidx].x *= detJ[cell]*w; f_1[fidx].y *= detJ[cell]*w;
#elif SPATIAL_DIM_0 == 3
f_1[fidx].x *= detJ[cell]*w; f_1[fidx].y *= detJ[cell]*w; f_1[fidx].z *= detJ[cell]*w;
#endif
}
/* ==== TRANSPOSE THREADS ==== */
__syncthreads();
/* Map values at quadrature points to coefficients */
for(int c = 0; c < N_sbc; ++c) {
const int cell = c*N_bl*N_q + blbidx;
e_i = 0.0;
for(int q = 0; q < N_q; ++q) {
const int pidx = q*N_bt + bidx;
const int fidx = (cell*N_q + q)*N_comp + cidx;
vecType realSpaceDer;
// e_i += phi_i[pidx]*f_0[fidx];
#if SPATIAL_DIM_0 == 2
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y;
e_i += realSpaceDer.x*f_1[fidx].x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y;
e_i += realSpaceDer.y*f_1[fidx].y;
#elif SPATIAL_DIM_0 == 3
realSpaceDer.x = invJ[cell*dim*dim+0*dim+0]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+0]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+0]*phiDer_i[pidx].z;
e_i += realSpaceDer.x*f_1[fidx].x;
realSpaceDer.y = invJ[cell*dim*dim+0*dim+1]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+1]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+1]*phiDer_i[pidx].z;
e_i += realSpaceDer.y*f_1[fidx].y;
realSpaceDer.z = invJ[cell*dim*dim+0*dim+2]*phiDer_i[pidx].x + invJ[cell*dim*dim+1*dim+2]*phiDer_i[pidx].y + invJ[cell*dim*dim+2*dim+2]*phiDer_i[pidx].z;
e_i += realSpaceDer.z*f_1[fidx].z;
#endif
}
#if 0
// Check f_1
{
const int q = 0;
const int i = bidx/N_comp;
// Prints f1[0].x, f1[1].x, f1[0].y, f1[1].y
switch(i) {
case 0:
e_i = f_1[(cell*N_q+q)*N_comp+cidx].x;break;
case 1:
e_i = f_1[(cell*N_q+q)*N_comp+cidx].y;break;
//case 2:
//e_i = f_1[(cell*N_q+q)*N_comp+cidx].z;break;
default:
e_i = 0.0;
}
}
// Check that u_i is being used correctly
//e_i = u_i[cell*N_bt+bidx];
e_i = detJ[cell];
//e_i = coefficients[Coffset+(batch*N_sbc+c)*N_t+tidx];
//e_i = Coffset+(batch*N_sbc+c)*N_t+tidx;
//e_i = cell*N_bt+bidx;
#endif
/* Write element vector for N_{cbc} cells at a time */
elemVec[Eoffset+(batch*N_sbc+c)*N_t+tidx] = e_i;
}
/* ==== Could do one write per batch ==== */
}
return;
}
// Calculate a conforming thread grid for N kernels
#undef __FUNCT__
#define __FUNCT__ "calculateGridCoef"
PetscErrorCode calculateGridCoef(const int N, const int blockSize, unsigned int& x, unsigned int& y, unsigned int& z)
{
PetscFunctionBegin;
z = 1;
if (N % blockSize) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid block size %d for %d elements", blockSize, N);
const int Nblocks = N/blockSize;
for(x = (int) (sqrt(Nblocks) + 0.5); x > 0; --x) {
y = Nblocks/x;
if (x*y == Nblocks) break;
}
if (x*y != Nblocks) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Could not find partition for %d with block size %d", N, blockSize);
PetscFunctionReturn(0);
}
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "IntegrateElementCoefBatchGPU"
/*
IntegrateElementCoefBatchGPU - Produces element vectors from input element solution and geometric information via quadrature
Input Parameters:
+ Ne - The total number of cells, Nchunk * Ncb * Nbc
. Ncb - The number of serial cell batches
. Nbc - The number of cells per batch
. Nbl - The number of concurrent cells blocks per thread block
. coefficients - An array of the solution vector for each cell
. physCoefficients - An array of the physical coefficient values at quadrature points for each cell
. jacobianInverses - An array of the inverse Jacobian for each cell
. jacobianDeterminants - An array of the Jacobian determinant for each cell
. event - A PetscEvent, used to log flops
- debug - A flag for debugging information
Output Parameter:
. elemVec - An array of the element vectors for each cell
*/
PetscErrorCode IntegrateElementCoefBatchGPU(PetscInt Ne, PetscInt Ncb, PetscInt Nbc, PetscInt Nbl, const PetscScalar coefficients[], const PetscScalar physCoefficients[],
const PetscReal jacobianInverses[], const PetscReal jacobianDeterminants[], PetscScalar elemVec[],
PetscLogEvent event, PetscInt debug) {
#include "ex52_gpu_inline.h"
const int dim = SPATIAL_DIM_0;
const int N_b = numBasisFunctions_0; // The number of basis functions
const int N_comp = numBasisComponents_0; // The number of basis function components
const int N_bt = N_b*N_comp; // The total number of scalar basis functions
const int N_q = numQuadraturePoints_0; // The number of quadrature points
  const int N_bst = N_bt*N_q; // The block size, LCM(N_bt, N_q), Notice that a block is not processed simultaneously
const int N_t = N_bst*N_bl; // The number of threads, N_bst * N_bl
realType *d_coefficients;
realType *d_physCoefficients;
realType *d_jacobianInverses;
realType *d_jacobianDeterminants;
realType *d_elemVec;
PetscErrorCode ierr;
PetscFunctionBegin;
  if (Nbl != N_bl) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Inconsistent block size %d should be %d", Nbl, N_bl);
if (Nbc*N_comp != N_t) SETERRQ3(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Number of threads %d should be %d * %d", N_t, Nbc, N_comp);
if (!Ne) {
PetscStageLog stageLog;
PetscEventPerfLog eventLog = PETSC_NULL;
PetscInt stage;
ierr = PetscLogGetStageLog(&stageLog);CHKERRQ(ierr);
ierr = PetscStageLogGetCurrent(stageLog, &stage);CHKERRQ(ierr);
ierr = PetscStageLogGetEventPerfLog(stageLog, stage, &eventLog);CHKERRQ(ierr);
/* Log performance info */
eventLog->eventInfo[event].count++;
eventLog->eventInfo[event].time += 0.0;
eventLog->eventInfo[event].flops += 0;
PetscFunctionReturn(0);
}
// Marshalling
ierr = cudaMalloc((void**) &d_coefficients, Ne*N_bt * sizeof(realType));CHKERRQ(ierr);
  ierr = cudaMalloc((void**) &d_physCoefficients, Ne*N_q * sizeof(realType));CHKERRQ(ierr); /* one value per quadrature point, matching the copies below */
ierr = cudaMalloc((void**) &d_jacobianInverses, Ne*dim*dim * sizeof(realType));CHKERRQ(ierr);
ierr = cudaMalloc((void**) &d_jacobianDeterminants, Ne * sizeof(realType));CHKERRQ(ierr);
ierr = cudaMalloc((void**) &d_elemVec, Ne*N_bt * sizeof(realType));CHKERRQ(ierr);
if (sizeof(PetscReal) == sizeof(realType)) {
ierr = cudaMemcpy(d_coefficients, coefficients, Ne*N_bt * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_physCoefficients, physCoefficients, Ne*N_q * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_jacobianInverses, jacobianInverses, Ne*dim*dim * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_jacobianDeterminants, jacobianDeterminants, Ne * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
} else {
realType *c, *pc, *jI, *jD;
PetscInt i;
ierr = PetscMalloc4(Ne*N_bt,realType,&c,Ne*N_q,realType,&pc,Ne*dim*dim,realType,&jI,Ne,realType,&jD);CHKERRQ(ierr);
for(i = 0; i < Ne*N_bt; ++i) {c[i] = coefficients[i];}
for(i = 0; i < Ne*N_q; ++i) {pc[i] = physCoefficients[i];}
for(i = 0; i < Ne*dim*dim; ++i) {jI[i] = jacobianInverses[i];}
for(i = 0; i < Ne; ++i) {jD[i] = jacobianDeterminants[i];}
ierr = cudaMemcpy(d_coefficients, c, Ne*N_bt * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_physCoefficients, pc, Ne*N_q * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_jacobianInverses, jI, Ne*dim*dim * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = cudaMemcpy(d_jacobianDeterminants, jD, Ne * sizeof(realType), cudaMemcpyHostToDevice);CHKERRQ(ierr);
ierr = PetscFree4(c,pc,jI,jD);CHKERRQ(ierr);
}
// Kernel launch
unsigned int x, y, z;
ierr = calculateGridCoef(Ne, Ncb*Nbc, x, y, z);CHKERRQ(ierr);
dim3 grid(x, y, z);
dim3 block(Nbc*N_comp, 1, 1);
cudaEvent_t start, stop;
float msElapsedTime;
ierr = cudaEventCreate(&start);CHKERRQ(ierr);
ierr = cudaEventCreate(&stop);CHKERRQ(ierr);
//if (debug) {
ierr = PetscPrintf(PETSC_COMM_SELF, "GPU layout grid(%d,%d,%d) block(%d,%d,%d) with %d batches\n",
grid.x, grid.y, grid.z, block.x, block.y, block.z, Ncb);CHKERRQ(ierr);
ierr = PetscPrintf(PETSC_COMM_SELF, " N_t: %d, N_cb: %d\n", N_t, Ncb);
//}
ierr = cudaEventRecord(start, 0);CHKERRQ(ierr);
integrateElementCoefQuadrature<<<grid, block>>>(Ncb, d_coefficients, d_physCoefficients, d_jacobianInverses, d_jacobianDeterminants, d_elemVec);
ierr = cudaEventRecord(stop, 0);CHKERRQ(ierr);
ierr = cudaEventSynchronize(stop);CHKERRQ(ierr);
ierr = cudaEventElapsedTime(&msElapsedTime, start, stop);CHKERRQ(ierr);
ierr = cudaEventDestroy(start);CHKERRQ(ierr);
ierr = cudaEventDestroy(stop);CHKERRQ(ierr);
// Marshalling
if (sizeof(PetscReal) == sizeof(realType)) {
ierr = cudaMemcpy(elemVec, d_elemVec, Ne*N_bt * sizeof(realType), cudaMemcpyDeviceToHost);CHKERRQ(ierr);
} else {
realType *eV;
PetscInt i;
ierr = PetscMalloc(Ne*N_bt * sizeof(realType), &eV);CHKERRQ(ierr);
ierr = cudaMemcpy(eV, d_elemVec, Ne*N_bt * sizeof(realType), cudaMemcpyDeviceToHost);CHKERRQ(ierr);
for(i = 0; i < Ne*N_bt; ++i) {elemVec[i] = eV[i];}
ierr = PetscFree(eV);CHKERRQ(ierr);
}
  ierr = cudaFree(d_coefficients);CHKERRQ(ierr);
  ierr = cudaFree(d_physCoefficients);CHKERRQ(ierr);
  ierr = cudaFree(d_jacobianInverses);CHKERRQ(ierr);
  ierr = cudaFree(d_jacobianDeterminants);CHKERRQ(ierr);
  ierr = cudaFree(d_elemVec);CHKERRQ(ierr);
{
PetscStageLog stageLog;
PetscEventPerfLog eventLog = PETSC_NULL;
PetscInt stage;
ierr = PetscLogGetStageLog(&stageLog);CHKERRQ(ierr);
ierr = PetscStageLogGetCurrent(stageLog, &stage);CHKERRQ(ierr);
ierr = PetscStageLogGetEventPerfLog(stageLog, stage, &eventLog);CHKERRQ(ierr);
/* Log performance info */
eventLog->eventInfo[event].count++;
eventLog->eventInfo[event].time += msElapsedTime*1.0e-3;
eventLog->eventInfo[event].flops += (((2+(2+2*dim)*dim)*N_comp*N_b+(2+2)*dim*N_comp)*N_q + (2+2*dim)*dim*N_q*N_comp*N_b)*Ne;
}
PetscFunctionReturn(0);
}
EXTERN_C_END
|
0ad41e2444c056a17d114a60d5e57752f1225a18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>
#include <iostream>
#include "caffe/layers/truncation_layer.hpp"
#include "caffe/util/math_functions.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return ((x > y) ? x : y);
}
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return ((x > y) ? y : x);
}
template <typename Dtype>
__global__ void TruncationForward(const int n, const Dtype* in, const Dtype truncation_min, const Dtype truncation_max, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = Min( Max(in[index], truncation_min), truncation_max);
}
}
template <typename Dtype>
void TruncationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
hipLaunchKernelGGL(( TruncationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, Dtype(truncation_min_), Dtype(truncation_max_), top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TruncationBackward(const int n, const Dtype* in_diff,
const Dtype* bottom_data, const Dtype truncation_min, const Dtype truncation_max, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
if (bottom_data[index] > truncation_max) {
out_diff[index] = Max(in_diff[index], Dtype(0));
} else if (bottom_data[index] < truncation_min) {
out_diff[index] = Min(in_diff[index], Dtype(0));
} else {
out_diff[index] = in_diff[index];
}
}
}
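// Hedged numeric illustration of the rule above (values invented): with
// truncation_min = 0 and truncation_max = 1, a bottom value of 1.3 lies above
// the ceiling, so only a non-negative upstream gradient is propagated
// (Max(in_diff, 0)); a bottom value of -0.2 only passes a non-positive
// gradient (Min(in_diff, 0)); a bottom value of 0.4 passes it unchanged.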
template <typename Dtype>
void TruncationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( TruncationBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, Dtype(truncation_min_), Dtype(truncation_max_), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TruncationLayer);
} // namespace caffe
| 0ad41e2444c056a17d114a60d5e57752f1225a18.cu | #include <cmath>
#include <vector>
#include <iostream>
#include "caffe/layers/truncation_layer.hpp"
#include "caffe/util/math_functions.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return ((x > y) ? x : y);
}
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return ((x > y) ? y : x);
}
template <typename Dtype>
__global__ void TruncationForward(const int n, const Dtype* in, const Dtype truncation_min, const Dtype truncation_max, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = Min( Max(in[index], truncation_min), truncation_max);
}
}
template <typename Dtype>
void TruncationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
TruncationForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, Dtype(truncation_min_), Dtype(truncation_max_), top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void TruncationBackward(const int n, const Dtype* in_diff,
const Dtype* bottom_data, const Dtype truncation_min, const Dtype truncation_max, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
if (bottom_data[index] > truncation_max) {
out_diff[index] = Max(in_diff[index], Dtype(0));
} else if (bottom_data[index] < truncation_min) {
out_diff[index] = Min(in_diff[index], Dtype(0));
} else {
out_diff[index] = in_diff[index];
}
}
}
template <typename Dtype>
void TruncationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
TruncationBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, Dtype(truncation_min_), Dtype(truncation_max_), bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(TruncationLayer);
} // namespace caffe
|
251df53aa45624968c5424c5920a25f542cb5b96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hipfft.h"
#include "cufftw.h"
#include <stdlib.h>
#include<math.h>
#include "hipfftXt.h"
#include <stdio.h>
#include<iostream>
#include<stdlib.h>
#include <fstream>
//using namespace std;
//#define PI 3.1415926535897932384626433832795028841971 //
//#define FFT_N 1024
//#define N 1024 //* 4
//#define M 4
//#define fs 48000
//#define d 0.06
//#define v 340
////int fs,d,v,FFT_N;
///*fs=48000;%
////M=4;%
////d=0.06;%
////FFT_N=768;%
////v=340;%
////f_xxFFT */
////struct complex EE(struct complex a,struct complex b);//
////void FFT(struct complex *xin);//FFT
//int DOA(struct complex *f_x1,struct complex *f_x2,struct complex *f_x3,struct complex *f_x4);//
//float a_x1[N],a_x2[N],a_x3[N],a_x4[N];
//long b_x1[N],b_x2[N],b_x3[N],b_x4[N];
//FILE *fp1 = NULL;
//struct complex x1[FFT_N],x2[FFT_N],x3[FFT_N],x4[FFT_N];
// struct complex Energy;
// struct complex Y,Ytemp;
// struct complex t[4];
// struct complex cov_x[4][1024],temp[4][1024],f_x[4][1024];
// float delay,DOA_scan[181]={0},Earray[181][1024],maxx=0;
// int angle=0;
// int doa_scan[181];
extern "C" int runtest(long *adbuffer)
{
//long ADData;
//float fVolt;
//for (int z = 0; z < N; z++)
//{
// b_x1[z]=adbuffer[4*z];
// b_x2[z]=adbuffer[4*z+1];
// b_x3[z]=adbuffer[4*z+2];
// b_x4[z]=adbuffer[4*z+3];
//}
//for (int j = 0; j < N; j++)
//{
// a_x1[j]=(20000.0/16384)*((b_x1[j]^0x2000)&0x3FFF)- 10000.0;
// a_x2[j]=(20000.0/16384)*((b_x2[j]^0x2000)&0x3FFF)- 10000.0;
// a_x3[j]=(20000.0/16384)*((b_x3[j]^0x2000)&0x3FFF)- 10000.0;
// a_x4[j]=(20000.0/16384)*((b_x4[j]^0x2000)&0x3FFF)- 10000.0;
//}
////if (true)
////{
//// return 1000;
////}
////ADData=(adbuffer[i]^0x2000)&0x3FFF;// voltage range defaults to -10 V ... +10 V
//// fVolt=((20000.0/16384) * ADData - 10000.0)/1000;
// int i=0;int deg=0; float end_point_power=0.0;
// struct complex s[FFT_N]; // FFT input/output: stored starting at s[1], size chosen as needed
// struct complex x1[FFT_N],x2[FFT_N],x3[FFT_N],x4[FFT_N];
// for(int k=0;k<FFT_N;k++)
// {
// x1[k].real=a_x1[k]/1000 ; x1[k].imag=0;
// x2[k].real=a_x2[k]/1000 ; x2[k].imag=0;
// x3[k].real=a_x3[k]/1000 ; x3[k].imag=0;
// x4[k].real=a_x4[k]/1000 ; x4[k].imag=0;
// }
///* for(int u=0;u<FFT_N;u++)
// end_point_power+=abs(x1[u].real)*abs(x1[u].real);
//if(end_point_power<1500){
// end_point_power=0;
// return 1000;}*/
// /********* compute the FFT *******/
// fft(FFT_N,x1);//for(int i=1;i<FFT_N+1)f_x1=x1; // perform the fast Fourier transform
// fft(FFT_N,x2);//f_x2=x2;
// fft(FFT_N,x3);//f_x3=x3
// fft(FFT_N,x4);//f_x4=x4; // used to check the result of the FFT on the FFT_N-point signal
// deg=DOA(x1,x2,x3,x4);
// return deg;
return 0;
}
///*******************************************************************
//Function prototype: struct complex EE(struct complex b1, struct complex b2)
//Function: multiply two complex numbers
//Input: two complex numbers a and b, given as complex structs
//Output: the product of a and b, returned as a complex struct
//*******************************************************************/
//struct complex EE(struct complex a,struct complex b)
//{
// struct complex c;
// c.real=a.real*b.real-a.imag*b.imag;
// c.imag=a.real*b.imag+a.imag*b.real;
// return(c);
//}
//
///*****************************************************************
//Function prototype: void FFT(struct complex *xin, int FFT_N)
//Function: perform a fast Fourier transform (FFT) on the input complex array
//Input: *xin, pointer to the first element of the complex struct array
//*****************************************************************/
////void FFT(struct complex *xin)
////{
//// int f,m,nv2,nm1,i,k,l,j=0;
//// struct complex u,w,t;
//// static struct complex f_xin;
////  nv2=FFT_N/2;  // bit-reversal permutation: reorder the natural-order input into bit-reversed order (Rader's reverse-carry algorithm)
//// nm1=FFT_N-1;
//// for(i=0;i<nm1;i++)
//// {
////   if(i<j)   // if i<j, swap the two entries
//// {
//// t=xin[j];
//// xin[j]=xin[i];
//// xin[i]=t;
//// }
////   k=nv2;   // find the next index j in bit-reversed order
////   while(k<=j)   // while k<=j, the highest bit of j is 1
////   {
////    j=j-k;   // clear that highest bit
////    k=k/2;   // move k to the next lower bit and keep comparing until a 0 bit is found
////   }
////   j=j+k;   // set that 0 bit to 1
//// }
////
//// {
////  int le,lei,ip;   // FFT core: butterfly stages of the FFT
////  f=FFT_N;
////  for(l=1;(f=f/2)!=1;l++)   // compute l, the number of butterfly stages
////   ;
////  for(m=1;m<=l;m++)   // loop over the butterfly stages
////  {   // m is the current stage, l = log2(FFT_N) is the total number of stages
////   le=2<<(m-1);   // le: span of a butterfly group at stage m
////   lei=le/2;   // distance between the two points of one butterfly
////   u.real=1.0;   // u: twiddle factor, initialised to 1
////   u.imag=0.0;
////   w.real=cos(PI/lei);   // w: ratio between consecutive twiddle factors
////   w.imag=-sin(PI/lei);
////   for(j=0;j<=lei-1;j++)   // loop over butterflies with different twiddle factors
////   {
////    for(i=j;i<=FFT_N-1;i=i+le)   // loop over butterflies that share the same twiddle factor
////    {
////     ip=i+lei;   // i and ip are the two nodes of this butterfly
////     t=EE(xin[ip],u);   // butterfly computation
//// xin[ip].real=xin[i].real-t.real;
//// xin[ip].imag=xin[i].imag-t.imag;
//// xin[i].real=xin[i].real+t.real;
//// xin[i].imag=xin[i].imag+t.imag;
//// }
////    u=EE(u,w);   // advance the twiddle factor for the next butterfly
//// }
//// }
//// }
////}
//
//int DOA(struct complex *f_x1,struct complex *f_x2,struct complex *f_x3,struct complex *f_x4)
//{
// for(int i=0;i<181;i++)
// doa_scan[i]=i-90;
// Y.real=0;Y.imag=0;
// for(int i=0;i<FFT_N;i++)
// {
// f_x[0][i]=f_x1[i];
// f_x[1][i]=f_x2[i];
// f_x[2][i]=f_x3[i];
// f_x[3][i]=f_x4[i];
// }
//
//for(int i=0;i<=180;i++)
// for(int j=0;j<FFT_N;j++)
// {
// // compute the steering coefficients t
// delay=2*PI*j*fs*d*sin(PI*doa_scan[i]/180)/v/FFT_N;
// for(int k=0;k<M;k++)
// {t[k].real=cos(k*delay);
// t[k].imag=sin(k*delay);
// }
// // apply the compensation to fdata to obtain Y
// for(int k=0;k<M;k++)
// {
// Ytemp=EE(t[k],f_x[k][j]);
// Y.real=Y.real+Ytemp.real;
// Y.imag=Y.imag+Ytemp.imag;
// }
// Earray[i][j]=Y.real*Y.real+Y.imag*Y.imag;
// Y.real=0;Y.imag=0;
// }
//
// for(int i=0;i<=180;i++)
// {
// for(int j=0;j<FFT_N/2;j++)
// DOA_scan[i]+=Earray[i][j];
// if(DOA_scan[i]>maxx)
// {maxx=DOA_scan[i];angle=i-90;}
// }
// return angle;
//}
//
//
//// t[0].real=cos(delay);t[0].imag=sin(delay);
//// t[1].real=4*cos(delay);t[1].imag=4*sin(delay);
//// t[2].real=8*cos(delay);t[2].imag=8*sin(delay);
//// t[3].real=16*cos(delay);t[3].imag=16*sin(delay);
///* for(int i=1;i<FFT_N+1;i++)
// f_x[0][i]=f_x1[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[1][i]=f_x2[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[2][i]=f_x3[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[3][i]=f_x4[i]; */
// /* for(int i=0;i<FFT_N;i++)
// {
// for(int ii=0;ii<M;ii++) //
// for(int jj=0;jj<M;jj++)
// cov_x[ii][jj]=EE(f_x[ii][i],f_x[jj][i]); //4
// for(int j=-90;j<=90;j++)
// {
// delay=-2*PI*i*fs*d*sin(PI*j/180)/v/FFT_N;
// for(int k=0;k<M;k++)
// {t[k].real=k*cos(delay);
// t[k].imag=k*sin(delay);
// }
// // compute the energy t'*cov_x*t for each frequency bin and each angle
// for(int ii=0;ii<M;i++)  // compute t'*cov_x = temp
// for(int jj=0;jj<M;jj++)
// {
// temp[1][ii].real+=EE(t[jj],cov_x[jj][ii]).real;
// temp[1][ii].imag+=EE(t[jj],cov_x[jj][ii]).imag;
// }
// for(int ii=0;ii<M;ii++)
// {
// Energy.real=EE(temp[1][ii],t[ii]).real;
// Energy.imag=EE(temp[1][ii],t[ii]).imag;
// }  // compute temp*t
// Earray[j][i]=sqrt(Energy.real*Energy.real+Energy.imag*Energy.imag);
// }
// }
// */
| 251df53aa45624968c5424c5920a25f542cb5b96.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cufft.h"
#include "cufftw.h"
#include <stdlib.h>
#include<math.h>
#include "cufftXt.h"
#include <stdio.h>
#include<iostream>
#include<stdlib.h>
#include <fstream>
//using namespace std;
//#define PI 3.1415926535897932384626433832795028841971 // define the value of pi
//#define FFT_N 1024
//#define N 1024 // number of channels * number of samples; each sample value occupies 4 bytes
//#define M 4
//#define fs 48000
//#define d 0.06
//#define v 340
////int fs,d,v,FFT_N;
///*fs=48000;% sampling frequency
////M=4;% number of microphone array elements
////d=0.06;% element spacing
////FFT_N=768;% number of samples
////v=340;% speed of sound in air
////f_xx: per-channel data after the FFT */
////struct complex EE(struct complex a,struct complex b);// multiply two complex numbers
////void FFT(struct complex *xin);// fast Fourier transform (FFT) of the input complex array
//int DOA(struct complex *f_x1,struct complex *f_x2,struct complex *f_x3,struct complex *f_x4);// beamforming scan to compute the direction-of-arrival angle
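//// Hedged worked example (using the commented-out defaults above, for
//// illustration only): with fs = 48000 Hz, d = 0.06 m, v = 340 m/s,
//// FFT_N = 1024 (the commented #define), frequency bin j = 100 and a scan
//// angle of 30 degrees, the per-element phase step is
//// delay = 2*PI*j*fs*d*sin(PI*30/180)/v/FFT_N ~= 2.6 rad,
//// and element k is steered by the factor (cos(k*delay), sin(k*delay)).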
//float a_x1[N],a_x2[N],a_x3[N],a_x4[N];
//long b_x1[N],b_x2[N],b_x3[N],b_x4[N];
//FILE *fp1 = NULL;
//struct complex x1[FFT_N],x2[FFT_N],x3[FFT_N],x4[FFT_N];
// struct complex Energy;
// struct complex Y,Ytemp;
// struct complex t[4];
// struct complex cov_x[4][1024],temp[4][1024],f_x[4][1024];
// float delay,DOA_scan[181]={0},Earray[181][1024],maxx=0;
// int angle=0;
// int doa_scan[181];
extern "C" int runtest(long *adbuffer)
{
//long ADData;
//float fVolt;
//for (int z = 0; z < N; z++)
//{
// b_x1[z]=adbuffer[4*z];
// b_x2[z]=adbuffer[4*z+1];
// b_x3[z]=adbuffer[4*z+2];
// b_x4[z]=adbuffer[4*z+3];
//}
//for (int j = 0; j < N; j++)
//{
// a_x1[j]=(20000.0/16384)*((b_x1[j]^0x2000)&0x3FFF)- 10000.0;
// a_x2[j]=(20000.0/16384)*((b_x2[j]^0x2000)&0x3FFF)- 10000.0;
// a_x3[j]=(20000.0/16384)*((b_x3[j]^0x2000)&0x3FFF)- 10000.0;
// a_x4[j]=(20000.0/16384)*((b_x4[j]^0x2000)&0x3FFF)- 10000.0;
//}
////if (true)
////{
//// return 1000;
////}
////ADData=(adbuffer[i]^0x2000)&0x3FFF;// voltage range defaults to -10 V ... +10 V
//// fVolt=((20000.0/16384) * ADData - 10000.0)/1000;
// int i=0;int deg=0; float end_point_power=0.0;
// struct complex s[FFT_N]; // FFT input/output: stored starting at s[1], size chosen as needed
// struct complex x1[FFT_N],x2[FFT_N],x3[FFT_N],x4[FFT_N];
// for(int k=0;k<FFT_N;k++)
// {
// x1[k].real=a_x1[k]/1000 ; x1[k].imag=0;
// x2[k].real=a_x2[k]/1000 ; x2[k].imag=0;
// x3[k].real=a_x3[k]/1000 ; x3[k].imag=0;
// x4[k].real=a_x4[k]/1000 ; x4[k].imag=0;
// }
///* for(int u=0;u<FFT_N;u++)
// end_point_power+=abs(x1[u].real)*abs(x1[u].real);
//if(end_point_power<1500){
// end_point_power=0;
// return 1000;}*/
// /********* compute the FFT *******/
// fft(FFT_N,x1);//for(int i=1;i<FFT_N+1)f_x1=x1; // perform the fast Fourier transform
// fft(FFT_N,x2);//f_x2=x2;
// fft(FFT_N,x3);//f_x3=x3
// fft(FFT_N,x4);//f_x4=x4; // used to check the result of the FFT on the FFT_N-point signal
// deg=DOA(x1,x2,x3,x4);
// return deg;
return 0;
}
///*******************************************************************
//Function prototype: struct complex EE(struct complex b1, struct complex b2)
//Function: multiply two complex numbers
//Input: two complex numbers a and b, given as complex structs
//Output: the product of a and b, returned as a complex struct
//*******************************************************************/
//struct complex EE(struct complex a,struct complex b)
//{
// struct complex c;
// c.real=a.real*b.real-a.imag*b.imag;
// c.imag=a.real*b.imag+a.imag*b.real;
// return(c);
//}
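// A quick arithmetic check of the complex product above (illustrative values):
// (1 + 2i) * (3 + 4i) = (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i, i.e. EE would
// return real = -5 and imag = 10 for those inputs.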
//
///*****************************************************************
//Function prototype: void FFT(struct complex *xin, int FFT_N)
//Function: perform a fast Fourier transform (FFT) on the input complex array
//Input: *xin, pointer to the first element of the complex struct array
//*****************************************************************/
////void FFT(struct complex *xin)
////{
//// int f,m,nv2,nm1,i,k,l,j=0;
//// struct complex u,w,t;
//// static struct complex f_xin;
////  nv2=FFT_N/2;  // bit-reversal permutation: reorder the natural-order input into bit-reversed order (Rader's reverse-carry algorithm)
//// nm1=FFT_N-1;
//// for(i=0;i<nm1;i++)
//// {
////   if(i<j)   // if i<j, swap the two entries
//// {
//// t=xin[j];
//// xin[j]=xin[i];
//// xin[i]=t;
//// }
////   k=nv2;   // find the next index j in bit-reversed order
////   while(k<=j)   // while k<=j, the highest bit of j is 1
////   {
////    j=j-k;   // clear that highest bit
////    k=k/2;   // move k to the next lower bit and keep comparing until a 0 bit is found
////   }
////   j=j+k;   // set that 0 bit to 1
//// }
////
//// {
////  int le,lei,ip;   // FFT core: butterfly stages of the FFT
////  f=FFT_N;
////  for(l=1;(f=f/2)!=1;l++)   // compute l, the number of butterfly stages
////   ;
////  for(m=1;m<=l;m++)   // loop over the butterfly stages
////  {   // m is the current stage, l = log2(FFT_N) is the total number of stages
////   le=2<<(m-1);   // le: span of a butterfly group at stage m
////   lei=le/2;   // distance between the two points of one butterfly
////   u.real=1.0;   // u: twiddle factor, initialised to 1
////   u.imag=0.0;
////   w.real=cos(PI/lei);   // w: ratio between consecutive twiddle factors
////   w.imag=-sin(PI/lei);
////   for(j=0;j<=lei-1;j++)   // loop over butterflies with different twiddle factors
////   {
////    for(i=j;i<=FFT_N-1;i=i+le)   // loop over butterflies that share the same twiddle factor
////    {
////     ip=i+lei;   // i and ip are the two nodes of this butterfly
////     t=EE(xin[ip],u);   // butterfly computation
//// xin[ip].real=xin[i].real-t.real;
//// xin[ip].imag=xin[i].imag-t.imag;
//// xin[i].real=xin[i].real+t.real;
//// xin[i].imag=xin[i].imag+t.imag;
//// }
////    u=EE(u,w);   // advance the twiddle factor for the next butterfly
//// }
//// }
//// }
////}
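//// Hedged bit-reversal example (illustrative, FFT_N = 8): the reordering loop
//// above swaps index 1 (001b) with 4 (100b) and 3 (011b) with 6 (110b), while
//// 0, 2, 5 and 7 stay in place, which is the standard radix-2 DIT input
//// permutation.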
//
//int DOA(struct complex *f_x1,struct complex *f_x2,struct complex *f_x3,struct complex *f_x4)
//{
// for(int i=0;i<181;i++)
// doa_scan[i]=i-90;
// Y.real=0;Y.imag=0;
// for(int i=0;i<FFT_N;i++)
// {
// f_x[0][i]=f_x1[i];
// f_x[1][i]=f_x2[i];
// f_x[2][i]=f_x3[i];
// f_x[3][i]=f_x4[i];
// }
//
//for(int i=0;i<=180;i++)
// for(int j=0;j<FFT_N;j++)
// {
// // compute the steering coefficients t
// delay=2*PI*j*fs*d*sin(PI*doa_scan[i]/180)/v/FFT_N;
// for(int k=0;k<M;k++)
// {t[k].real=cos(k*delay);
// t[k].imag=sin(k*delay);
// }
// // apply the compensation to fdata to obtain Y
// for(int k=0;k<M;k++)
// {
// Ytemp=EE(t[k],f_x[k][j]);
// Y.real=Y.real+Ytemp.real;
// Y.imag=Y.imag+Ytemp.imag;
// }
// Earray[i][j]=Y.real*Y.real+Y.imag*Y.imag;
// Y.real=0;Y.imag=0;
// }
//
// for(int i=0;i<=180;i++)
// {
// for(int j=0;j<FFT_N/2;j++)
// DOA_scan[i]+=Earray[i][j];
// if(DOA_scan[i]>maxx)
// {maxx=DOA_scan[i];angle=i-90;}
// }
// return angle;
//}
//
//
//// t[0].real=cos(delay);t[0].imag=sin(delay);
//// t[1].real=4*cos(delay);t[1].imag=4*sin(delay);
//// t[2].real=8*cos(delay);t[2].imag=8*sin(delay);
//// t[3].real=16*cos(delay);t[3].imag=16*sin(delay);
///* for(int i=1;i<FFT_N+1;i++)
// f_x[0][i]=f_x1[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[1][i]=f_x2[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[2][i]=f_x3[i];
// for(int i=1;i<FFT_N+1;i++)
// f_x[3][i]=f_x4[i]; */
// /* for(int i=0;i<FFT_N;i++)
// {
// for(int ii=0;ii<M;ii++)  // compute the covariance matrix
// for(int jj=0;jj<M;jj++)
// cov_x[ii][jj]=EE(f_x[ii][i],f_x[jj][i]);  // covariance of the 4 channel signals at each frequency bin
// for(int j=-90;j<=90;j++)
// {
// delay=-2*PI*i*fs*d*sin(PI*j/180)/v/FFT_N;
// for(int k=0;k<M;k++)
// {t[k].real=k*cos(delay);
// t[k].imag=k*sin(delay);
// }
// // compute the energy t'*cov_x*t for each frequency bin and each angle
// for(int ii=0;ii<M;i++)  // compute t'*cov_x = temp
// for(int jj=0;jj<M;jj++)
// {
// temp[1][ii].real+=EE(t[jj],cov_x[jj][ii]).real;
// temp[1][ii].imag+=EE(t[jj],cov_x[jj][ii]).imag;
// }
// for(int ii=0;ii<M;ii++)
// {
// Energy.real=EE(temp[1][ii],t[ii]).real;
// Energy.imag=EE(temp[1][ii],t[ii]).imag;
// }  // compute temp*t
// Earray[j][i]=sqrt(Energy.real*Energy.real+Energy.imag*Energy.imag);
// }
// }
// */
|
a635867a3c9ffdbb544ab74da3c055534dbb3cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*------------int_add---------------------------------------------------------//
*
* Purpose: adding integers with the gpu! I am excited! Woo!
*
*-----------------------------------------------------------------------------*/
#include<iostream>
__global__ void add(int *a, int *b, int *c){
*c = *b + *a;
}
using namespace std;
int main(void){
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// Allocate space on the gpu
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// input values
a = 2;
b = 7;
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
cout << "integer from GPU is: " << c << endl;
return 0;
}
| a635867a3c9ffdbb544ab74da3c055534dbb3cc2.cu | /*------------int_add---------------------------------------------------------//
*
* Purpose: adding integers with the gpu! I am excited! Woo!
*
*-----------------------------------------------------------------------------*/
#include<iostream>
__global__ void add(int *a, int *b, int *c){
*c = *b + *a;
}
using namespace std;
int main(void){
int a, b, c;
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// Allocate space on the gpu
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// input values
a = 2;
b = 7;
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
add<<<1,1>>>(d_a, d_b, d_c);
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
cout << "integer from GPU is: " << c << endl;
return 0;
}
|
a703e3ebd3f3e8201f4c6990153a56a05fb74725.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <hip/hip_runtime.h>
#include <signal.h>
#include "util.hpp"
#include <unistd.h>
using namespace std;
unsigned int *random_numbers;
unsigned int *random_numbers_dev;
/* launch one block */
#define BLOCKSIZE 256
#define NUM_THREADS BLOCKSIZE
#define NUM_RANDOM_NUMBERS_DEV (1 << 14)
#include RANDOM_NUMBER_GENERATOR
enum { NUM_RANDOMS = NUM_RANDOM_NUMBERS_DEV };
void
initialize_cuda()
{
choose_device();
CUDA_CHECK_ERROR(hipHostMalloc(&random_numbers,
sizeof(*random_numbers) * NUM_RANDOMS));
CUDA_CHECK_ERROR(hipMalloc(&random_numbers_dev,
sizeof(*random_numbers_dev) * NUM_RANDOMS));
}
__global__ void
kernel_generate_randoms(const RNG::DevParameters params, unsigned int *random_numbers)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
RNG::RNGState rng_state;
RNG::initialize(¶ms, &rng_state);
unsigned int rnds[RNG::num_randoms_per_call];
for(int i = 0; i < NUM_RANDOMS; i += RNG::num_randoms_per_call) {
RNG::generate_random_numbers(&rng_state, rnds, 1, RNG::num_randoms_per_call);
if(idx == 0) {
for(int j = 0; j < RNG::num_randoms_per_call; j++) {
random_numbers[i + j] = rnds[j];
}
}
}
RNG::finalize(¶ms, &rng_state);
}
void
handle_sig_pipe(int sig)
{
exit(EXIT_SUCCESS);
}
int
main(int argc, char **argv)
{
if(isatty(1)) {
cerr << "i won't write to a tty" << endl;
exit(EXIT_FAILURE);
}
signal(SIGPIPE, handle_sig_pipe);
initialize_cuda();
RNG::DevParameters rng_parameters;
RNG::initialize_rng(&rng_parameters);
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(NUM_THREADS / BLOCKSIZE, 1, 1);
for(;;) {
hipLaunchKernelGGL(( kernel_generate_randoms), dim3(grid), dim3(block) , 0, 0, rng_parameters, random_numbers_dev);
CUDA_CHECK_ERROR(hipGetLastError());
CUDA_CHECK_ERROR(hipMemcpy(random_numbers, random_numbers_dev,
sizeof(*random_numbers_dev) * NUM_RANDOMS,
hipMemcpyDeviceToHost));
unsigned int *ptr = random_numbers;
int s = 0, cnt = sizeof(*ptr) * NUM_RANDOMS;
while((s = write(1, ptr, cnt)) < cnt) {
if(s < 0) {
perror("error writing");
exit(EXIT_FAILURE);
}
ptr += s;
cnt -= s;
}
}
return 0;
}
| a703e3ebd3f3e8201f4c6990153a56a05fb74725.cu | #include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda_runtime.h>
#include <signal.h>
#include "util.hpp"
#include <unistd.h>
using namespace std;
unsigned int *random_numbers;
unsigned int *random_numbers_dev;
/* launch one block */
#define BLOCKSIZE 256
#define NUM_THREADS BLOCKSIZE
#define NUM_RANDOM_NUMBERS_DEV (1 << 14)
#include RANDOM_NUMBER_GENERATOR
enum { NUM_RANDOMS = NUM_RANDOM_NUMBERS_DEV };
void
initialize_cuda()
{
choose_device();
CUDA_CHECK_ERROR(cudaMallocHost(&random_numbers,
sizeof(*random_numbers) * NUM_RANDOMS));
CUDA_CHECK_ERROR(cudaMalloc(&random_numbers_dev,
sizeof(*random_numbers_dev) * NUM_RANDOMS));
}
__global__ void
kernel_generate_randoms(const RNG::DevParameters params, unsigned int *random_numbers)
{
int idx = blockDim.x * blockIdx.x + threadIdx.x;
RNG::RNGState rng_state;
RNG::initialize(¶ms, &rng_state);
unsigned int rnds[RNG::num_randoms_per_call];
for(int i = 0; i < NUM_RANDOMS; i += RNG::num_randoms_per_call) {
RNG::generate_random_numbers(&rng_state, rnds, 1, RNG::num_randoms_per_call);
if(idx == 0) {
for(int j = 0; j < RNG::num_randoms_per_call; j++) {
random_numbers[i + j] = rnds[j];
}
}
}
RNG::finalize(¶ms, &rng_state);
}
void
handle_sig_pipe(int sig)
{
exit(EXIT_SUCCESS);
}
int
main(int argc, char **argv)
{
if(isatty(1)) {
cerr << "i won't write to a tty" << endl;
exit(EXIT_FAILURE);
}
signal(SIGPIPE, handle_sig_pipe);
initialize_cuda();
RNG::DevParameters rng_parameters;
RNG::initialize_rng(&rng_parameters);
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid(NUM_THREADS / BLOCKSIZE, 1, 1);
for(;;) {
kernel_generate_randoms<<< grid, block >>> (rng_parameters, random_numbers_dev);
CUDA_CHECK_ERROR(cudaGetLastError());
CUDA_CHECK_ERROR(cudaMemcpy(random_numbers, random_numbers_dev,
sizeof(*random_numbers_dev) * NUM_RANDOMS,
cudaMemcpyDeviceToHost));
unsigned int *ptr = random_numbers;
int s = 0, cnt = sizeof(*ptr) * NUM_RANDOMS;
while((s = write(1, ptr, cnt)) < cnt) {
if(s < 0) {
perror("error writing");
exit(EXIT_FAILURE);
}
ptr += s;
cnt -= s;
}
}
return 0;
}
|
b32af3474b807451d2c1a9b300d0555258b1f920.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ParaStation
*
* Copyright (C) 2016-2021 ParTec Cluster Competence Center GmbH, Munich
* Copyright (C) 2021-2023 ParTec AG, Munich
*
* This file may be distributed under the terms of the Q Public License
* as defined in the file LICENSE.QPL included in the packaging of this
* file.
*/
#include <mpi.h>
extern "C" {
#include "mpitest.h"
}
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef __NVCC__
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
#endif
#define ThreadsPerBlock 1
// Total domain size:
#define domain_size_x 256
#define domain_size_y 256
#ifdef __NVCC__
__global__ void laplace(float *old_values, float *new_values, int n, int m)
{
int i = threadIdx.x;
int j = blockIdx.x;
if ((i > 0) && (i < n-1) &&
(j > 0) && (j < m-1)) {
new_values[i*m + j] = 0.25 *
(old_values[(i-1)*m + j] +
old_values[(i+1)*m + j] +
old_values[i*m + j-1] +
old_values[i*m + j+1] );
}
}
__global__ void swap_arrays(float *old_values, float *new_values, int n, int m)
{
int i = threadIdx.x;
int j = blockIdx.x;
if ((i > 0) && (i < n-1) &&
(j > 0) && (j < m-1)) {
new_values[i*m + j] = old_values[i*m + j];
}
}
#endif
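/* The kernels above implement one Jacobi sweep of the 2-D Laplace equation:
   every interior point is replaced by the average of its four neighbours,
     new[i][j] = 0.25 * (old[i-1][j] + old[i+1][j] + old[i][j-1] + old[i][j+1]).
   Illustrative numbers (invented): neighbours 100, 100, 0 and 0 average to 50. */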
#define CUDA_CHECK(call, fatal) \
do { \
if ((cuda_error == hipSuccess) && (((call) != hipSuccess) || ((cuda_error = hipGetLastError()) != hipSuccess))) { \
MTestPrintfMsg(0, #call" returned: %s\n", hipGetErrorString(cuda_error)); \
if (fatal) MTestError("Fatal CUDA error! Calling MPI_Abort()...\n"); \
} \
} while(0);
double start_time, stop_time;
int main(int argc, char **argv)
{
int errs = 0;
int t, Tmax = 10000;
int i, j;
int my_rank;
int num_ranks;
int n = domain_size_x;
int m = domain_size_y;
double residual = 0.0;
float** new_values;
float** old_values;
float* new_values_blob;
float* old_values_blob;
float* send_buffer_up;
float* send_buffer_dn;
float* recv_buffer_up;
float* recv_buffer_dn;
#ifdef __NVCC__
hipError_t cuda_error = hipSuccess;
float* __new_values_blob;
float* __old_values_blob;
void* ptr;
#endif
MPI_Request* upper_send_req;
MPI_Request* lower_send_req;
MPI_Request* upper_recv_req;
MPI_Request* lower_recv_req;
MPI_Status stat_array[4];
MPI_Request req_array[4] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL };
upper_send_req = req_array;
lower_send_req = req_array + 1;
upper_recv_req = req_array + 2;
lower_recv_req = req_array + 3;
MTest_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_ranks);
n = domain_size_x / num_ranks;
if (my_rank == num_ranks-1) n += domain_size_x % num_ranks;
m += 2;
n += 2;
MTestPrintfMsg(3, "(%d) %d x %d / (%d x %d)\n", my_rank, domain_size_x, domain_size_y, n, m);
new_values = (float**)malloc(n * sizeof(float*));
new_values_blob = (float *)malloc(n * m * sizeof(float));
old_values = (float**)malloc(n * sizeof(float*));
old_values_blob = (float *)malloc(n * m * sizeof(float));
new_values[0] = new_values_blob;
old_values[0] = old_values_blob;
for (i = 1; i < n; i++) {
new_values[i] = new_values[i-1] + m;
old_values[i] = old_values[i-1] + m;
}
#ifdef __NVCC__
CUDA_CHECK(hipMalloc((void**)&ptr, n * m * sizeof(float)), 1);
__new_values_blob = (float*)ptr;
CUDA_CHECK(hipMalloc((void**)&ptr, n * m * sizeof(float)), 1);
__old_values_blob = (float*)ptr;
#endif
MTestPrintfMsg(1, "(%d) Memory allocated!\n", my_rank);
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
new_values[i][j] = 0.0;
}
}
if (my_rank == 0) {
for (j = 0; j < m; j++) {
new_values[0][j] = 100.0;
}
}
if (my_rank == num_ranks-1) {
for (j = 0; j < m; j++) {
new_values[n-1][j] = 100.0;
}
}
for (i = 0; i < n; i++) {
new_values[i][0] = 100.0;
new_values[i][m-1] = 100.0;
}
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
old_values[i][j] = new_values[i][j];
}
}
#ifdef __NVCC__
CUDA_CHECK(hipMemcpy(__new_values_blob, new_values_blob, n * m * sizeof(float), hipMemcpyHostToDevice), 0);
CUDA_CHECK(hipMemcpy(__old_values_blob, old_values_blob, n * m * sizeof(float), hipMemcpyHostToDevice), 0);
MPI_Barrier(MPI_COMM_WORLD);
send_buffer_up = &__new_values_blob[m];
send_buffer_dn = &__new_values_blob[(n-2)*m];
recv_buffer_up = &__old_values_blob[0];
recv_buffer_dn = &__old_values_blob[(n-1)*m];
#else
send_buffer_up = new_values[1];
send_buffer_dn = new_values[n-2];
recv_buffer_up = old_values[0];
recv_buffer_dn = old_values[n-1];
#endif
MTestPrintfMsg(1, "(%d) Arrays initialized!\n", my_rank);
start_time = MPI_Wtime();
for (t = 0; t <= Tmax; t++){
#ifdef __NVCC__
hipLaunchKernelGGL(( laplace), dim3(m), dim3(n) , 0, 0, __old_values_blob, __new_values_blob, n, m);
CUDA_CHECK(hipDeviceSynchronize(), 0);
#else
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
new_values[i][j] = 0.25 * ( old_values[i-1][j] + old_values[i+1][j] +
old_values[i][j-1] + old_values[i][j+1] );
}
}
#endif
if (my_rank != 0) {
MPI_Isend(send_buffer_up, m, MPI_FLOAT, my_rank-1, 0, MPI_COMM_WORLD, upper_send_req);
MPI_Irecv(recv_buffer_up, m, MPI_FLOAT, my_rank-1, 0, MPI_COMM_WORLD, upper_recv_req);
}
if (my_rank != num_ranks-1) {
MPI_Irecv(recv_buffer_dn, m, MPI_FLOAT, my_rank+1, 0, MPI_COMM_WORLD, lower_recv_req);
MPI_Isend(send_buffer_dn, m, MPI_FLOAT, my_rank+1, 0, MPI_COMM_WORLD, lower_send_req);
}
MPI_Waitall(4, req_array, stat_array);
if (t < Tmax) {
#ifdef __NVCC__
hipLaunchKernelGGL(( swap_arrays), dim3(m), dim3(n) , 0, 0, __new_values_blob, __old_values_blob, n, m);
CUDA_CHECK(hipDeviceSynchronize(), 0);
#else
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
old_values[i][j] = new_values[i][j];
}
}
#endif
}
}
MPI_Barrier(MPI_COMM_WORLD);
#ifdef __NVCC__
CUDA_CHECK(hipMemcpy(new_values_blob, __new_values_blob, n * m * sizeof(float), hipMemcpyDeviceToHost), 1);
CUDA_CHECK(hipMemcpy(old_values_blob, __old_values_blob, n * m * sizeof(float), hipMemcpyDeviceToHost), 1);
MPI_Barrier(MPI_COMM_WORLD);
#endif
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
residual += fabs(old_values[i][j] - new_values[i][j]);
}
}
MPI_Allreduce(MPI_IN_PLACE, &residual, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stop_time = MPI_Wtime();
MTestPrintfMsg(1, "(%d) Algorithm completed!\n", my_rank);
MTestPrintfMsg(2, "(%d) Residual = %f / Needed time: %f\n", my_rank, residual, stop_time - start_time);
free(new_values);
free(new_values_blob);
free(old_values);
free(old_values_blob);
#ifdef __NVCC__
CUDA_CHECK(hipFree(__new_values_blob), 0);
CUDA_CHECK(hipFree(__old_values_blob), 0);
#endif
MTest_Finalize(errs);
return MTestReturnValue(errs);
}
| b32af3474b807451d2c1a9b300d0555258b1f920.cu | /*
* ParaStation
*
* Copyright (C) 2016-2021 ParTec Cluster Competence Center GmbH, Munich
* Copyright (C) 2021-2023 ParTec AG, Munich
*
* This file may be distributed under the terms of the Q Public License
* as defined in the file LICENSE.QPL included in the packaging of this
* file.
*/
#include <mpi.h>
extern "C" {
#include "mpitest.h"
}
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef __NVCC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#endif
#define ThreadsPerBlock 1
// Total domain size:
#define domain_size_x 256
#define domain_size_y 256
#ifdef __NVCC__
__global__ void laplace(float *old_values, float *new_values, int n, int m)
{
int i = threadIdx.x;
int j = blockIdx.x;
if ((i > 0) && (i < n-1) &&
(j > 0) && (j < m-1)) {
new_values[i*m + j] = 0.25 *
(old_values[(i-1)*m + j] +
old_values[(i+1)*m + j] +
old_values[i*m + j-1] +
old_values[i*m + j+1] );
}
}
__global__ void swap_arrays(float *old_values, float *new_values, int n, int m)
{
int i = threadIdx.x;
int j = blockIdx.x;
if ((i > 0) && (i < n-1) &&
(j > 0) && (j < m-1)) {
new_values[i*m + j] = old_values[i*m + j];
}
}
#endif
#define CUDA_CHECK(call, fatal) \
do { \
if ((cuda_error == cudaSuccess) && (((call) != cudaSuccess) || ((cuda_error = cudaGetLastError()) != cudaSuccess))) { \
MTestPrintfMsg(0, #call" returned: %s\n", cudaGetErrorString(cuda_error)); \
if (fatal) MTestError("Fatal CUDA error! Calling MPI_Abort()...\n"); \
} \
} while(0);
double start_time, stop_time;
int main(int argc, char **argv)
{
int errs = 0;
int t, Tmax = 10000;
int i, j;
int my_rank;
int num_ranks;
int n = domain_size_x;
int m = domain_size_y;
double residual = 0.0;
float** new_values;
float** old_values;
float* new_values_blob;
float* old_values_blob;
float* send_buffer_up;
float* send_buffer_dn;
float* recv_buffer_up;
float* recv_buffer_dn;
#ifdef __NVCC__
cudaError_t cuda_error = cudaSuccess;
float* __new_values_blob;
float* __old_values_blob;
void* ptr;
#endif
MPI_Request* upper_send_req;
MPI_Request* lower_send_req;
MPI_Request* upper_recv_req;
MPI_Request* lower_recv_req;
MPI_Status stat_array[4];
MPI_Request req_array[4] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL };
upper_send_req = req_array;
lower_send_req = req_array + 1;
upper_recv_req = req_array + 2;
lower_recv_req = req_array + 3;
MTest_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
MPI_Comm_size(MPI_COMM_WORLD, &num_ranks);
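    /* 1D decomposition along x: every rank gets domain_size_x/num_ranks rows; the last rank also takes the remainder */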
n = domain_size_x / num_ranks;
if (my_rank == num_ranks-1) n += domain_size_x % num_ranks;
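    /* grow the local grid by two in each dimension: the outer rows/columns hold halo data or the fixed boundary values */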
m += 2;
n += 2;
MTestPrintfMsg(3, "(%d) %d x %d / (%d x %d)\n", my_rank, domain_size_x, domain_size_y, n, m);
new_values = (float**)malloc(n * sizeof(float*));
new_values_blob = (float *)malloc(n * m * sizeof(float));
old_values = (float**)malloc(n * sizeof(float*));
old_values_blob = (float *)malloc(n * m * sizeof(float));
new_values[0] = new_values_blob;
old_values[0] = old_values_blob;
for (i = 1; i < n; i++) {
new_values[i] = new_values[i-1] + m;
old_values[i] = old_values[i-1] + m;
}
#ifdef __NVCC__
CUDA_CHECK(cudaMalloc((void**)&ptr, n * m * sizeof(float)), 1);
__new_values_blob = (float*)ptr;
CUDA_CHECK(cudaMalloc((void**)&ptr, n * m * sizeof(float)), 1);
__old_values_blob = (float*)ptr;
#endif
MTestPrintfMsg(1, "(%d) Memory allocated!\n", my_rank);
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
new_values[i][j] = 0.0;
}
}
if (my_rank == 0) {
for (j = 0; j < m; j++) {
new_values[0][j] = 100.0;
}
}
if (my_rank == num_ranks-1) {
for (j = 0; j < m; j++) {
new_values[n-1][j] = 100.0;
}
}
for (i = 0; i < n; i++) {
new_values[i][0] = 100.0;
new_values[i][m-1] = 100.0;
}
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
old_values[i][j] = new_values[i][j];
}
}
#ifdef __NVCC__
CUDA_CHECK(cudaMemcpy(__new_values_blob, new_values_blob, n * m * sizeof(float), cudaMemcpyHostToDevice), 0);
CUDA_CHECK(cudaMemcpy(__old_values_blob, old_values_blob, n * m * sizeof(float), cudaMemcpyHostToDevice), 0);
MPI_Barrier(MPI_COMM_WORLD);
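    /* the halo buffers below are device pointers handed directly to MPI, which assumes a CUDA-aware MPI library */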
send_buffer_up = &__new_values_blob[m];
send_buffer_dn = &__new_values_blob[(n-2)*m];
recv_buffer_up = &__old_values_blob[0];
recv_buffer_dn = &__old_values_blob[(n-1)*m];
#else
send_buffer_up = new_values[1];
send_buffer_dn = new_values[n-2];
recv_buffer_up = old_values[0];
recv_buffer_dn = old_values[n-1];
#endif
MTestPrintfMsg(1, "(%d) Arrays initialized!\n", my_rank);
start_time = MPI_Wtime();
for (t = 0; t <= Tmax; t++){
#ifdef __NVCC__
laplace<<< m, n >>>(__old_values_blob, __new_values_blob, n, m);
CUDA_CHECK(cudaDeviceSynchronize(), 0);
#else
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
new_values[i][j] = 0.25 * ( old_values[i-1][j] + old_values[i+1][j] +
old_values[i][j-1] + old_values[i][j+1] );
}
}
#endif
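        /* exchange halo rows with the neighbouring ranks via non-blocking sends/receives */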
if (my_rank != 0) {
MPI_Isend(send_buffer_up, m, MPI_FLOAT, my_rank-1, 0, MPI_COMM_WORLD, upper_send_req);
MPI_Irecv(recv_buffer_up, m, MPI_FLOAT, my_rank-1, 0, MPI_COMM_WORLD, upper_recv_req);
}
if (my_rank != num_ranks-1) {
MPI_Irecv(recv_buffer_dn, m, MPI_FLOAT, my_rank+1, 0, MPI_COMM_WORLD, lower_recv_req);
MPI_Isend(send_buffer_dn, m, MPI_FLOAT, my_rank+1, 0, MPI_COMM_WORLD, lower_send_req);
}
MPI_Waitall(4, req_array, stat_array);
if (t < Tmax) {
#ifdef __NVCC__
swap_arrays<<< m, n >>>(__new_values_blob, __old_values_blob, n, m);
CUDA_CHECK(cudaDeviceSynchronize(), 0);
#else
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
old_values[i][j] = new_values[i][j];
}
}
#endif
}
}
MPI_Barrier(MPI_COMM_WORLD);
#ifdef __NVCC__
CUDA_CHECK(cudaMemcpy(new_values_blob, __new_values_blob, n * m * sizeof(float), cudaMemcpyDeviceToHost), 1);
CUDA_CHECK(cudaMemcpy(old_values_blob, __old_values_blob, n * m * sizeof(float), cudaMemcpyDeviceToHost), 1);
MPI_Barrier(MPI_COMM_WORLD);
#endif
for (i = 1; i < n-1; i++) {
for (j = 1; j < m-1; j++) {
residual += fabs(old_values[i][j] - new_values[i][j]);
}
}
MPI_Allreduce(MPI_IN_PLACE, &residual, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stop_time = MPI_Wtime();
MTestPrintfMsg(1, "(%d) Algorithm completed!\n", my_rank);
MTestPrintfMsg(2, "(%d) Residual = %f / Needed time: %f\n", my_rank, residual, stop_time - start_time);
free(new_values);
free(new_values_blob);
free(old_values);
free(old_values_blob);
#ifdef __NVCC__
CUDA_CHECK(cudaFree(__new_values_blob), 0);
CUDA_CHECK(cudaFree(__old_values_blob), 0);
#endif
MTest_Finalize(errs);
return MTestReturnValue(errs);
}
|
d64df5d2cbff1d24116d6d993d6acb4b860d6efb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <fstream>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cmath>
#include<sys/stat.h>
#include<ctime>
#include<omp.h> // needed for omp_get_wtime() and the OpenMP pragma used below
#include <hip/hip_runtime.h>
#include<thrust/reduce.h>
#include<cuda_runtime.h>
#include<thrust/sort.h>
#include<thrust/device_ptr.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/copy.h>
#include<thrust/execution_policy.h>
using namespace std;
#define thrustSortBlockSize 4000000000
#define bucketNum 10
struct edge{
unsigned int src;
unsigned int dst;
};
struct cmpStruc{
__device__ bool operator () (const edge &a, const edge &b){
return (a.src < b.src) || (a.src == b.src && a.dst < b.dst) ;
}
}cmp;
class edgeVector{
public:
unsigned int capcity;
unsigned int esize;
edge *Edges;
edgeVector(){esize = 0; capcity = 0;}
void init(unsigned int s) { Edges = new edge [s]; capcity = s; return ;}
void addEdge(int _src,int _dst){
if(esize >= capcity) {
capcity *= 2;
edge* tmpEdges = new edge [capcity];
memcpy(tmpEdges,Edges,sizeof(edge)*esize);
delete [] Edges;
Edges = tmpEdges;
}
Edges[esize].src = _src;
Edges[esize].dst = _dst;
esize ++;
}
void clear() {delete [] Edges; return ;}
};
int *edgeOffset;
int *edgeRow;
int *adjLength;
edge *Edges;
clock_t start_, end_;
bool preProcess(const char *fileName, unsigned int &_edgeNum, unsigned &_nodeNum)
{
//get file size
ifstream fin1(fileName,ios::in|ios::binary);
fin1.seekg(0,ios::end);
streampos Size = fin1.tellg();
fin1.close();
long int size = Size;
cout << "the size of input file is " << size << " Byte. " << endl;
unsigned int edgeNum = size/(sizeof(int)*2);
Edges = new edge [edgeNum];
//read data
ifstream fin(fileName, std::ios::binary);
	if (!fin) { // fin.bad() misses open failures, which only set failbit
		cout << "File not found!" << endl;
return false;
}
cout << "start read data... ..." << endl;
fin.read((char *)Edges,sizeof(edge)*edgeNum);
fin.close();
cout << "end read data" << endl;
//pre work
unsigned int maxNodeID = 0;
//#pragma omp parallel for
for(unsigned int i = 0; i < edgeNum; i ++)
{
if (Edges[i].src < Edges[i].dst) {
int tmpValue = Edges[i].dst;
Edges[i].dst = Edges[i].src;
Edges[i].src = tmpValue;
}
if (Edges[i].src > maxNodeID)
maxNodeID = Edges[i].src;
}
cout << "end rearrange dst and src" << endl;
unsigned nodeNum = maxNodeID + 1;
//sort edges
//************sort edges && get nodeNum********
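	// split the edges into buckets by source node so that each GPU thrust::sort works on a chunk no larger than thrustSortBlockSize (checked below)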
edgeVector * edgeBucket = new edgeVector [bucketNum];
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].init(edgeNum/bucketNum);
unsigned bucketStep = (nodeNum + bucketNum - 1)/bucketNum;
for (int i = 0; i < edgeNum; i ++)
{
int bucketID = Edges[i].src/bucketStep;
edgeBucket[bucketID].addEdge(Edges[i].src, Edges[i].dst);
}
cout << "end pust edges in bucket" << endl;
unsigned int *bucketEdgeOffset = new unsigned int [bucketNum];
bucketEdgeOffset[0] = 0;
for (int i = 0; i < bucketNum-1; i ++) {
unsigned int bucketSize = edgeBucket[i].esize;
if (bucketSize > thrustSortBlockSize/sizeof(edge)) {
cout << "bucket " << i << "size is " << bucketSize << ", it's too large!" << endl;
return false;
}
bucketEdgeOffset[i+1] = bucketEdgeOffset[i] + bucketSize;
}
for (int i = 0; i < bucketNum; i++) {
thrust::device_vector<edge> D (edgeBucket[i].Edges, edgeBucket[i].Edges+edgeBucket[i].esize);
thrust::sort(D.begin(),D.begin()+edgeBucket[i].esize,cmp);
thrust::copy(D.begin(),D.begin()+edgeBucket[i].esize,edgeBucket[i].Edges);
}
cout << "end sort edges in GPU " << endl;
for(int i = 0; i < bucketNum; i ++) {
memcpy(Edges+bucketEdgeOffset[i],edgeBucket[i].Edges,sizeof(edge)*edgeBucket[i].esize);
}
cout << "end copy result to Edges" << endl;
delete [] bucketEdgeOffset;
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].clear();
delete [] edgeBucket;
//************end sort edges && get nodeNum********
//unsigned int nodeNum = Edges[edgeNum-1].src + 1;
edgeOffset = new int [nodeNum+1];
edgeRow = new int [edgeNum];
adjLength = new int[nodeNum];
unsigned int nodePos = 0;
unsigned int edgePos = 0;
edgeOffset[0] = 0;
edge * edgePtr;
int formerSrc = -1,formerDst = -1;
start_ = clock();
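	// single pass over the sorted edge list: skip self-loops and duplicate edges, fill edgeRow, and update edgeOffset/adjLength whenever the source node changes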
for (unsigned int i = 0; i < edgeNum; i++)
{
edgePtr = Edges + i;
// cout << "start put cur edge in csr" << endl;
if (edgePtr->src == edgePtr->dst) {
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
continue;
}
// cout << "1 " << endl;
if ((i > 0) && (edgePtr->src == formerSrc)) {
			//TODO find a more efficient way
if(edgePtr->dst == formerDst){
continue;
}
edgeRow[edgePos++] = edgePtr->dst;
formerDst = edgePtr->dst;
continue;
}
// cout << "2 " << endl;
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
edgeRow[edgePos++] = edgePtr->dst;
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
// cout << " end an edge in a loop " << endl;
}
end_ = clock();
cout << "merge and make csr use " << (double)1000*(end_-start_)/CLOCKS_PER_SEC << " ms." << endl;
edgeOffset[nodeNum] = edgePos;
adjLength[nodeNum-1] = edgeOffset[nodeNum] - edgeOffset[nodeNum-1];
cout << "csr built, edgeNum is "<< edgePos<< ", the node num is " << nodeNum << ", origin egde num is " << edgeNum << endl;
//TODO remove empty node in edgeOffset
_edgeNum = edgeOffset[nodeNum];
_nodeNum = nodeNum;
delete [] Edges;
cout << "rebuild Edges now " << endl;
double start_omp = omp_get_wtime();
Edges = new edge[_edgeNum];
#pragma omp parallel for
for (int i = 0; i < _nodeNum; i++) {
int *curList = edgeRow + edgeOffset[i];
for (int j = 0; j < adjLength[i]; j++) {
Edges[edgeOffset[i]+j].src = i;
Edges[edgeOffset[i]+j].dst = curList[j];
}
}
double end_omp = omp_get_wtime();
cout << "rebuild use " << (end_omp-start_omp) << " s."<< endl;
cout << "rebuild done" << endl;
return true;
}
| d64df5d2cbff1d24116d6d993d6acb4b860d6efb.cu | #include <iostream>
#include <algorithm>
#include <vector>
#include <iterator>
#include <fstream>
#include <math.h>
#include <unistd.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <cmath>
#include<sys/stat.h>
#include<ctime>
#include<omp.h> // needed for omp_get_wtime() and the OpenMP pragma used below
#include <cuda_runtime.h>
#include<thrust/reduce.h>
#include<cuda_runtime.h>
#include<thrust/sort.h>
#include<thrust/device_ptr.h>
#include<thrust/device_vector.h>
#include<thrust/host_vector.h>
#include<thrust/copy.h>
#include<thrust/execution_policy.h>
using namespace std;
#define thrustSortBlockSize 4000000000
#define bucketNum 10
struct edge{
unsigned int src;
unsigned int dst;
};
struct cmpStruc{
__device__ bool operator () (const edge &a, const edge &b){
return (a.src < b.src) || (a.src == b.src && a.dst < b.dst) ;
}
}cmp;
class edgeVector{
public:
unsigned int capcity;
unsigned int esize;
edge *Edges;
edgeVector(){esize = 0; capcity = 0;}
void init(unsigned int s) { Edges = new edge [s]; capcity = s; return ;}
void addEdge(int _src,int _dst){
if(esize >= capcity) {
capcity *= 2;
edge* tmpEdges = new edge [capcity];
memcpy(tmpEdges,Edges,sizeof(edge)*esize);
delete [] Edges;
Edges = tmpEdges;
}
Edges[esize].src = _src;
Edges[esize].dst = _dst;
esize ++;
}
void clear() {delete [] Edges; return ;}
};
int *edgeOffset;
int *edgeRow;
int *adjLength;
edge *Edges;
clock_t start_, end_;
bool preProcess(const char *fileName, unsigned int &_edgeNum, unsigned &_nodeNum)
{
//get file size
ifstream fin1(fileName,ios::in|ios::binary);
fin1.seekg(0,ios::end);
streampos Size = fin1.tellg();
fin1.close();
long int size = Size;
cout << "the size of input file is " << size << " Byte. " << endl;
unsigned int edgeNum = size/(sizeof(int)*2);
Edges = new edge [edgeNum];
//read data
ifstream fin(fileName, std::ios::binary);
	if (!fin) { // fin.bad() misses open failures, which only set failbit
		cout << "File not found!" << endl;
return false;
}
cout << "start read data... ..." << endl;
fin.read((char *)Edges,sizeof(edge)*edgeNum);
fin.close();
cout << "end read data" << endl;
//pre work
unsigned int maxNodeID = 0;
//#pragma omp parallel for
for(unsigned int i = 0; i < edgeNum; i ++)
{
if (Edges[i].src < Edges[i].dst) {
int tmpValue = Edges[i].dst;
Edges[i].dst = Edges[i].src;
Edges[i].src = tmpValue;
}
if (Edges[i].src > maxNodeID)
maxNodeID = Edges[i].src;
}
cout << "end rearrange dst and src" << endl;
unsigned nodeNum = maxNodeID + 1;
//sort edges
//************sort edges && get nodeNum********
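	// split the edges into buckets by source node so that each GPU thrust::sort works on a chunk no larger than thrustSortBlockSize (checked below)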
edgeVector * edgeBucket = new edgeVector [bucketNum];
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].init(edgeNum/bucketNum);
unsigned bucketStep = (nodeNum + bucketNum - 1)/bucketNum;
for (int i = 0; i < edgeNum; i ++)
{
int bucketID = Edges[i].src/bucketStep;
edgeBucket[bucketID].addEdge(Edges[i].src, Edges[i].dst);
}
cout << "end pust edges in bucket" << endl;
unsigned int *bucketEdgeOffset = new unsigned int [bucketNum];
bucketEdgeOffset[0] = 0;
for (int i = 0; i < bucketNum-1; i ++) {
unsigned int bucketSize = edgeBucket[i].esize;
if (bucketSize > thrustSortBlockSize/sizeof(edge)) {
cout << "bucket " << i << "size is " << bucketSize << ", it's too large!" << endl;
return false;
}
bucketEdgeOffset[i+1] = bucketEdgeOffset[i] + bucketSize;
}
for (int i = 0; i < bucketNum; i++) {
thrust::device_vector<edge> D (edgeBucket[i].Edges, edgeBucket[i].Edges+edgeBucket[i].esize);
thrust::sort(D.begin(),D.begin()+edgeBucket[i].esize,cmp);
thrust::copy(D.begin(),D.begin()+edgeBucket[i].esize,edgeBucket[i].Edges);
}
cout << "end sort edges in GPU " << endl;
for(int i = 0; i < bucketNum; i ++) {
memcpy(Edges+bucketEdgeOffset[i],edgeBucket[i].Edges,sizeof(edge)*edgeBucket[i].esize);
}
cout << "end copy result to Edges" << endl;
delete [] bucketEdgeOffset;
for (int i = 0; i < bucketNum; i ++)
edgeBucket[i].clear();
delete [] edgeBucket;
//************end sort edges && get nodeNum********
//unsigned int nodeNum = Edges[edgeNum-1].src + 1;
edgeOffset = new int [nodeNum+1];
edgeRow = new int [edgeNum];
adjLength = new int[nodeNum];
unsigned int nodePos = 0;
unsigned int edgePos = 0;
edgeOffset[0] = 0;
edge * edgePtr;
int formerSrc = -1,formerDst = -1;
start_ = clock();
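	// single pass over the sorted edge list: skip self-loops and duplicate edges, fill edgeRow, and update edgeOffset/adjLength whenever the source node changes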
for (unsigned int i = 0; i < edgeNum; i++)
{
edgePtr = Edges + i;
// cout << "start put cur edge in csr" << endl;
if (edgePtr->src == edgePtr->dst) {
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
continue;
}
// cout << "1 " << endl;
if ((i > 0) && (edgePtr->src == formerSrc)) {
			//TODO find a more efficient way
if(edgePtr->dst == formerDst){
continue;
}
edgeRow[edgePos++] = edgePtr->dst;
formerDst = edgePtr->dst;
continue;
}
// cout << "2 " << endl;
int curSrc = edgePtr->src;
for (unsigned j = nodePos + 1; j <= curSrc; j++) {
edgeOffset[j] = edgePos;
adjLength[j-1] = edgeOffset[j]-edgeOffset[j-1];
}
nodePos = curSrc;
edgeRow[edgePos++] = edgePtr->dst;
formerSrc = edgePtr->src;
formerDst = edgePtr->dst;
// cout << " end an edge in a loop " << endl;
}
end_ = clock();
cout << "merge and make csr use " << (double)1000*(end_-start_)/CLOCKS_PER_SEC << " ms." << endl;
edgeOffset[nodeNum] = edgePos;
adjLength[nodeNum-1] = edgeOffset[nodeNum] - edgeOffset[nodeNum-1];
cout << "csr built, edgeNum is "<< edgePos<< ", the node num is " << nodeNum << ", origin egde num is " << edgeNum << endl;
//TODO remove empty node in edgeOffset
_edgeNum = edgeOffset[nodeNum];
_nodeNum = nodeNum;
delete [] Edges;
cout << "rebuild Edges now " << endl;
double start_omp = omp_get_wtime();
Edges = new edge[_edgeNum];
#pragma omp parallel for
for (int i = 0; i < _nodeNum; i++) {
int *curList = edgeRow + edgeOffset[i];
for (int j = 0; j < adjLength[i]; j++) {
Edges[edgeOffset[i]+j].src = i;
Edges[edgeOffset[i]+j].dst = curList[j];
}
}
double end_omp = omp_get_wtime();
cout << "rebuild use " << (end_omp-start_omp) << " s."<< endl;
cout << "rebuild done" << endl;
return true;
}
|
a73c0921a1f3ba6651e949af3ef56b258ad2da0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
struct ImageGenerator
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8
};
PtrStep<float> vmap;
PtrStep<float> nmap;
LightSource light;
mutable PtrStepSz<uchar3> dst;
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 v, n;
v.x = vmap.ptr (y)[x];
n.x = nmap.ptr (y)[x];
uchar3 color = make_uchar3 (0, 0, 0);
if (!isnan (v.x) && !isnan (n.x))
{
v.y = vmap.ptr (y + dst.rows)[x];
v.z = vmap.ptr (y + 2 * dst.rows)[x];
n.y = nmap.ptr (y + dst.rows)[x];
n.z = nmap.ptr (y + 2 * dst.rows)[x];
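            // Lambertian-style shading: attenuate the intensity by |dot(light direction, normal)| for each light source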
float weight = 1.f;
for (int i = 0; i < light.number; ++i)
{
float3 vec = normalized (light.pos[i] - v);
weight *= std::abs (dot (vec, n));
}
int br = (int)(205 * weight) + 50;
br = max (0, min (255, br));
color = make_uchar3 (br, br, br);
}
dst.ptr (y)[x] = color;
}
};
__global__ void
generateImageKernel (const ImageGenerator ig) {
ig ();
}
void
generateImage (const MapArr& vmap, const MapArr& nmap, const LightSource& light,
PtrStepSz<uchar3> dst)
{
ImageGenerator ig;
ig.vmap = vmap;
ig.nmap = nmap;
ig.light = light;
ig.dst = dst;
dim3 block (ImageGenerator::CTA_SIZE_X, ImageGenerator::CTA_SIZE_Y);
dim3 grid (divUp (dst.cols, block.x), divUp (dst.rows, block.y));
hipLaunchKernelGGL(( generateImageKernel), dim3(grid), dim3(block), 0, 0, ig);
cudaSafeCall (hipGetLastError ());
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
__global__ void generateDepthKernel(const float3 R_inv_row3, const float3 t, const PtrStep<float> vmap, PtrStepSz<unsigned short> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
unsigned short result = 0;
float3 v_g;
v_g.x = vmap.ptr (y)[x];
if (!isnan (v_g.x))
{
v_g.y = vmap.ptr (y + depth.rows)[x];
v_g.z = vmap.ptr (y + 2 * depth.rows)[x];
float v_z = dot(R_inv_row3, v_g - t);
result = static_cast<unsigned short>(v_z * 1000);
}
depth.ptr(y)[x] = result;
}
}
void
generateDepth (const Mat33& R_inv, const float3& t, const MapArr& vmap, DepthMap& dst)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols(), block.x), divUp(dst.rows(), block.y));
hipLaunchKernelGGL(( generateDepthKernel), dim3(grid), dim3(block), 0, 0, R_inv.data[2], t, vmap, dst);
cudaSafeCall (hipGetLastError ());
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
__global__ void
paint3DViewKernel(const PtrStep<uchar3> colors, PtrStepSz<uchar3> dst, float colors_weight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < dst.cols && y < dst.rows)
{
uchar3 value = dst.ptr(y)[x];
uchar3 color = colors.ptr(y)[x];
if (value.x != 0 || value.y != 0 || value.z != 0)
{
float cx = value.x * (1.f - colors_weight) + color.x * colors_weight;
float cy = value.y * (1.f - colors_weight) + color.y * colors_weight;
float cz = value.z * (1.f - colors_weight) + color.z * colors_weight;
value.x = min(255, max(0, __float2int_rn(cx)));
value.y = min(255, max(0, __float2int_rn(cy)));
value.z = min(255, max(0, __float2int_rn(cz)));
}
dst.ptr(y)[x] = value;
}
}
void
paint3DView(const PtrStep<uchar3>& colors, PtrStepSz<uchar3> dst, float colors_weight)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
colors_weight = min(1.f, max(0.f, colors_weight));
hipLaunchKernelGGL(( paint3DViewKernel), dim3(grid), dim3(block), 0, 0, colors, dst, colors_weight);
cudaSafeCall (hipGetLastError ());
cudaSafeCall (hipDeviceSynchronize ());
}
}
}
} | a73c0921a1f3ba6651e949af3ef56b258ad2da0d.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <boost/graph/buffer_concepts.hpp>
namespace pcl
{
namespace device
{
namespace kinfuLS
{
struct ImageGenerator
{
enum
{
CTA_SIZE_X = 32, CTA_SIZE_Y = 8
};
PtrStep<float> vmap;
PtrStep<float> nmap;
LightSource light;
mutable PtrStepSz<uchar3> dst;
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (x >= dst.cols || y >= dst.rows)
return;
float3 v, n;
v.x = vmap.ptr (y)[x];
n.x = nmap.ptr (y)[x];
uchar3 color = make_uchar3 (0, 0, 0);
if (!isnan (v.x) && !isnan (n.x))
{
v.y = vmap.ptr (y + dst.rows)[x];
v.z = vmap.ptr (y + 2 * dst.rows)[x];
n.y = nmap.ptr (y + dst.rows)[x];
n.z = nmap.ptr (y + 2 * dst.rows)[x];
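            // Lambertian-style shading: attenuate the intensity by |dot(light direction, normal)| for each light source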
float weight = 1.f;
for (int i = 0; i < light.number; ++i)
{
float3 vec = normalized (light.pos[i] - v);
weight *= std::abs (dot (vec, n));
}
int br = (int)(205 * weight) + 50;
br = max (0, min (255, br));
color = make_uchar3 (br, br, br);
}
dst.ptr (y)[x] = color;
}
};
__global__ void
generateImageKernel (const ImageGenerator ig) {
ig ();
}
void
generateImage (const MapArr& vmap, const MapArr& nmap, const LightSource& light,
PtrStepSz<uchar3> dst)
{
ImageGenerator ig;
ig.vmap = vmap;
ig.nmap = nmap;
ig.light = light;
ig.dst = dst;
dim3 block (ImageGenerator::CTA_SIZE_X, ImageGenerator::CTA_SIZE_Y);
dim3 grid (divUp (dst.cols, block.x), divUp (dst.rows, block.y));
generateImageKernel<<<grid, block>>>(ig);
cudaSafeCall (cudaGetLastError ());
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
__global__ void generateDepthKernel(const float3 R_inv_row3, const float3 t, const PtrStep<float> vmap, PtrStepSz<unsigned short> depth)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < depth.cols && y < depth.rows)
{
unsigned short result = 0;
float3 v_g;
v_g.x = vmap.ptr (y)[x];
if (!isnan (v_g.x))
{
v_g.y = vmap.ptr (y + depth.rows)[x];
v_g.z = vmap.ptr (y + 2 * depth.rows)[x];
float v_z = dot(R_inv_row3, v_g - t);
result = static_cast<unsigned short>(v_z * 1000);
}
depth.ptr(y)[x] = result;
}
}
void
generateDepth (const Mat33& R_inv, const float3& t, const MapArr& vmap, DepthMap& dst)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols(), block.x), divUp(dst.rows(), block.y));
generateDepthKernel<<<grid, block>>>(R_inv.data[2], t, vmap, dst);
cudaSafeCall (cudaGetLastError ());
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
namespace pcl
{
namespace device
{
namespace kinfuLS
{
__global__ void
paint3DViewKernel(const PtrStep<uchar3> colors, PtrStepSz<uchar3> dst, float colors_weight)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < dst.cols && y < dst.rows)
{
uchar3 value = dst.ptr(y)[x];
uchar3 color = colors.ptr(y)[x];
if (value.x != 0 || value.y != 0 || value.z != 0)
{
float cx = value.x * (1.f - colors_weight) + color.x * colors_weight;
float cy = value.y * (1.f - colors_weight) + color.y * colors_weight;
float cz = value.z * (1.f - colors_weight) + color.z * colors_weight;
value.x = min(255, max(0, __float2int_rn(cx)));
value.y = min(255, max(0, __float2int_rn(cy)));
value.z = min(255, max(0, __float2int_rn(cz)));
}
dst.ptr(y)[x] = value;
}
}
void
paint3DView(const PtrStep<uchar3>& colors, PtrStepSz<uchar3> dst, float colors_weight)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
colors_weight = min(1.f, max(0.f, colors_weight));
paint3DViewKernel<<<grid, block>>>(colors, dst, colors_weight);
cudaSafeCall (cudaGetLastError ());
cudaSafeCall (cudaDeviceSynchronize ());
}
}
}
} |
79abe6de62d3fc1081955d7c745113b8b4ab031a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef INSIZE
#define DEBUG 0
#define INSIZE 256 //dimension of 'in'
#define N_ATOMS 64 //atoms inside 'in'
#define N_FRAGS 4 //number of fragments
#define MASKSIZE 256 //dimension of the 'mask'
#define VOLUMESIZE 1000000 //dimension of 'score_pos'
#define MAX_ANGLE 256
#define LIMIT_DISTANCE2 2.0 //used in fragment_is_bumping, it is the minimum distance between two atoms
#define GRID_FACTOR_D 0.5
#define POCKET_SIZE 100 //linear dimension of the pocket (cube POCKET_SIZExPOCKET_SIZExPOCKET_SIZE)
#define PI 3.141592653589793238462643383279
#define RADIAN_COEF PI/128.0;
#endif
#include <cuda_kerns.h>
#include <stdio.h>
//Mask and the scores array are constant, so we treat them as texture objects
static hipTextureObject_t texScore_pos;
texture<int, 1, hipReadModeElementType> texMask;
//This function returns the sum (reduction) of the 'val' variables in threads of the same warp
__inline__ __device__ int warpReduce(int val) {
for (int i = warpSize/2; i > 0; i/=2)
val += __shfl_down_sync(0xffffffff, val, i, 32);
return val;
}
//This function returns the sum (reduction) of the 'val' variables in threads of the same block
__inline__ __device__ int blockReduce(int val) {
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
	//first reduce across all the warps
val = warpReduce(val);
	//then store the values of threads 0, 32, 64, ... (lane 0 of each warp) in shared memory
if (lane==0) shared[wid]=val;
__syncthreads();
//and perform the reduction of the shared values in the first warp, to obtain the final sum
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduce(val);
return val;
}
//Just compute_matrix, as in the sequential program. No modifications here. (except for the cuda math functions)
__device__ void compute_matrix( const int rotation_angle,
const float x_orig, const float y_orig, const float z_orig,
const float x_vector, const float y_vector, const float z_vector, float* matrix){
const float u = (float)x_vector - x_orig;
const float v = (float)y_vector - y_orig;
const float w = (float)z_vector - z_orig;
const float u2 = u * u;
const float v2 = v * v;
const float w2 = w * w;
const float l2 = u * u + v * v + w * w;
const float l = sqrtf(l2);
const float angle_rad = (float)-rotation_angle*RADIAN_COEF;
const float sint = sinf(angle_rad);
const float cost = cosf(angle_rad);
const float one_minus_cost = (float)1.0 - cost;
matrix[0] = (u2 + (v2 + w2) * cost) / l2;
matrix[1] = (u* v * one_minus_cost - w* l * sint) / l2;
matrix[2] = (u* w * one_minus_cost + v* l * sint) / l2;
matrix[3] = ((x_orig * (v2 + w2) - u * (y_orig * v + z_orig * w)) * one_minus_cost + (y_orig * w - z_orig * v) * l * sint) / l2;
matrix[4] = (u* v * one_minus_cost + w* l * sint) / l2;
matrix[5] = (v2 + (u2 + w2) * cost) / l2;
matrix[6] = (v* w * one_minus_cost - u* l * sint) / l2;
matrix[7] = ((y_orig * (u2 + w2) - v * (x_orig * u + z_orig * w)) * one_minus_cost + (z_orig * u - x_orig * w) * l * sint) / l2;
matrix[8] = (u* w * one_minus_cost - v* l * sint) / l2;
matrix[9] = (v* w * one_minus_cost + u* l * sint) / l2;
matrix[10]= (w2 + (u2 + v2) * cost) / l2;
matrix[11]= ((z_orig * (u2 + v2) - w * (x_orig * u + y_orig * v)) * one_minus_cost + (x_orig * v - y_orig * u) * l * sint) / l2;
}
//Performs the rotation of the in[] array
__global__ void rotate(float* in, hipTextureObject_t mask, int iter, float precision, int* start, int* stop){
const int index = blockIdx.x;
const int curr_start = start[iter];//tex1Dfetch<int>(start, iter);
const int curr_stop = stop[iter];//tex1Dfetch<int>(stop, iter);
const int x = threadIdx.x;
const int y = threadIdx.x + N_ATOMS;
const int z = threadIdx.x + 2*N_ATOMS;
const int offset = ceil(index*INSIZE/precision);
float m[12];
__shared__ float in_s[N_ATOMS*3];
//pre-load in[] in shared memory for speed
in_s[x] = in[x];
in_s[y] = in[y];
in_s[z] = in[z];
__syncthreads();
//compute the rotation matrix
compute_matrix(index*precision,in_s[curr_start],in_s[curr_start+N_ATOMS],in_s[curr_start+2*N_ATOMS],in_s[curr_stop],in_s[curr_stop+N_ATOMS], in_s[curr_stop+2*N_ATOMS], m);
//read mask from the texture
const int mask_x = tex1Dfetch<int>(mask, x+iter*N_ATOMS);
	//then write into in[] the rotated value (if the atom is in the mask) or the unrotated value
if(mask_x == 1){
in[x+offset] = m[0] * in_s[x] + m[1] * in_s[y] + m[2] * in_s[z] + m[3];
in[y+offset] = m[4] * in_s[x] + m[5] * in_s[y] + m[6] * in_s[z] + m[7];
in[z+offset] = m[8] * in_s[x] + m[9] * in_s[y] + m[10] * in_s[z] + m[11];
} else {
in[x+offset]=in_s[x];
in[y+offset]=in_s[y];
in[z+offset]=in_s[z];
}
}
//compute the shotgun of the in[] array
__global__ void measure_shotgun (float* in, hipTextureObject_t scores, int* shotgun, float precision, int iter){
const int index = blockIdx.x;
const int writers = threadIdx.x;
//threadIdx.x represent the atom that we are looking at
//index*INSIZE is the x position of the first atom for a particular angle inside the in[] array
//N_ATOMS and 2*N_ATOMS retrieves the y and z values of the atom
const int x = threadIdx.x + index*INSIZE;
const int y = threadIdx.x + index*INSIZE + N_ATOMS;
const int z = threadIdx.x + index*INSIZE + 2*N_ATOMS;
int index_x = (int) (in[x]*GRID_FACTOR_D);
int index_y = (int) (in[y]*GRID_FACTOR_D);
int index_z = (int) (in[z]*GRID_FACTOR_D);
//find the score from the texture
int score = (int) tex3D<float>(scores, index_x, index_y, index_z);
//sum up all the scores and store them in the shotgun array
int reduced = blockReduce(score);
if(!writers) shotgun[index] = reduced;
}
//find if a particular atom is bumping with one another
__global__ void fragment_is_bumping(float* in, hipTextureObject_t mask, int* is_bumping_p, int iter, float precision, int* is_bumping){
const int index = blockIdx.y;
//here the atoms to be checked, 'i' and 'j', are selected indexing from the threads and from the blocks (to get all the pairs)
int ix = threadIdx.x;
int iy = threadIdx.x + N_ATOMS;
int iz = threadIdx.x + 2*N_ATOMS;
int jx = blockIdx.x;
int jy = blockIdx.x + N_ATOMS;
int jz = blockIdx.x + 2*N_ATOMS;
int offset = index*INSIZE;
__shared__ float in_s[N_ATOMS*3];
//pre-load in[] in shared memory for speed
in_s[ix] = in[ix+offset];
in_s[iy] = in[iy+offset];
in_s[iz] = in[iz+offset];
__syncthreads();
const float diff_x = in_s[ix] - in_s[jx];
const float diff_y = in_s[iy] - in_s[jy];
const float diff_z = in_s[iz] - in_s[jz];
const float distance2 = diff_x * diff_x + diff_y * diff_y + diff_z * diff_z;
//read the mask values from texture
int m_ix = tex1Dfetch<int>(mask, ix+iter*N_ATOMS);
int m_jx = tex1Dfetch<int>(mask, jx+iter*N_ATOMS);
	/*is this pair bumping? jx>ix is needed to make sure we don't count the same pair twice
	(and to avoid checking an element against itself)*/
int val_bit = (fabsf(m_ix - m_jx) == 1 && jx>ix && distance2 < LIMIT_DISTANCE2)? 1:0;
	//sum all the bits across each block and write back to a partial bumping matrix (one array per angle)
int reduced = blockReduce(val_bit);
if(!ix) is_bumping_p[jx+index*N_ATOMS] = reduced;
__syncthreads();
//take the partial bumping matrix and reduce it to a single value for each angle
if(!jx){
int val_bit = is_bumping_p[ix+index*N_ATOMS];
int reduced = blockReduce(val_bit);
if(!ix) is_bumping[index] = (reduced)? 1:0;
}
}
//this function finds the index, the shotgun and the bumping of the best angle in a warp
__inline__ __device__ void warpReduce(int ind, int sho, int bum, int &ret1, int &ret2, int &ret3) {
int im, sm, bm;
for (int i = warpSize/2; i > 0; i/=2){
im = __shfl_down_sync(0xffffffff, ind, i, 32);
sm = __shfl_down_sync(0xffffffff, sho, i, 32);
bm = __shfl_down_sync(0xffffffff, bum, i, 32);
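		// adopt the neighbour's candidate only if it bumps less, or bumps the same amount with a strictly higher shotgun score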
if(!(bm > bum || (bum==bm && sho>=sm))){
ind = im;
sho = sm;
bum = bm;
}
}
ret1=ind;
ret2=sho;
ret3=bum;
}
/*this function finds the index, the shotgun and the bumping of the best angle in a block and returns the index
behaves exactly like blockReduce, but implements the support logic to find the best index, not to sum values*/
__inline__ __device__ int find_best(int* shotgun, int* bumping, int index){
int shot = shotgun[index];
int bum = bumping[index];
int ind = index;
static __shared__ int sharedI[32];
static __shared__ int sharedS[32];
static __shared__ int sharedB[32];
int lane = index % warpSize;
int wid = index / warpSize;
//this function writes back the best values in the last 3 parameters
warpReduce(index, shot, bum, ind, shot, bum);
if (lane==0){
sharedI[wid]=ind;
sharedS[wid]=shot;
sharedB[wid]=bum;
}
__syncthreads();
//we need to copy the values to the first warp to reduce them
if(index < blockDim.x / warpSize){
ind = sharedI[lane];
bum = sharedB[lane];
shot = sharedS[lane];
} else {
ind = 0;
bum = 1;
shot = 0;
}
//find the absolute best index in the first warp
if (wid==0) warpReduce(ind, shot, bum, ind, shot, bum);
return ind;
}
__global__ void eval_angles(float* in, int* shotgun, int* bumping){
__shared__ int best_angle;
const int index = threadIdx.x;
int best_index = find_best(shotgun, bumping, index);
//this is done in order to broadcast to every thread what is the best value (otherwise, only the first warp knows it)
if(index == 0) best_angle = best_index;
__syncthreads();
//Copies back the best in[] in the first position.
if(index < INSIZE) in[index] = in[best_angle*INSIZE+index];
}
void ps_kern(float* in, float* out, float precision, float* score_pos, int* start, int* stop, int* mask)
{
float *d_in, *d_score_pos;
int *d_start, *d_stop, *d_mask, *d_shotgun;
int *d_bumping, *d_bumping_partial;
hipError_t status, status_cp, status_wb;
hipStream_t s1, s2;
hipEvent_t start_t, stop_t;
status = hipMalloc((void**) &d_in, sizeof(float)*INSIZE*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status_cp = hipMemcpy(d_in, in, sizeof(float)*INSIZE, hipMemcpyHostToDevice);
if(DEBUG && status_cp!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_cp), __FILE__, __LINE__);
status = hipMalloc((void**) &d_start, sizeof(int)*N_ATOMS);
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status_cp = hipMemcpy(d_start, start, sizeof(int)*N_ATOMS, hipMemcpyHostToDevice);
if(DEBUG && status_cp!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_cp), __FILE__, __LINE__);
status = hipMalloc((void**) &d_stop, sizeof(int)*N_ATOMS);
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status_cp = hipMemcpy(d_stop, stop, sizeof(int)*N_ATOMS, hipMemcpyHostToDevice);
if(DEBUG && status_cp!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_cp), __FILE__, __LINE__);
status = hipMalloc((void**)&d_bumping, sizeof(int)*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status = hipMalloc((void**)&d_bumping_partial, sizeof(int)*ceil(MAX_ANGLE/precision)*N_ATOMS);
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status = hipMalloc((void**)&d_shotgun, sizeof(int)*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status = hipMalloc((void**) &d_mask, sizeof(int)*MASKSIZE);
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status_cp = hipMemcpy(d_mask, mask, sizeof(int)*MASKSIZE, hipMemcpyHostToDevice);
if(DEBUG && status_cp!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_cp), __FILE__, __LINE__);
status = hipMalloc((void**) &d_score_pos, sizeof(float)*VOLUMESIZE);
if(DEBUG && status!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status), __FILE__, __LINE__);
status_cp = hipMemcpy(d_score_pos, score_pos, sizeof(float)*VOLUMESIZE, hipMemcpyHostToDevice);
if(DEBUG && status_cp!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_cp), __FILE__, __LINE__);
//initializes bindless texture objects for mask and score_pos
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipArray *d_cuArr;
hipMalloc3DArray(&d_cuArr, &channelDesc, make_hipExtent(POCKET_SIZE*sizeof(float),POCKET_SIZE,POCKET_SIZE), 0);
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(d_score_pos, POCKET_SIZE*sizeof(float), POCKET_SIZE, POCKET_SIZE);
copyParams.dstArray = d_cuArr;
copyParams.extent = make_hipExtent(POCKET_SIZE,POCKET_SIZE,POCKET_SIZE);
copyParams.kind = hipMemcpyDeviceToDevice;
hipMemcpy3D(©Params);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArr;
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.addressMode[0] = hipAddressModeClamp;
texDescr.addressMode[1] = hipAddressModeClamp;
texDescr.addressMode[2] = hipAddressModeClamp;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texScore_pos, &texRes, &texDescr, NULL);
hipResourceDesc resDesc2;
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc2.resType = hipResourceTypeLinear;
resDesc2.res.linear.devPtr = d_mask;
resDesc2.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc2.res.linear.desc.x = 32;
resDesc2.res.linear.sizeInBytes = MASKSIZE*sizeof(int);
hipTextureDesc texDesc2;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.readMode = hipReadModeElementType;
hipTextureObject_t texMask=0;
hipCreateTextureObject(&texMask, &resDesc2, &texDesc2, NULL);
hipEventCreate(&start_t);
hipEventCreate(&stop_t);
hipStreamCreate(&s1);
hipStreamCreate(&s2);
//these are the blocks needed by the fragment_is_bumping() kernel
dim3 bump_blocks(N_ATOMS,ceil(MAX_ANGLE/precision));
hipEventRecord(start_t);
//for each fragment
for (int i=0;i<N_FRAGS;++i){
//For each angle (MAX_ANGLE/precision) rotates each atom (N_ATOMS)
hipLaunchKernelGGL(( rotate), dim3(ceil(MAX_ANGLE/precision)),dim3(N_ATOMS),0,s1, d_in, texMask, i, precision, d_start, d_stop);
hipStreamSynchronize(s1);
//For each angle, evaluates all the pairs
hipLaunchKernelGGL(( fragment_is_bumping), dim3(bump_blocks),dim3(N_ATOMS),0,s1, d_in, texMask, d_bumping_partial, i, precision, d_bumping);
//For each angle (this for each rotated in[]) computes the shotgun of each atom
hipLaunchKernelGGL(( measure_shotgun), dim3(ceil(MAX_ANGLE/precision)),dim3(N_ATOMS),0,s2, d_in, texScore_pos, d_shotgun, precision, i);
		//To make sure both streams have finished before evaluating the angles
hipStreamSynchronize(s1);
hipStreamSynchronize(s2);
//For each angle, find the best in[] and write it back in the first position
hipLaunchKernelGGL(( eval_angles), dim3(1),dim3(ceil(MAX_ANGLE/precision)),0,s1, d_in, d_shotgun, d_bumping);
}
hipStreamSynchronize(s1);
//Timing result
hipEventRecord(stop_t);
hipEventSynchronize(stop_t);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start_t, stop_t);
printf("\nKernels executed in %f milliseconds\n", milliseconds);
//Write back the rotated in array
status_wb = hipMemcpy(out, d_in, sizeof(float)*INSIZE, hipMemcpyDeviceToHost);
if(DEBUG && status_wb!=hipSuccess)
printf("%s in %s at line %d\n", hipGetErrorString(status_wb), __FILE__, __LINE__);
hipDestroyTextureObject(texScore_pos);
hipDestroyTextureObject(texMask);
hipFree(d_bumping_partial);
hipEventDestroy(start_t);
hipEventDestroy(stop_t);
hipStreamDestroy(s1);
hipStreamDestroy(s2);
hipFree(d_bumping);
hipFree(d_shotgun);
hipFree(d_start);
hipFree(d_stop);
hipFree(d_in);
}
| 79abe6de62d3fc1081955d7c745113b8b4ab031a.cu | #ifndef INSIZE
#define DEBUG 0
#define INSIZE 256 //dimension of 'in'
#define N_ATOMS 64 //atoms inside 'in'
#define N_FRAGS 4 //number of fragments
#define MASKSIZE 256 //dimension of the 'mask'
#define VOLUMESIZE 1000000 //dimension of 'score_pos'
#define MAX_ANGLE 256
#define LIMIT_DISTANCE2 2.0 //used in fragment_is_bumping, it is the minimum distance between two atoms
#define GRID_FACTOR_D 0.5
#define POCKET_SIZE 100 //linear dimension of the pocket (cube POCKET_SIZExPOCKET_SIZExPOCKET_SIZE)
#define PI 3.141592653589793238462643383279
#define RADIAN_COEF PI/128.0;
#endif
#include <cuda_kerns.h>
#include <stdio.h>
//Mask and the scores array are constant, so we treat them as texture objects
static cudaTextureObject_t texScore_pos;
texture<int, 1, cudaReadModeElementType> texMask;
//This function returns the sum (reduction) of the 'val' variables in threads of the same warp
__inline__ __device__ int warpReduce(int val) {
for (int i = warpSize/2; i > 0; i/=2)
val += __shfl_down_sync(0xffffffff, val, i, 32);
return val;
}
//This function returns the sum (reduction) of the 'val' variables in threads of the same block
__inline__ __device__ int blockReduce(int val) {
static __shared__ int shared[32];
int lane = threadIdx.x % warpSize;
int wid = threadIdx.x / warpSize;
	//first reduce across all the warps
val = warpReduce(val);
	//then store the values of threads 0, 32, 64, ... (lane 0 of each warp) in shared memory
if (lane==0) shared[wid]=val;
__syncthreads();
//and perform the reduction of the shared values in the first warp, to obtain the final sum
val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0;
if (wid==0) val = warpReduce(val);
return val;
}
//Just compute_matrix, as in the sequential program. No modifications here. (except for the cuda math functions)
__device__ void compute_matrix( const int rotation_angle,
const float x_orig, const float y_orig, const float z_orig,
const float x_vector, const float y_vector, const float z_vector, float* matrix){
const float u = (float)x_vector - x_orig;
const float v = (float)y_vector - y_orig;
const float w = (float)z_vector - z_orig;
const float u2 = u * u;
const float v2 = v * v;
const float w2 = w * w;
const float l2 = u * u + v * v + w * w;
const float l = sqrtf(l2);
const float angle_rad = (float)-rotation_angle*RADIAN_COEF;
const float sint = sinf(angle_rad);
const float cost = cosf(angle_rad);
const float one_minus_cost = (float)1.0 - cost;
matrix[0] = (u2 + (v2 + w2) * cost) / l2;
matrix[1] = (u* v * one_minus_cost - w* l * sint) / l2;
matrix[2] = (u* w * one_minus_cost + v* l * sint) / l2;
matrix[3] = ((x_orig * (v2 + w2) - u * (y_orig * v + z_orig * w)) * one_minus_cost + (y_orig * w - z_orig * v) * l * sint) / l2;
matrix[4] = (u* v * one_minus_cost + w* l * sint) / l2;
matrix[5] = (v2 + (u2 + w2) * cost) / l2;
matrix[6] = (v* w * one_minus_cost - u* l * sint) / l2;
matrix[7] = ((y_orig * (u2 + w2) - v * (x_orig * u + z_orig * w)) * one_minus_cost + (z_orig * u - x_orig * w) * l * sint) / l2;
matrix[8] = (u* w * one_minus_cost - v* l * sint) / l2;
matrix[9] = (v* w * one_minus_cost + u* l * sint) / l2;
matrix[10]= (w2 + (u2 + v2) * cost) / l2;
matrix[11]= ((z_orig * (u2 + v2) - w * (x_orig * u + y_orig * v)) * one_minus_cost + (x_orig * v - y_orig * u) * l * sint) / l2;
}
//Performs the rotation of the in[] array
__global__ void rotate(float* in, cudaTextureObject_t mask, int iter, float precision, int* start, int* stop){
const int index = blockIdx.x;
const int curr_start = start[iter];//tex1Dfetch<int>(start, iter);
const int curr_stop = stop[iter];//tex1Dfetch<int>(stop, iter);
const int x = threadIdx.x;
const int y = threadIdx.x + N_ATOMS;
const int z = threadIdx.x + 2*N_ATOMS;
const int offset = ceil(index*INSIZE/precision);
float m[12];
__shared__ float in_s[N_ATOMS*3];
//pre-load in[] in shared memory for speed
in_s[x] = in[x];
in_s[y] = in[y];
in_s[z] = in[z];
__syncthreads();
//compute the rotation matrix
compute_matrix(index*precision,in_s[curr_start],in_s[curr_start+N_ATOMS],in_s[curr_start+2*N_ATOMS],in_s[curr_stop],in_s[curr_stop+N_ATOMS], in_s[curr_stop+2*N_ATOMS], m);
//read mask from the texture
const int mask_x = tex1Dfetch<int>(mask, x+iter*N_ATOMS);
	//then write into in[] the rotated value (if the atom is in the mask) or the unrotated value
if(mask_x == 1){
in[x+offset] = m[0] * in_s[x] + m[1] * in_s[y] + m[2] * in_s[z] + m[3];
in[y+offset] = m[4] * in_s[x] + m[5] * in_s[y] + m[6] * in_s[z] + m[7];
in[z+offset] = m[8] * in_s[x] + m[9] * in_s[y] + m[10] * in_s[z] + m[11];
} else {
in[x+offset]=in_s[x];
in[y+offset]=in_s[y];
in[z+offset]=in_s[z];
}
}
//compute the shotgun of the in[] array
__global__ void measure_shotgun (float* in, cudaTextureObject_t scores, int* shotgun, float precision, int iter){
const int index = blockIdx.x;
const int writers = threadIdx.x;
//threadIdx.x represent the atom that we are looking at
//index*INSIZE is the x position of the first atom for a particular angle inside the in[] array
//N_ATOMS and 2*N_ATOMS retrieves the y and z values of the atom
const int x = threadIdx.x + index*INSIZE;
const int y = threadIdx.x + index*INSIZE + N_ATOMS;
const int z = threadIdx.x + index*INSIZE + 2*N_ATOMS;
int index_x = (int) (in[x]*GRID_FACTOR_D);
int index_y = (int) (in[y]*GRID_FACTOR_D);
int index_z = (int) (in[z]*GRID_FACTOR_D);
//find the score from the texture
int score = (int) tex3D<float>(scores, index_x, index_y, index_z);
//sum up all the scores and store them in the shotgun array
int reduced = blockReduce(score);
if(!writers) shotgun[index] = reduced;
}
//find if a particular atom is bumping with one another
__global__ void fragment_is_bumping(float* in, cudaTextureObject_t mask, int* is_bumping_p, int iter, float precision, int* is_bumping){
const int index = blockIdx.y;
//here the atoms to be checked, 'i' and 'j', are selected indexing from the threads and from the blocks (to get all the pairs)
int ix = threadIdx.x;
int iy = threadIdx.x + N_ATOMS;
int iz = threadIdx.x + 2*N_ATOMS;
int jx = blockIdx.x;
int jy = blockIdx.x + N_ATOMS;
int jz = blockIdx.x + 2*N_ATOMS;
int offset = index*INSIZE;
__shared__ float in_s[N_ATOMS*3];
//pre-load in[] in shared memory for speed
in_s[ix] = in[ix+offset];
in_s[iy] = in[iy+offset];
in_s[iz] = in[iz+offset];
__syncthreads();
const float diff_x = in_s[ix] - in_s[jx];
const float diff_y = in_s[iy] - in_s[jy];
const float diff_z = in_s[iz] - in_s[jz];
const float distance2 = diff_x * diff_x + diff_y * diff_y + diff_z * diff_z;
//read the mask values from texture
int m_ix = tex1Dfetch<int>(mask, ix+iter*N_ATOMS);
int m_jx = tex1Dfetch<int>(mask, jx+iter*N_ATOMS);
	/*is this pair bumping? jx>ix is needed to make sure we don't count the same pair twice
	(and to avoid checking an element against itself)*/
int val_bit = (fabsf(m_ix - m_jx) == 1 && jx>ix && distance2 < LIMIT_DISTANCE2)? 1:0;
	//sum all the bits across each block and write back to a partial bumping matrix (one array per angle)
int reduced = blockReduce(val_bit);
if(!ix) is_bumping_p[jx+index*N_ATOMS] = reduced;
__syncthreads();
//take the partial bumping matrix and reduce it to a single value for each angle
if(!jx){
int val_bit = is_bumping_p[ix+index*N_ATOMS];
int reduced = blockReduce(val_bit);
if(!ix) is_bumping[index] = (reduced)? 1:0;
}
}
//this function finds the index, the shotgun and the bumping of the best angle in a warp
__inline__ __device__ void warpReduce(int ind, int sho, int bum, int &ret1, int &ret2, int &ret3) {
int im, sm, bm;
for (int i = warpSize/2; i > 0; i/=2){
im = __shfl_down_sync(0xffffffff, ind, i, 32);
sm = __shfl_down_sync(0xffffffff, sho, i, 32);
bm = __shfl_down_sync(0xffffffff, bum, i, 32);
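		// adopt the neighbour's candidate only if it bumps less, or bumps the same amount with a strictly higher shotgun score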
if(!(bm > bum || (bum==bm && sho>=sm))){
ind = im;
sho = sm;
bum = bm;
}
}
ret1=ind;
ret2=sho;
ret3=bum;
}
/*this function finds the index, the shotgun and the bumping of the best angle in a block and returns the index
behaves exactly like blockReduce, but implements the support logic to find the best index, not to sum values*/
__inline__ __device__ int find_best(int* shotgun, int* bumping, int index){
int shot = shotgun[index];
int bum = bumping[index];
int ind = index;
static __shared__ int sharedI[32];
static __shared__ int sharedS[32];
static __shared__ int sharedB[32];
int lane = index % warpSize;
int wid = index / warpSize;
//this function writes back the best values in the last 3 parameters
warpReduce(index, shot, bum, ind, shot, bum);
if (lane==0){
sharedI[wid]=ind;
sharedS[wid]=shot;
sharedB[wid]=bum;
}
__syncthreads();
//we need to copy the values to the first warp to reduce them
if(index < blockDim.x / warpSize){
ind = sharedI[lane];
bum = sharedB[lane];
shot = sharedS[lane];
} else {
ind = 0;
bum = 1;
shot = 0;
}
//find the absolute best index in the first warp
if (wid==0) warpReduce(ind, shot, bum, ind, shot, bum);
return ind;
}
__global__ void eval_angles(float* in, int* shotgun, int* bumping){
__shared__ int best_angle;
const int index = threadIdx.x;
int best_index = find_best(shotgun, bumping, index);
//this is done in order to broadcast to every thread what is the best value (otherwise, only the first warp knows it)
if(index == 0) best_angle = best_index;
__syncthreads();
//Copies back the best in[] in the first position.
if(index < INSIZE) in[index] = in[best_angle*INSIZE+index];
}
void ps_kern(float* in, float* out, float precision, float* score_pos, int* start, int* stop, int* mask)
{
float *d_in, *d_score_pos;
int *d_start, *d_stop, *d_mask, *d_shotgun;
int *d_bumping, *d_bumping_partial;
cudaError_t status, status_cp, status_wb;
cudaStream_t s1, s2;
cudaEvent_t start_t, stop_t;
status = cudaMalloc((void**) &d_in, sizeof(float)*INSIZE*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status_cp = cudaMemcpy(d_in, in, sizeof(float)*INSIZE, cudaMemcpyHostToDevice);
if(DEBUG && status_cp!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_cp), __FILE__, __LINE__);
status = cudaMalloc((void**) &d_start, sizeof(int)*N_ATOMS);
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status_cp = cudaMemcpy(d_start, start, sizeof(int)*N_ATOMS, cudaMemcpyHostToDevice);
if(DEBUG && status_cp!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_cp), __FILE__, __LINE__);
status = cudaMalloc((void**) &d_stop, sizeof(int)*N_ATOMS);
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status_cp = cudaMemcpy(d_stop, stop, sizeof(int)*N_ATOMS, cudaMemcpyHostToDevice);
if(DEBUG && status_cp!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_cp), __FILE__, __LINE__);
status = cudaMalloc((void**)&d_bumping, sizeof(int)*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status = cudaMalloc((void**)&d_bumping_partial, sizeof(int)*ceil(MAX_ANGLE/precision)*N_ATOMS);
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status = cudaMalloc((void**)&d_shotgun, sizeof(int)*ceil(MAX_ANGLE/precision));
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status = cudaMalloc((void**) &d_mask, sizeof(int)*MASKSIZE);
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status_cp = cudaMemcpy(d_mask, mask, sizeof(int)*MASKSIZE, cudaMemcpyHostToDevice);
if(DEBUG && status_cp!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_cp), __FILE__, __LINE__);
status = cudaMalloc((void**) &d_score_pos, sizeof(float)*VOLUMESIZE);
if(DEBUG && status!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status), __FILE__, __LINE__);
status_cp = cudaMemcpy(d_score_pos, score_pos, sizeof(float)*VOLUMESIZE, cudaMemcpyHostToDevice);
if(DEBUG && status_cp!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_cp), __FILE__, __LINE__);
//initializes bindless texture objects for mask and score_pos
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaArray *d_cuArr;
cudaMalloc3DArray(&d_cuArr, &channelDesc, make_cudaExtent(POCKET_SIZE*sizeof(float),POCKET_SIZE,POCKET_SIZE), 0);
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(d_score_pos, POCKET_SIZE*sizeof(float), POCKET_SIZE, POCKET_SIZE);
copyParams.dstArray = d_cuArr;
copyParams.extent = make_cudaExtent(POCKET_SIZE,POCKET_SIZE,POCKET_SIZE);
copyParams.kind = cudaMemcpyDeviceToDevice;
cudaMemcpy3D(&copyParams);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArr;
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.addressMode[0] = cudaAddressModeClamp;
texDescr.addressMode[1] = cudaAddressModeClamp;
texDescr.addressMode[2] = cudaAddressModeClamp;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texScore_pos, &texRes, &texDescr, NULL);
cudaResourceDesc resDesc2;
memset(&resDesc2, 0, sizeof(resDesc2));
resDesc2.resType = cudaResourceTypeLinear;
resDesc2.res.linear.devPtr = d_mask;
resDesc2.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc2.res.linear.desc.x = 32;
resDesc2.res.linear.sizeInBytes = MASKSIZE*sizeof(int);
cudaTextureDesc texDesc2;
memset(&texDesc2, 0, sizeof(texDesc2));
texDesc2.readMode = cudaReadModeElementType;
cudaTextureObject_t texMask=0;
cudaCreateTextureObject(&texMask, &resDesc2, &texDesc2, NULL);
cudaEventCreate(&start_t);
cudaEventCreate(&stop_t);
cudaStreamCreate(&s1);
cudaStreamCreate(&s2);
//these are the blocks needed by the fragment_is_bumping() kernel
dim3 bump_blocks(N_ATOMS,ceil(MAX_ANGLE/precision));
cudaEventRecord(start_t);
//for each fragment
for (int i=0;i<N_FRAGS;++i){
//For each angle (MAX_ANGLE/precision) rotates each atom (N_ATOMS)
rotate<<<ceil(MAX_ANGLE/precision),N_ATOMS,0,s1>>>(d_in, texMask, i, precision, d_start, d_stop);
cudaStreamSynchronize(s1);
//For each angle, evaluates all the pairs
fragment_is_bumping<<<bump_blocks,N_ATOMS,0,s1>>>(d_in, texMask, d_bumping_partial, i, precision, d_bumping);
//For each angle (i.e. for each rotated in[]), computes the shotgun of each atom
measure_shotgun<<<ceil(MAX_ANGLE/precision),N_ATOMS,0,s2>>>(d_in, texScore_pos, d_shotgun, precision, i);
//Make sure everything has been computed before picking the best angle
cudaStreamSynchronize(s1);
cudaStreamSynchronize(s2);
//For each angle, find the best in[] and write it back in the first position
eval_angles<<<1,ceil(MAX_ANGLE/precision),0,s1>>>(d_in, d_shotgun, d_bumping);
}
cudaStreamSynchronize(s1);
//Timing result
cudaEventRecord(stop_t);
cudaEventSynchronize(stop_t);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start_t, stop_t);
printf("\nKernels executed in %f milliseconds\n", milliseconds);
//Write back the rotated in array
status_wb = cudaMemcpy(out, d_in, sizeof(float)*INSIZE, cudaMemcpyDeviceToHost);
if(DEBUG && status_wb!=cudaSuccess)
printf("%s in %s at line %d\n", cudaGetErrorString(status_wb), __FILE__, __LINE__);
cudaDestroyTextureObject(texScore_pos);
cudaDestroyTextureObject(texMask);
cudaFree(d_bumping_partial);
cudaEventDestroy(start_t);
cudaEventDestroy(stop_t);
cudaStreamDestroy(s1);
cudaStreamDestroy(s2);
cudaFree(d_bumping);
cudaFree(d_shotgun);
cudaFree(d_start);
cudaFree(d_stop);
cudaFree(d_in);
}
|
eed68e691de9d769fbbdac6b98b54266870a5bbe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SmoothL1Kernel(
const int n, const T* in, T* out, T beta) {
// f(x) = 0.5 * x^2 / beta if |x| < beta
// |x| - 0.5 * beta otherwise
CUDA_1D_KERNEL_LOOP(index, n) {
T val = in[index];
#ifdef __HIP_PLATFORM_HCC__
T abs_val = fabsf(val);
#else
T abs_val = abs(val);
#endif
if (abs_val < beta) {
out[index] = 0.5 * val * val / beta;
} else {
out[index] = abs_val - 0.5 * beta;
}
}
}
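// Quick sanity check of the formula above with beta = 1:
// f(0.5) = 0.5 * 0.25 / 1 = 0.125 (quadratic branch), f(2) = 2 - 0.5 = 1.5 (linear branch).
// The two branches meet at |x| = beta with value 0.5 * beta and slope +/-1, so the loss is C1.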
template <typename T>
__global__ void SmoothL1GradientKernel(
const int n,
const T* in,
T* out,
const T* d_loss_data,
T norm,
T beta) {
// f'(x) = x / beta if |x| < beta
// = sign(x) otherwise
// We also scale by norm * d_loss in this kernel for convenience
CUDA_1D_KERNEL_LOOP(index, n) {
T val = in[index];
#ifdef __HIP_PLATFORM_HCC__
T abs_val = fabsf(val);
#else
T abs_val = abs(val);
#endif
T d_loss = *d_loss_data;
if (abs_val < beta) {
out[index] = norm * d_loss * val / beta;
} else {
out[index] = norm * d_loss * ((T(0) < val) - (val < T(0)));
}
}
}
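// Continuing the beta = 1 example: f'(0.5) = 0.5 and f'(2) = 1 (the sign term).
// The norm * d_loss factor is folded in here so the caller only passes scale_ / N and the
// upstream gradient (see SmoothL1LossGradientOp below).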
} // namespace
template<>
bool SmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
auto* avg_loss = Output(0);
int N = Y.dim32(0);
// Require the same number of elements along axis 0 (batch size), but
// otherwise don't care about the shape (just the number of elements)
CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0),
"Y_hat and Y must have the same number of elements along axis 0");
CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(),
"Y_hat and Y must have the same number of elements");
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size());
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size());
avg_loss->Resize(vector<int64_t>());
buff_.ResizeLike(Y);
// Difference
// d := y_hat - y
math::Sub<float, CUDAContext>(
Y.size(), Y_hat.data<float>(), Y.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := alpha_in * (y_hat - y)
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_in.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise smooth l1 loss
// l := SmoothL1(alpha_in * (y_hat - y))
hipLaunchKernelGGL(( SmoothL1Kernel<float>)
, dim3(CAFFE_GET_BLOCKS(buff_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
buff_.size(), buff_.data<float>(), buff_.mutable_data<float>(),
beta_);
// Element-wise weighted smooth l1 loss (can be used to specify a per-element
// loss weight)
// l := alpha_out * SmoothL1(alpha_in * (y_hat - y))
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_out.data<float>(),
buff_.mutable_data<float>(), &context_);
// Sum of all losses
// al := sum_i l_i
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
// al := 1/N * al
math::Scale<float, float, CUDAContext>(
1, scale_ / N, avg_loss_data, avg_loss_data, &context_);
return true;
}
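// Putting the steps above together:
// avg_loss = scale_ / N * sum_i alpha_out_i * SmoothL1(alpha_in_i * (y_hat_i - y_i))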
template<>
bool SmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOutput")
auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput")
// We intentionally don't compute gradients for Y, alpha_{in,out} since they
// are not needed (can change in the future if desired)
int N = Y.dim32(0);
// Require the same number of elements along axis 0 (batch size), but
// otherwise don't care about the shape (just the number of elements)
CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0),
"Y_hat and Y must have the same number of elements along axis 0");
CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(),
"Y_hat and Y must have the same number of elements");
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size());
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size());
CAFFE_ENFORCE_EQ(d_avg_loss.size(), 1);
d_Y_hat->ResizeLike(Y_hat);
buff_.ResizeLike(Y);
// Difference
// d := y_hat - y
math::Sub<float, CUDAContext>(
Y.size(), Y_hat.data<float>(), Y.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := alpha_in * (y_hat - y)
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_in.data<float>(),
buff_.mutable_data<float>(), &context_);
// d_Y_hat := d_avg_loss / N * SmoothL1'(alpha_in * (y_hat - y))
hipLaunchKernelGGL(( SmoothL1GradientKernel<float>)
, dim3(CAFFE_GET_BLOCKS(buff_.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
buff_.size(), buff_.data<float>(), d_Y_hat->mutable_data<float>(),
d_avg_loss.data<float>(), scale_ / N, beta_);
// Element-wise scale by alpha_in and alpha_out
math::Mul<float, CUDAContext>(
d_Y_hat->size(), d_Y_hat->data<float>(), alpha_in.data<float>(),
d_Y_hat->mutable_data<float>(), &context_);
math::Mul<float, CUDAContext>(
d_Y_hat->size(), d_Y_hat->data<float>(), alpha_out.data<float>(),
d_Y_hat->mutable_data<float>(), &context_);
return true;
}
REGISTER_CUDA_OPERATOR(SmoothL1Loss,
SmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SmoothL1LossGradient,
SmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
| eed68e691de9d769fbbdac6b98b54266870a5bbe.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "smooth_l1_loss_op.h"
namespace caffe2 {
namespace {
template <typename T>
__global__ void SmoothL1Kernel(
const int n, const T* in, T* out, T beta) {
// f(x) = 0.5 * x^2 / beta if |x| < beta
// |x| - 0.5 * beta otherwise
CUDA_1D_KERNEL_LOOP(index, n) {
T val = in[index];
#ifdef __HIP_PLATFORM_HCC__
T abs_val = fabsf(val);
#else
T abs_val = abs(val);
#endif
if (abs_val < beta) {
out[index] = 0.5 * val * val / beta;
} else {
out[index] = abs_val - 0.5 * beta;
}
}
}
template <typename T>
__global__ void SmoothL1GradientKernel(
const int n,
const T* in,
T* out,
const T* d_loss_data,
T norm,
T beta) {
// f'(x) = x / beta if |x| < beta
// = sign(x) otherwise
// We also scale by norm * d_loss in this kernel for convenience
CUDA_1D_KERNEL_LOOP(index, n) {
T val = in[index];
#ifdef __HIP_PLATFORM_HCC__
T abs_val = fabsf(val);
#else
T abs_val = abs(val);
#endif
T d_loss = *d_loss_data;
if (abs_val < beta) {
out[index] = norm * d_loss * val / beta;
} else {
out[index] = norm * d_loss * ((T(0) < val) - (val < T(0)));
}
}
}
} // namespace
template<>
bool SmoothL1LossOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
auto* avg_loss = Output(0);
int N = Y.dim32(0);
// Require the same number of elements along axis 0 (batch size), but
// otherwise don't care about the shape (just the number of elements)
CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0),
"Y_hat and Y must have the same number of elements along axis 0");
CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(),
"Y_hat and Y must have the same number of elements");
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size());
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size());
avg_loss->Resize(vector<int64_t>());
buff_.ResizeLike(Y);
// Difference
// d := y_hat - y
math::Sub<float, CUDAContext>(
Y.size(), Y_hat.data<float>(), Y.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := alpha_in * (y_hat - y)
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_in.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise smooth l1 loss
// l := SmoothL1(alpha_in * (y_hat - y))
SmoothL1Kernel<float>
<<<CAFFE_GET_BLOCKS(buff_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
buff_.size(), buff_.data<float>(), buff_.mutable_data<float>(),
beta_);
// Element-wise weighted smooth l1 loss (can be used to specify a per-element
// loss weight)
// l := alpha_out * SmoothL1(alpha_in * (y_hat - y))
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_out.data<float>(),
buff_.mutable_data<float>(), &context_);
// Sum of all losses
// al := sum_i l_i
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
buff_.size(), buff_.data<float>(), avg_loss_data, &context_);
// Average of input batch size
// al := 1/N * al
math::Scale<float, float, CUDAContext>(
1, scale_ / N, avg_loss_data, avg_loss_data, &context_);
return true;
}
template<>
bool SmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y_hat = Input(0);
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOutput")
auto* d_Y_hat = Output(0); // gradient of net w.r.t. Y_hat ("gradInput")
// We intentionally don't compute gradients for Y, alpha_{in,out} since they
// are not needed (can change in the future if desired)
int N = Y.dim32(0);
// Require the same number of elements along axis 0 (batch size), but
// otherwise don't care about the shape (just the number of elements)
CAFFE_ENFORCE_EQ(Y_hat.dim32(0), Y.dim32(0),
"Y_hat and Y must have the same number of elements along axis 0");
CAFFE_ENFORCE_EQ(Y_hat.size(), Y.size(),
"Y_hat and Y must have the same number of elements");
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_in.size());
CAFFE_ENFORCE_EQ(Y_hat.size(), alpha_out.size());
CAFFE_ENFORCE_EQ(d_avg_loss.size(), 1);
d_Y_hat->ResizeLike(Y_hat);
buff_.ResizeLike(Y);
// Difference
// d := y_hat - y
math::Sub<float, CUDAContext>(
Y.size(), Y_hat.data<float>(), Y.data<float>(),
buff_.mutable_data<float>(), &context_);
// Element-wise weighted difference (can be used to ignore or reweight
// specific components)
// d := alpha_in * (y_hat - y)
math::Mul<float, CUDAContext>(
buff_.size(), buff_.data<float>(), alpha_in.data<float>(),
buff_.mutable_data<float>(), &context_);
// d_Y_hat := d_avg_loss / N * SmoothL1'(alpha_in * (y_hat - y))
SmoothL1GradientKernel<float>
<<<CAFFE_GET_BLOCKS(buff_.size()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
buff_.size(), buff_.data<float>(), d_Y_hat->mutable_data<float>(),
d_avg_loss.data<float>(), scale_ / N, beta_);
// Element-wise scale by alpha_in and alpha_out
math::Mul<float, CUDAContext>(
d_Y_hat->size(), d_Y_hat->data<float>(), alpha_in.data<float>(),
d_Y_hat->mutable_data<float>(), &context_);
math::Mul<float, CUDAContext>(
d_Y_hat->size(), d_Y_hat->data<float>(), alpha_out.data<float>(),
d_Y_hat->mutable_data<float>(), &context_);
return true;
}
REGISTER_CUDA_OPERATOR(SmoothL1Loss,
SmoothL1LossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SmoothL1LossGradient,
SmoothL1LossGradientOp<float, CUDAContext>);
} // namespace caffe2
|
eeaef0fec7c212777a842a2067642ea3d426252d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include "internal_shared.hpp"
#include <thrust/sort.h>
#include <opencv2/gpu/gpumat.hpp>
#include <opencv2/gpu/device/common.hpp>
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
#include <float.h>
namespace btl { namespace device
{
namespace surf
{
using namespace cv::gpu;
using namespace cv::gpu::device;
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( hipMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( hipMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( hipMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( hipMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( hipMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( hipMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, hipReadModeElementType> imgTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> sumTex(0, hipFilterModePoint, hipAddressModeClamp);
texture<unsigned int, 2, hipReadModeElementType> maskSumTex(0, hipFilterModePoint, hipAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
hipChannelFormatDesc desc_sum = hipCreateChannelDesc<uint>();
cudaSafeCall( hipBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
typedef double real_t;
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
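/* Each row of src describes one weighted box {x1, y1, x2, y2, weight} laid out on a grid of
size oldSize; the box sum is fetched with four integral-image lookups and divided by the box
area, so the response is independent of the filter size. */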
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
//calc wavelet size
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
int HAAR_OCTAVE_INC = 0;
if(octave > 0 )
HAAR_OCTAVE_INC = (6 << (octave-1));
return HAAR_SIZE0 + HAAR_OCTAVE_INC + (HAAR_SIZE_INC << octave )* layer ;
}
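// e.g. octave 0 gives sizes 9, 15, 21, ... and octave 1 gives 15, 27, 39, ...
// (base 9, plus 6 << (octave - 1) for octave > 0, plus (6 << octave) * layer)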
//roughly cols x rows of the total # of threads, each block is 16 x 16
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace, uint sumOffset)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y; //current layers
const int j = threadIdx.x + blockIdx.x * blockDim.x;//current col x
const int i = threadIdx.y + blockIdx_y * blockDim.y;//current row y
const int layer = blockIdx_z; //current layers
const int size = calcSize(c_octave, layer); //wavelet size of the current octave and current layer
const int samples_i = 1 + ((c_img_rows - size) >> c_octave); // the rows of the current layer excludes wavelet size
const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // the cols of the current layer excludes wavelet size
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave; //margin in terms of the # of pixels in the current octave layer
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) {
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), sumOffset + (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
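/* The 0.81f factor above is the usual SURF weight (0.9^2) that compensates for approximating
the Gaussian second derivatives with box filters. */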
//calc the determinant and trace of ONE layer
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers, const size_t sumOffset)
{
const int min_size = calcSize(octave, 0); //octave is the current octave level, therefore here the min size of the current octave level is calculated
const int max_samples_i = 1 + ((img_rows - min_size) >> octave); // the rows of the current layer excludes wavelet size
const int max_samples_j = 1 + ((img_cols - min_size) >> octave); // the cols of the current layer excludes wavelet size
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
//roughly cols x rows of the total # of threads, each block is 16 x 16
hipLaunchKernelGGL(( icvCalcLayerDetAndTrace), dim3(grid), dim3(threads), 0, 0, det, trace, (uint)sumOffset);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size, const uint offset)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter, const uint maskOffset)
{
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y; //current layer
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; // current col x
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // current row y
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size, maskOffset))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
}
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers, const size_t maskOffset)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
hipLaunchKernelGGL(( icvFindMaximaInLayer<WithOutMask>), dim3(grid), dim3(threads), smem_size, 0, det, trace, maxPosBuffer, maxCounter, 0);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
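/* Note: this wrapper always instantiates the WithOutMask variant, so the use_mask argument and
the WithMask/maskSumTex path above are effectively unused here. */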
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
}
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
hipLaunchKernelGGL(( icvInterpolateKeypoint), dim3(grid), dim3(threads), 0, 0, det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#pragma unroll
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
cv::gpu::device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>());
cv::gpu::device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>());
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabs(kp_dir - 360.f) < FLT_EPSILON)
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
hipLaunchKernelGGL(( icvCalcOrientation), dim3(grid), dim3(threads), 0, 0, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
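/* One block per feature: the 32x4 threads sweep 18 * 4 = 72 candidate windows of width ORI_WIN
(60 degrees), spaced ORI_SEARCH_INC (5 degrees) apart, i.e. the full 360 degrees. */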
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) :
centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_)
{
}
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;
float centerY;
float win_offset;
float cos_dir;
float sin_dir;
};
__device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25],
const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float s_PATCH[6][6];
const float centerX = featureX[blockIdx.x];
const float centerY = featureY[blockIdx.x];
const float size = featureSize[blockIdx.x];
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (std::abs(descriptor_dir - 360.f) < FLT_EPSILON)
descriptor_dir = 0.f;
descriptor_dir *= (float)(CV_PI_F / 180.0f);
/* The sampling intervals and wavelet sized for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = size * 1.2f / 9.0f;
/* Extract a window of pixels around the keypoint of size 20s */
const int win_size = (int)((PATCH_SZ + 1) * s);
float sin_dir;
float cos_dir;
sincosf(descriptor_dir, &sin_dir, &cos_dir);
/* Nearest neighbour version (faster) */
const float win_offset = -(float)(win_size - 1) / 2;
// Compute sampling points
// since grids are 2D, need to compute xBlock and yBlock indices
const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4
const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4)
const int xIndex = xBlock * 5 + threadIdx.x;
const int yIndex = yBlock * 5 + threadIdx.y;
const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size;
const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size;
LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir));
s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo);
__syncthreads();
if (threadIdx.x < 5 && threadIdx.y < 5)
{
const int tid = threadIdx.y * 5 + threadIdx.x;
const float dw = c_DW[yIndex * PATCH_SZ + xIndex];
const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw;
const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw;
s_dx_bin[tid] = vx;
s_dy_bin[tid] = vy;
}
}
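/* blockIdx.y selects one of the 4x4 = 16 sub-regions of the 20s patch (xBlock, yBlock), and the
6x6 thread block samples a 5x5 grid of dx/dy responses inside it. */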
__device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid)
{
// first step is to reduce from 25 to 16
if (tid < 9) // use 9 threads
{
sdata1[tid] += sdata1[tid + 16];
sdata2[tid] += sdata2[tid + 16];
sdata3[tid] += sdata3[tid + 16];
sdata4[tid] += sdata4[tid + 16];
}
// sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp)
if (tid < 8)
{
sdata1[tid] += sdata1[tid + 8];
sdata1[tid] += sdata1[tid + 4];
sdata1[tid] += sdata1[tid + 2];
sdata1[tid] += sdata1[tid + 1];
sdata2[tid] += sdata2[tid + 8];
sdata2[tid] += sdata2[tid + 4];
sdata2[tid] += sdata2[tid + 2];
sdata2[tid] += sdata2[tid + 1];
sdata3[tid] += sdata3[tid + 8];
sdata3[tid] += sdata3[tid + 4];
sdata3[tid] += sdata3[tid + 2];
sdata3[tid] += sdata3[tid + 1];
sdata4[tid] += sdata4[tid + 8];
sdata4[tid] += sdata4[tid + 4];
sdata4[tid] += sdata4[tid + 2];
sdata4[tid] += sdata4[tid + 1];
}
}
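/* Warp-synchronous reduction: the running totals of all four arrays end up in index 0. It relies
on the 25 participating threads living in the same warp (true for the 6x6 blocks used below); on
architectures with independent thread scheduling an explicit __syncwarp() between steps may be
needed. */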
__global__ void compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
__shared__ float sdx[25];
__shared__ float sdy[25];
__shared__ float sdxabs[25];
__shared__ float sdyabs[25];
calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir);
__syncthreads();
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 25)
{
sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array
sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array
__syncthreads();
reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid);
__syncthreads();
float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2);
// write dx, dy, |dx|, |dy|
if (tid == 0)
{
descriptors_block[0] = sdx[0];
descriptors_block[1] = sdy[0];
descriptors_block[2] = sdxabs[0];
descriptors_block[3] = sdyabs[0];
}
}
}
__global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
__shared__ float sdx[25];
__shared__ float sdy[25];
// sum (reduce) 5x5 area response
__shared__ float sd1[25];
__shared__ float sd2[25];
__shared__ float sdabs1[25];
__shared__ float sdabs2[25];
calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir);
__syncthreads();
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 25)
{
if (sdy[tid] >= 0)
{
sd1[tid] = sdx[tid];
sdabs1[tid] = ::fabs(sdx[tid]);
sd2[tid] = 0;
sdabs2[tid] = 0;
}
else
{
sd1[tid] = 0;
sdabs1[tid] = 0;
sd2[tid] = sdx[tid];
sdabs2[tid] = ::fabs(sdx[tid]);
}
__syncthreads();
reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
__syncthreads();
float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (tid == 0)
{
descriptors_block[0] = sd1[0];
descriptors_block[1] = sdabs1[0];
descriptors_block[2] = sd2[0];
descriptors_block[3] = sdabs2[0];
}
__syncthreads();
if (sdx[tid] >= 0)
{
sd1[tid] = sdy[tid];
sdabs1[tid] = ::fabs(sdy[tid]);
sd2[tid] = 0;
sdabs2[tid] = 0;
}
else
{
sd1[tid] = 0;
sdabs1[tid] = 0;
sd2[tid] = sdy[tid];
sdabs2[tid] = ::fabs(sdy[tid]);
}
__syncthreads();
reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
__syncthreads();
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (tid == 0)
{
descriptors_block[4] = sd1[0];
descriptors_block[5] = sdabs1[0];
descriptors_block[6] = sd2[0];
descriptors_block[7] = sdabs2[0];
}
}
}
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
// no need for thread ID
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
__shared__ float sqDesc[BLOCK_DIM_X];
const float lookup = descriptor_base[threadIdx.x];
sqDesc[threadIdx.x] = lookup * lookup;
__syncthreads();
if (BLOCK_DIM_X >= 128)
{
if (threadIdx.x < 64)
sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64];
__syncthreads();
}
// reduction to get total
if (threadIdx.x < 32)
{
volatile float* smem = sqDesc;
smem[threadIdx.x] += smem[threadIdx.x + 32];
smem[threadIdx.x] += smem[threadIdx.x + 16];
smem[threadIdx.x] += smem[threadIdx.x + 8];
smem[threadIdx.x] += smem[threadIdx.x + 4];
smem[threadIdx.x] += smem[threadIdx.x + 2];
smem[threadIdx.x] += smem[threadIdx.x + 1];
}
// compute length (square root)
__shared__ float len;
if (threadIdx.x == 0)
{
len = sqrtf(sqDesc[0]);
}
__syncthreads();
// normalize and store in output
descriptor_base[threadIdx.x] = lookup / len;
}
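// Host-side launcher: descriptors are built with a dim3(nFeatures, 16) grid of 6x6-thread
// blocks (36 threads, of which 25 sample the 5x5 grid of each sub-region), then L2-normalized
// with one 64- or 128-thread block per feature depending on the descriptor length.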
void compute_descriptors_gpu(const PtrStepSzf& descriptors,
const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
// compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
if (descriptors.cols == 64)
{
hipLaunchKernelGGL(( compute_descriptors64), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( normalize_descriptors<64>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(64, 1, 1)), 0, 0, descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
else
{
hipLaunchKernelGGL(( compute_descriptors128), dim3(dim3(nFeatures, 16, 1)), dim3(dim3(6, 6, 1)), 0, 0, descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
hipLaunchKernelGGL(( normalize_descriptors<128>), dim3(dim3(nFeatures, 1, 1)), dim3(dim3(128, 1, 1)), 0, 0, descriptors);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
}
} // namespace surf
} // namespace device
} // namespace btl
| eeaef0fec7c212777a842a2067642ea3d426252d.cu | //#include "internal_shared.hpp"
#include <thrust/sort.h>
#include <opencv2/gpu/gpumat.hpp>
#include <opencv2/gpu/device/common.hpp>
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/utility.hpp"
#include "opencv2/gpu/device/functional.hpp"
#include "opencv2/gpu/device/filters.hpp"
#include <float.h>
namespace btl { namespace device
{
namespace surf
{
using namespace cv::gpu;
using namespace cv::gpu::device;
////////////////////////////////////////////////////////////////////////
// Global parameters
// The maximum number of features (before subpixel interpolation) that memory is reserved for.
__constant__ int c_max_candidates;
// The maximum number of features that memory is reserved for.
__constant__ int c_max_features;
// The image size.
__constant__ int c_img_rows;
__constant__ int c_img_cols;
// The number of layers.
__constant__ int c_nOctaveLayers;
// The hessian threshold.
__constant__ float c_hessianThreshold;
// The current octave.
__constant__ int c_octave;
// The current layer size.
__constant__ int c_layer_rows;
__constant__ int c_layer_cols;
void loadGlobalConstants(int maxCandidates, int maxFeatures, int img_rows, int img_cols, int nOctaveLayers, float hessianThreshold)
{
cudaSafeCall( cudaMemcpyToSymbol(c_max_candidates, &maxCandidates, sizeof(maxCandidates)) );
cudaSafeCall( cudaMemcpyToSymbol(c_max_features, &maxFeatures, sizeof(maxFeatures)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_rows, &img_rows, sizeof(img_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_img_cols, &img_cols, sizeof(img_cols)) );
cudaSafeCall( cudaMemcpyToSymbol(c_nOctaveLayers, &nOctaveLayers, sizeof(nOctaveLayers)) );
cudaSafeCall( cudaMemcpyToSymbol(c_hessianThreshold, &hessianThreshold, sizeof(hessianThreshold)) );
}
void loadOctaveConstants(int octave, int layer_rows, int layer_cols)
{
cudaSafeCall( cudaMemcpyToSymbol(c_octave, &octave, sizeof(octave)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_rows, &layer_rows, sizeof(layer_rows)) );
cudaSafeCall( cudaMemcpyToSymbol(c_layer_cols, &layer_cols, sizeof(layer_cols)) );
}
////////////////////////////////////////////////////////////////////////
// Integral image texture
texture<unsigned char, 2, cudaReadModeElementType> imgTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> sumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
texture<unsigned int, 2, cudaReadModeElementType> maskSumTex(0, cudaFilterModePoint, cudaAddressModeClamp);
void bindImgTex(PtrStepSzb img)
{
bindTexture(&imgTex, img);
}
size_t bindSumTex(PtrStepSz<uint> sum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, sumTex, sum.data, desc_sum, sum.cols, sum.rows, sum.step));
return offset / sizeof(uint);
}
size_t bindMaskSumTex(PtrStepSz<uint> maskSum)
{
size_t offset;
cudaChannelFormatDesc desc_sum = cudaCreateChannelDesc<uint>();
cudaSafeCall( cudaBindTexture2D(&offset, maskSumTex, maskSum.data, desc_sum, maskSum.cols, maskSum.rows, maskSum.step));
return offset / sizeof(uint);
}
template <int N> __device__ float icvCalcHaarPatternSum(const float src[][5], int oldSize, int newSize, int y, int x)
{
typedef double real_t;
float ratio = (float)newSize / oldSize;
real_t d = 0;
#pragma unroll
for (int k = 0; k < N; ++k)
{
int dx1 = __float2int_rn(ratio * src[k][0]);
int dy1 = __float2int_rn(ratio * src[k][1]);
int dx2 = __float2int_rn(ratio * src[k][2]);
int dy2 = __float2int_rn(ratio * src[k][3]);
real_t t = 0;
t += tex2D(sumTex, x + dx1, y + dy1);
t -= tex2D(sumTex, x + dx1, y + dy2);
t -= tex2D(sumTex, x + dx2, y + dy1);
t += tex2D(sumTex, x + dx2, y + dy2);
d += t * src[k][4] / ((dx2 - dx1) * (dy2 - dy1));
}
return (float)d;
}
////////////////////////////////////////////////////////////////////////
// Hessian
__constant__ float c_DX [3][5] = { {0, 2, 3, 7, 1}, {3, 2, 6, 7, -2}, {6, 2, 9, 7, 1} };
__constant__ float c_DY [3][5] = { {2, 0, 7, 3, 1}, {2, 3, 7, 6, -2}, {2, 6, 7, 9, 1} };
__constant__ float c_DXY[4][5] = { {1, 1, 4, 4, 1}, {5, 1, 8, 4, -1}, {1, 5, 4, 8, -1}, {5, 5, 8, 8, 1} };
//calc wavelet size
__host__ __device__ __forceinline__ int calcSize(int octave, int layer)
{
/* Wavelet size at first layer of first octave. */
const int HAAR_SIZE0 = 9;
/* Wavelet size increment between layers. This should be an even number,
such that the wavelet sizes in an octave are either all even or all odd.
This ensures that when looking for the neighbours of a sample, the layers
above and below are aligned correctly. */
const int HAAR_SIZE_INC = 6;
int HAAR_OCTAVE_INC = 0;
if(octave > 0 )
HAAR_OCTAVE_INC = (6 << (octave-1));
return HAAR_SIZE0 + HAAR_OCTAVE_INC + (HAAR_SIZE_INC << octave )* layer ;
}
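// Worked examples of calcSize:
//   octave 0, layer 0: 9 + 0  + (6 << 0) * 0 = 9
//   octave 0, layer 1: 9 + 0  + (6 << 0) * 1 = 15
//   octave 1, layer 1: 9 + 6  + (6 << 1) * 1 = 27
//   octave 2, layer 1: 9 + 12 + (6 << 2) * 1 = 45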
//roughly cols x rows of the total # of threads, each block is 16 x 16
__global__ void icvCalcLayerDetAndTrace(PtrStepf det, PtrStepf trace, uint sumOffset)
{
// Determine the indices
const int gridDim_y = gridDim.y / (c_nOctaveLayers + 2);
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y; //current layers
const int j = threadIdx.x + blockIdx.x * blockDim.x;//current col x
const int i = threadIdx.y + blockIdx_y * blockDim.y;//current row y
const int layer = blockIdx_z; //current layers
const int size = calcSize(c_octave, layer); //wavelet size of the current octave and current layer
const int samples_i = 1 + ((c_img_rows - size) >> c_octave); // the rows of the current layer excludes wavelet size
const int samples_j = 1 + ((c_img_cols - size) >> c_octave); // the cols of the current layer excludes wavelet size
// Ignore pixels where some of the kernel is outside the image
const int margin = (size >> 1) >> c_octave; //margin in terms of the # of pixels int the current octave layer
if (size <= c_img_rows && size <= c_img_cols && i < samples_i && j < samples_j) {
const float dx = icvCalcHaarPatternSum<3>(c_DX , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dy = icvCalcHaarPatternSum<3>(c_DY , 9, size, (i << c_octave), sumOffset + (j << c_octave));
const float dxy = icvCalcHaarPatternSum<4>(c_DXY, 9, size, (i << c_octave), sumOffset + (j << c_octave));
det.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx * dy - 0.81f * dxy * dxy;
trace.ptr(layer * c_layer_rows + i + margin)[j + margin] = dx + dy;
}
}
//calc the determine and trace of ONE layer
void icvCalcLayerDetAndTrace_gpu(const PtrStepf& det, const PtrStepf& trace, int img_rows, int img_cols,
int octave, int nOctaveLayers, const size_t sumOffset)
{
const int min_size = calcSize(octave, 0); //octave is the current octave level, therefore here the min size of the current octave level is calculated
const int max_samples_i = 1 + ((img_rows - min_size) >> octave); // the rows of the current layer excludes wavelet size
const int max_samples_j = 1 + ((img_cols - min_size) >> octave); // the cols of the current layer excludes wavelet size
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(max_samples_j, threads.x);
grid.y = divUp(max_samples_i, threads.y) * (nOctaveLayers + 2);
//roughly cols x rows of the total # of threads, each block is 16 x 16
icvCalcLayerDetAndTrace<<<grid, threads>>>(det, trace, (uint)sumOffset);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// NONMAX
__constant__ float c_DM[5] = {0, 0, 9, 9, 1};
struct WithMask
{
static __device__ bool check(int sum_i, int sum_j, int size, const uint offset)
{
float ratio = (float)size / 9.0f;
float d = 0;
int dx1 = __float2int_rn(ratio * c_DM[0]);
int dy1 = __float2int_rn(ratio * c_DM[1]);
int dx2 = __float2int_rn(ratio * c_DM[2]);
int dy2 = __float2int_rn(ratio * c_DM[3]);
float t = 0;
t += tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy1);
t -= tex2D(maskSumTex, offset + sum_j + dx1, sum_i + dy2);
t -= tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy1);
t += tex2D(maskSumTex, offset + sum_j + dx2, sum_i + dy2);
d += t * c_DM[4] / ((dx2 - dx1) * (dy2 - dy1));
return (d >= 0.5f);
}
};
template <typename Mask>
__global__ void icvFindMaximaInLayer(const PtrStepf det, const PtrStepf trace, int4* maxPosBuffer,
unsigned int* maxCounter, const uint maskOffset)
{
extern __shared__ float N9[];
// The hidx variables are the indices to the hessian buffer.
const int gridDim_y = gridDim.y / c_nOctaveLayers;
const int blockIdx_y = blockIdx.y % gridDim_y;
const int blockIdx_z = blockIdx.y / gridDim_y; //current layer
const int layer = blockIdx_z + 1;
const int size = calcSize(c_octave, layer);
// Ignore pixels without a 3x3x3 neighbourhood in the layer above
const int margin = ((calcSize(c_octave, layer + 1) >> 1) >> c_octave) + 1;
const int j = threadIdx.x + blockIdx.x * (blockDim.x - 2) + margin - 1; // current col x
const int i = threadIdx.y + blockIdx_y * (blockDim.y - 2) + margin - 1; // current row y
// Is this thread within the hessian buffer?
const int zoff = blockDim.x * blockDim.y;
const int localLin = threadIdx.x + threadIdx.y * blockDim.x + zoff;
N9[localLin - zoff] = det.ptr(c_layer_rows * (layer - 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin ] = det.ptr(c_layer_rows * (layer ) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
N9[localLin + zoff] = det.ptr(c_layer_rows * (layer + 1) + ::min(::max(i, 0), c_img_rows - 1))[::min(::max(j, 0), c_img_cols - 1)];
__syncthreads();
if (i < c_layer_rows - margin && j < c_layer_cols - margin && threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1)
{
float val0 = N9[localLin];
if (val0 > c_hessianThreshold)
{
// Coordinates for the start of the wavelet in the sum image. There
// is some integer division involved, so don't try to simplify this
// (cancel out sampleStep) without checking the result is the same
const int sum_i = (i - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (j - ((size >> 1) >> c_octave)) << c_octave;
if (Mask::check(sum_i, sum_j, size, maskOffset))
{
// Check to see if we have a max (in its 26 neighbours)
const bool condmax = val0 > N9[localLin - 1 - blockDim.x - zoff]
&& val0 > N9[localLin - blockDim.x - zoff]
&& val0 > N9[localLin + 1 - blockDim.x - zoff]
&& val0 > N9[localLin - 1 - zoff]
&& val0 > N9[localLin - zoff]
&& val0 > N9[localLin + 1 - zoff]
&& val0 > N9[localLin - 1 + blockDim.x - zoff]
&& val0 > N9[localLin + blockDim.x - zoff]
&& val0 > N9[localLin + 1 + blockDim.x - zoff]
&& val0 > N9[localLin - 1 - blockDim.x]
&& val0 > N9[localLin - blockDim.x]
&& val0 > N9[localLin + 1 - blockDim.x]
&& val0 > N9[localLin - 1 ]
&& val0 > N9[localLin + 1 ]
&& val0 > N9[localLin - 1 + blockDim.x]
&& val0 > N9[localLin + blockDim.x]
&& val0 > N9[localLin + 1 + blockDim.x]
&& val0 > N9[localLin - 1 - blockDim.x + zoff]
&& val0 > N9[localLin - blockDim.x + zoff]
&& val0 > N9[localLin + 1 - blockDim.x + zoff]
&& val0 > N9[localLin - 1 + zoff]
&& val0 > N9[localLin + zoff]
&& val0 > N9[localLin + 1 + zoff]
&& val0 > N9[localLin - 1 + blockDim.x + zoff]
&& val0 > N9[localLin + blockDim.x + zoff]
&& val0 > N9[localLin + 1 + blockDim.x + zoff]
;
if(condmax)
{
unsigned int ind = atomicInc(maxCounter,(unsigned int) -1);
if (ind < c_max_candidates)
{
const int laplacian = (int) copysignf(1.0f, trace.ptr(layer * c_layer_rows + i)[j]);
maxPosBuffer[ind] = make_int4(j, i, layer, laplacian);
}
}
}
}
}
}
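// Each block caches a (blockDim.x x blockDim.y) tile of the determinant for the layer below,
// the current layer and the layer above in shared memory (N9). Interior threads whose response
// exceeds c_hessianThreshold and passes the (optional) mask check compare val0 against its 26
// spatial/scale neighbours; surviving maxima are appended to maxPosBuffer with atomicInc,
// storing (x, y, layer) and the sign of the trace as the Laplacian.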
void icvFindMaximaInLayer_gpu(const PtrStepf& det, const PtrStepf& trace, int4* maxPosBuffer, unsigned int* maxCounter,
int img_rows, int img_cols, int octave, bool use_mask, int nOctaveLayers, const size_t maskOffset)
{
const int layer_rows = img_rows >> octave;
const int layer_cols = img_cols >> octave;
const int min_margin = ((calcSize(octave, 2) >> 1) >> octave) + 1;
dim3 threads(16, 16);
dim3 grid;
grid.x = divUp(layer_cols - 2 * min_margin, threads.x - 2);
grid.y = divUp(layer_rows - 2 * min_margin, threads.y - 2) * nOctaveLayers;
const size_t smem_size = threads.x * threads.y * 3 * sizeof(float);
icvFindMaximaInLayer<WithOutMask><<<grid, threads, smem_size>>>(det, trace, maxPosBuffer, maxCounter, 0);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// INTERPOLATION
__global__ void icvInterpolateKeypoint(const PtrStepf det, const int4* maxPosBuffer,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
const int4 maxPos = maxPosBuffer[blockIdx.x];
const int j = maxPos.x - 1 + threadIdx.x;
const int i = maxPos.y - 1 + threadIdx.y;
const int layer = maxPos.z - 1 + threadIdx.z;
__shared__ float N9[3][3][3];
N9[threadIdx.z][threadIdx.y][threadIdx.x] = det.ptr(c_layer_rows * layer + i)[j];
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
{
__shared__ float dD[3];
//dx
dD[0] = -0.5f * (N9[1][1][2] - N9[1][1][0]);
//dy
dD[1] = -0.5f * (N9[1][2][1] - N9[1][0][1]);
//ds
dD[2] = -0.5f * (N9[2][1][1] - N9[0][1][1]);
__shared__ float H[3][3];
//dxx
H[0][0] = N9[1][1][0] - 2.0f * N9[1][1][1] + N9[1][1][2];
//dxy
H[0][1]= 0.25f * (N9[1][2][2] - N9[1][2][0] - N9[1][0][2] + N9[1][0][0]);
//dxs
H[0][2]= 0.25f * (N9[2][1][2] - N9[2][1][0] - N9[0][1][2] + N9[0][1][0]);
//dyx = dxy
H[1][0] = H[0][1];
//dyy
H[1][1] = N9[1][0][1] - 2.0f * N9[1][1][1] + N9[1][2][1];
//dys
H[1][2]= 0.25f * (N9[2][2][1] - N9[2][0][1] - N9[0][2][1] + N9[0][0][1]);
//dsx = dxs
H[2][0] = H[0][2];
//dsy = dys
H[2][1] = H[1][2];
//dss
H[2][2] = N9[0][1][1] - 2.0f * N9[1][1][1] + N9[2][1][1];
__shared__ float x[3];
if (solve3x3(H, dD, x))
{
if (::fabs(x[0]) <= 1.f && ::fabs(x[1]) <= 1.f && ::fabs(x[2]) <= 1.f)
{
// if the step is within the interpolation region, perform it
const int size = calcSize(c_octave, maxPos.z);
const int sum_i = (maxPos.y - ((size >> 1) >> c_octave)) << c_octave;
const int sum_j = (maxPos.x - ((size >> 1) >> c_octave)) << c_octave;
const float center_i = sum_i + (float)(size - 1) / 2;
const float center_j = sum_j + (float)(size - 1) / 2;
const float px = center_j + x[0] * (1 << c_octave);
const float py = center_i + x[1] * (1 << c_octave);
const int ds = size - calcSize(c_octave, maxPos.z - 1);
const float psize = roundf(size + x[2] * ds);
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = psize * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) >= grad_wav_size && (c_img_cols + 1) >= grad_wav_size)
{
// Get a new feature index.
unsigned int ind = atomicInc(featureCounter, (unsigned int)-1);
if (ind < c_max_features)
{
featureX[ind] = px;
featureY[ind] = py;
featureLaplacian[ind] = maxPos.w;
featureOctave[ind] = c_octave;
featureSize[ind] = psize;
featureHessian[ind] = N9[1][1][1];
}
} // grad_wav_size check
} // If the subpixel interpolation worked
}
} // If this is thread 0.
}
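// Sub-pixel / sub-scale refinement: each 3x3x3 block loads the 27 determinant values around a
// candidate, thread (0,0,0) builds the negated gradient dD and the Hessian H by central
// differences and solves H * x = dD via solve3x3. The offset is accepted only if every
// component lies within [-1, 1]; the refined keypoint is then mapped back to image coordinates
// through the octave scaling and appended with atomicInc.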
void icvInterpolateKeypoint_gpu(const PtrStepf& det, const int4* maxPosBuffer, unsigned int maxCounter,
float* featureX, float* featureY, int* featureLaplacian, int* featureOctave, float* featureSize, float* featureHessian,
unsigned int* featureCounter)
{
dim3 threads;
threads.x = 3;
threads.y = 3;
threads.z = 3;
dim3 grid;
grid.x = maxCounter;
icvInterpolateKeypoint<<<grid, threads>>>(det, maxPosBuffer, featureX, featureY, featureLaplacian, featureOctave, featureSize, featureHessian, featureCounter);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Orientation
#define ORI_SEARCH_INC 5
#define ORI_WIN 60
#define ORI_SAMPLES 113
__constant__ float c_aptX[ORI_SAMPLES] = {-6, -5, -5, -5, -5, -5, -5, -5, -4, -4, -4, -4, -4, -4, -4, -4, -4, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -3, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6};
__constant__ float c_aptY[ORI_SAMPLES] = {0, -3, -2, -1, 0, 1, 2, 3, -4, -3, -2, -1, 0, 1, 2, 3, 4, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, -4, -3, -2, -1, 0, 1, 2, 3, 4, -3, -2, -1, 0, 1, 2, 3, 0};
__constant__ float c_aptW[ORI_SAMPLES] = {0.001455130288377404f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.001455130288377404f, 0.0035081731621176f, 0.00720730796456337f, 0.01261763460934162f, 0.0188232995569706f, 0.02392910048365593f, 0.02592208795249462f, 0.02392910048365593f, 0.0188232995569706f, 0.01261763460934162f, 0.00720730796456337f, 0.0035081731621176f, 0.001455130288377404f, 0.003238451667129993f, 0.00665318313986063f, 0.01164754293859005f, 0.01737609319388866f, 0.02208934165537357f, 0.02392910048365593f, 0.02208934165537357f, 0.01737609319388866f, 0.01164754293859005f, 0.00665318313986063f, 0.003238451667129993f, 0.002547456417232752f, 0.005233579315245152f, 0.009162282571196556f, 0.01366852037608624f, 0.01737609319388866f, 0.0188232995569706f, 0.01737609319388866f, 0.01366852037608624f, 0.009162282571196556f, 0.005233579315245152f, 0.002547456417232752f, 0.001707611023448408f, 0.0035081731621176f, 0.006141661666333675f, 0.009162282571196556f, 0.01164754293859005f, 0.01261763460934162f, 0.01164754293859005f, 0.009162282571196556f, 0.006141661666333675f, 0.0035081731621176f, 0.001707611023448408f, 0.002003900473937392f, 0.0035081731621176f, 0.005233579315245152f, 0.00665318313986063f, 0.00720730796456337f, 0.00665318313986063f, 0.005233579315245152f, 0.0035081731621176f, 0.002003900473937392f, 0.001707611023448408f, 0.002547456417232752f, 0.003238451667129993f, 0.0035081731621176f, 0.003238451667129993f, 0.002547456417232752f, 0.001707611023448408f, 0.001455130288377404f};
__constant__ float c_NX[2][5] = {{0, 0, 2, 4, -1}, {2, 0, 4, 4, 1}};
__constant__ float c_NY[2][5] = {{0, 0, 4, 2, 1}, {0, 2, 4, 4, -1}};
__global__ void icvCalcOrientation(const float* featureX, const float* featureY, const float* featureSize, float* featureDir)
{
__shared__ float s_X[128];
__shared__ float s_Y[128];
__shared__ float s_angle[128];
__shared__ float s_sumx[32 * 4];
__shared__ float s_sumy[32 * 4];
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = featureSize[blockIdx.x] * 1.2f / 9.0f;
/* To find the dominant orientation, the gradients in x and y are
sampled in a circle of radius 6s using wavelets of size 4s.
We ensure the gradient wavelet size is even to ensure the
wavelet pattern is balanced and symmetric around its center */
const int grad_wav_size = 2 * __float2int_rn(2.0f * s);
// check when grad_wav_size is too big
if ((c_img_rows + 1) < grad_wav_size || (c_img_cols + 1) < grad_wav_size)
return;
// Calc X, Y, angle and store it to shared memory
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
float X = 0.0f, Y = 0.0f, angle = 0.0f;
if (tid < ORI_SAMPLES)
{
const float margin = (float)(grad_wav_size - 1) / 2.0f;
const int x = __float2int_rn(featureX[blockIdx.x] + c_aptX[tid] * s - margin);
const int y = __float2int_rn(featureY[blockIdx.x] + c_aptY[tid] * s - margin);
if (y >= 0 && y < (c_img_rows + 1) - grad_wav_size &&
x >= 0 && x < (c_img_cols + 1) - grad_wav_size)
{
X = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NX, 4, grad_wav_size, y, x);
Y = c_aptW[tid] * icvCalcHaarPatternSum<2>(c_NY, 4, grad_wav_size, y, x);
angle = atan2f(Y, X);
if (angle < 0)
angle += 2.0f * CV_PI_F;
angle *= 180.0f / CV_PI_F;
}
}
s_X[tid] = X;
s_Y[tid] = Y;
s_angle[tid] = angle;
__syncthreads();
float bestx = 0, besty = 0, best_mod = 0;
#pragma unroll
for (int i = 0; i < 18; ++i)
{
const int dir = (i * 4 + threadIdx.y) * ORI_SEARCH_INC;
float sumx = 0.0f, sumy = 0.0f;
int d = ::abs(__float2int_rn(s_angle[threadIdx.x]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx = s_X[threadIdx.x];
sumy = s_Y[threadIdx.x];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 32]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 32];
sumy += s_Y[threadIdx.x + 32];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 64]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 64];
sumy += s_Y[threadIdx.x + 64];
}
d = ::abs(__float2int_rn(s_angle[threadIdx.x + 96]) - dir);
if (d < ORI_WIN / 2 || d > 360 - ORI_WIN / 2)
{
sumx += s_X[threadIdx.x + 96];
sumy += s_Y[threadIdx.x + 96];
}
cv::gpu::device::reduce<32>(s_sumx + threadIdx.y * 32, sumx, threadIdx.x, plus<volatile float>());
cv::gpu::device::reduce<32>(s_sumy + threadIdx.y * 32, sumy, threadIdx.x, plus<volatile float>());
const float temp_mod = sumx * sumx + sumy * sumy;
if (temp_mod > best_mod)
{
best_mod = temp_mod;
bestx = sumx;
besty = sumy;
}
__syncthreads();
}
if (threadIdx.x == 0)
{
s_X[threadIdx.y] = bestx;
s_Y[threadIdx.y] = besty;
s_angle[threadIdx.y] = best_mod;
}
__syncthreads();
if (threadIdx.x == 0 && threadIdx.y == 0)
{
int bestIdx = 0;
if (s_angle[1] > s_angle[bestIdx])
bestIdx = 1;
if (s_angle[2] > s_angle[bestIdx])
bestIdx = 2;
if (s_angle[3] > s_angle[bestIdx])
bestIdx = 3;
float kp_dir = atan2f(s_Y[bestIdx], s_X[bestIdx]);
if (kp_dir < 0)
kp_dir += 2.0f * CV_PI_F;
kp_dir *= 180.0f / CV_PI_F;
kp_dir = 360.0f - kp_dir;
if (::fabs(kp_dir - 360.f) < FLT_EPSILON)
kp_dir = 0.f;
featureDir[blockIdx.x] = kp_dir;
}
}
#undef ORI_SEARCH_INC
#undef ORI_WIN
#undef ORI_SAMPLES
void icvCalcOrientation_gpu(const float* featureX, const float* featureY, const float* featureSize, float* featureDir, int nFeatures)
{
dim3 threads;
threads.x = 32;
threads.y = 4;
dim3 grid;
grid.x = nFeatures;
icvCalcOrientation<<<grid, threads>>>(featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
////////////////////////////////////////////////////////////////////////
// Descriptors
#define PATCH_SZ 20
__constant__ float c_DW[PATCH_SZ * PATCH_SZ] =
{
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002302826324012131f, 0.0005262381164357066f, 0.001097041997127235f, 0.002086334861814976f, 0.003619635012000799f, 0.005728822201490402f, 0.008271530270576477f, 0.01089497376233339f, 0.01309141051024199f, 0.01435048412531614f, 0.01435048412531614f, 0.01309141051024199f, 0.01089497376233339f, 0.008271530270576477f, 0.005728822201490402f, 0.003619635012000799f, 0.002086334861814976f, 0.001097041997127235f, 0.0005262381164357066f, 0.0002302826324012131f,
0.0002100782439811155f, 0.0004800673632416874f, 0.001000790391117334f, 0.001903285388834775f, 0.00330205773934722f, 0.00522619066759944f, 0.007545807864516974f, 0.009939077310264111f, 0.01194280479103327f, 0.01309141051024199f, 0.01309141051024199f, 0.01194280479103327f, 0.009939077310264111f, 0.007545807864516974f, 0.00522619066759944f, 0.00330205773934722f, 0.001903285388834775f, 0.001000790391117334f, 0.0004800673632416874f, 0.0002100782439811155f,
0.0001748319627949968f, 0.0003995231236331165f, 0.0008328808471560478f, 0.001583957928232849f, 0.002748048631474376f, 0.004349356517195702f, 0.006279794964939356f, 0.008271529339253902f, 0.009939077310264111f, 0.01089497376233339f, 0.01089497376233339f, 0.009939077310264111f, 0.008271529339253902f, 0.006279794964939356f, 0.004349356517195702f, 0.002748048631474376f, 0.001583957928232849f, 0.0008328808471560478f, 0.0003995231236331165f, 0.0001748319627949968f,
0.0001327334757661447f, 0.0003033203829545528f, 0.0006323281559161842f, 0.001202550483867526f, 0.002086335094645619f, 0.00330205773934722f, 0.004767658654600382f, 0.006279794964939356f, 0.007545807864516974f, 0.008271530270576477f, 0.008271530270576477f, 0.007545807864516974f, 0.006279794964939356f, 0.004767658654600382f, 0.00330205773934722f, 0.002086335094645619f, 0.001202550483867526f, 0.0006323281559161842f, 0.0003033203829545528f, 0.0001327334757661447f,
9.193058212986216e-005f, 0.0002100782585330308f, 0.0004379475140012801f, 0.0008328807889483869f, 0.001444985857233405f, 0.002286989474669099f, 0.00330205773934722f, 0.004349356517195702f, 0.00522619066759944f, 0.005728822201490402f, 0.005728822201490402f, 0.00522619066759944f, 0.004349356517195702f, 0.00330205773934722f, 0.002286989474669099f, 0.001444985857233405f, 0.0008328807889483869f, 0.0004379475140012801f, 0.0002100782585330308f, 9.193058212986216e-005f,
5.808438800158911e-005f, 0.0001327334903180599f, 0.0002767078403849155f, 0.0005262380582280457f, 0.0009129836107604206f, 0.001444985857233405f, 0.002086335094645619f, 0.002748048631474376f, 0.00330205773934722f, 0.003619635012000799f, 0.003619635012000799f, 0.00330205773934722f, 0.002748048631474376f, 0.002086335094645619f, 0.001444985857233405f, 0.0009129836107604206f, 0.0005262380582280457f, 0.0002767078403849155f, 0.0001327334903180599f, 5.808438800158911e-005f,
3.34794785885606e-005f, 7.650675252079964e-005f, 0.0001594926579855382f, 0.0003033203247468919f, 0.0005262380582280457f, 0.0008328807889483869f, 0.001202550483867526f, 0.001583957928232849f, 0.001903285388834775f, 0.002086334861814976f, 0.002086334861814976f, 0.001903285388834775f, 0.001583957928232849f, 0.001202550483867526f, 0.0008328807889483869f, 0.0005262380582280457f, 0.0003033203247468919f, 0.0001594926579855382f, 7.650675252079964e-005f, 3.34794785885606e-005f,
1.760426494001877e-005f, 4.022897701361217e-005f, 8.386484114453197e-005f, 0.0001594926579855382f, 0.0002767078403849155f, 0.0004379475140012801f, 0.0006323281559161842f, 0.0008328808471560478f, 0.001000790391117334f, 0.001097041997127235f, 0.001097041997127235f, 0.001000790391117334f, 0.0008328808471560478f, 0.0006323281559161842f, 0.0004379475140012801f, 0.0002767078403849155f, 0.0001594926579855382f, 8.386484114453197e-005f, 4.022897701361217e-005f, 1.760426494001877e-005f,
8.444558261544444e-006f, 1.929736572492402e-005f, 4.022897701361217e-005f, 7.650675252079964e-005f, 0.0001327334903180599f, 0.0002100782585330308f, 0.0003033203829545528f, 0.0003995231236331165f, 0.0004800673632416874f, 0.0005262381164357066f, 0.0005262381164357066f, 0.0004800673632416874f, 0.0003995231236331165f, 0.0003033203829545528f, 0.0002100782585330308f, 0.0001327334903180599f, 7.650675252079964e-005f, 4.022897701361217e-005f, 1.929736572492402e-005f, 8.444558261544444e-006f,
3.695352233989979e-006f, 8.444558261544444e-006f, 1.760426494001877e-005f, 3.34794785885606e-005f, 5.808438800158911e-005f, 9.193058212986216e-005f, 0.0001327334757661447f, 0.0001748319627949968f, 0.0002100782439811155f, 0.0002302826324012131f, 0.0002302826324012131f, 0.0002100782439811155f, 0.0001748319627949968f, 0.0001327334757661447f, 9.193058212986216e-005f, 5.808438800158911e-005f, 3.34794785885606e-005f, 1.760426494001877e-005f, 8.444558261544444e-006f, 3.695352233989979e-006f
};
struct WinReader
{
typedef uchar elem_type;
__device__ __forceinline__ WinReader(float centerX_, float centerY_, float win_offset_, float cos_dir_, float sin_dir_) :
centerX(centerX_), centerY(centerY_), win_offset(win_offset_), cos_dir(cos_dir_), sin_dir(sin_dir_)
{
}
__device__ __forceinline__ uchar operator ()(int i, int j) const
{
float pixel_x = centerX + (win_offset + j) * cos_dir + (win_offset + i) * sin_dir;
float pixel_y = centerY - (win_offset + j) * sin_dir + (win_offset + i) * cos_dir;
return tex2D(imgTex, pixel_x, pixel_y);
}
float centerX;
float centerY;
float win_offset;
float cos_dir;
float sin_dir;
};
__device__ void calc_dx_dy(float s_dx_bin[25], float s_dy_bin[25],
const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
__shared__ float s_PATCH[6][6];
const float centerX = featureX[blockIdx.x];
const float centerY = featureY[blockIdx.x];
const float size = featureSize[blockIdx.x];
float descriptor_dir = 360.0f - featureDir[blockIdx.x];
if (std::abs(descriptor_dir - 360.f) < FLT_EPSILON)
descriptor_dir = 0.f;
descriptor_dir *= (float)(CV_PI_F / 180.0f);
/* The sampling intervals and wavelet sizes for selecting an orientation
and building the keypoint descriptor are defined relative to 's' */
const float s = size * 1.2f / 9.0f;
/* Extract a window of pixels around the keypoint of size 20s */
const int win_size = (int)((PATCH_SZ + 1) * s);
float sin_dir;
float cos_dir;
sincosf(descriptor_dir, &sin_dir, &cos_dir);
/* Nearest neighbour version (faster) */
const float win_offset = -(float)(win_size - 1) / 2;
// Compute sampling points
// since grids are 2D, need to compute xBlock and yBlock indices
const int xBlock = (blockIdx.y & 3); // blockIdx.y % 4
const int yBlock = (blockIdx.y >> 2); // floor(blockIdx.y/4)
const int xIndex = xBlock * 5 + threadIdx.x;
const int yIndex = yBlock * 5 + threadIdx.y;
const float icoo = ((float)yIndex / (PATCH_SZ + 1)) * win_size;
const float jcoo = ((float)xIndex / (PATCH_SZ + 1)) * win_size;
LinearFilter<WinReader> filter(WinReader(centerX, centerY, win_offset, cos_dir, sin_dir));
s_PATCH[threadIdx.y][threadIdx.x] = filter(icoo, jcoo);
__syncthreads();
if (threadIdx.x < 5 && threadIdx.y < 5)
{
const int tid = threadIdx.y * 5 + threadIdx.x;
const float dw = c_DW[yIndex * PATCH_SZ + xIndex];
const float vx = (s_PATCH[threadIdx.y ][threadIdx.x + 1] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y + 1][threadIdx.x ]) * dw;
const float vy = (s_PATCH[threadIdx.y + 1][threadIdx.x ] - s_PATCH[threadIdx.y][threadIdx.x] + s_PATCH[threadIdx.y + 1][threadIdx.x + 1] - s_PATCH[threadIdx.y ][threadIdx.x + 1]) * dw;
s_dx_bin[tid] = vx;
s_dy_bin[tid] = vy;
}
}
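// calc_dx_dy: blockIdx.x selects the feature, blockIdx.y one of the 4x4 sub-regions
// (xBlock = blockIdx.y & 3, yBlock = blockIdx.y >> 2). The 6x6 threads sample a rotated 6x6
// patch of the image around the keypoint via WinReader + LinearFilter; the inner 5x5 threads
// then form the Haar-like responses vx, vy from differences of neighbouring patch pixels,
// weighted by the Gaussian window c_DW.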
__device__ void reduce_sum25(volatile float* sdata1, volatile float* sdata2, volatile float* sdata3, volatile float* sdata4, int tid)
{
// first step is to reduce from 25 to 16
if (tid < 9) // use 9 threads
{
sdata1[tid] += sdata1[tid + 16];
sdata2[tid] += sdata2[tid + 16];
sdata3[tid] += sdata3[tid + 16];
sdata4[tid] += sdata4[tid + 16];
}
// sum (reduce) from 16 to 1 (unrolled - aligned to a half-warp)
if (tid < 8)
{
sdata1[tid] += sdata1[tid + 8];
sdata1[tid] += sdata1[tid + 4];
sdata1[tid] += sdata1[tid + 2];
sdata1[tid] += sdata1[tid + 1];
sdata2[tid] += sdata2[tid + 8];
sdata2[tid] += sdata2[tid + 4];
sdata2[tid] += sdata2[tid + 2];
sdata2[tid] += sdata2[tid + 1];
sdata3[tid] += sdata3[tid + 8];
sdata3[tid] += sdata3[tid + 4];
sdata3[tid] += sdata3[tid + 2];
sdata3[tid] += sdata3[tid + 1];
sdata4[tid] += sdata4[tid + 8];
sdata4[tid] += sdata4[tid + 4];
sdata4[tid] += sdata4[tid + 2];
sdata4[tid] += sdata4[tid + 1];
}
}
__global__ void compute_descriptors64(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
__shared__ float sdx[25];
__shared__ float sdy[25];
__shared__ float sdxabs[25];
__shared__ float sdyabs[25];
calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir);
__syncthreads();
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 25)
{
sdxabs[tid] = ::fabs(sdx[tid]); // |dx| array
sdyabs[tid] = ::fabs(sdy[tid]); // |dy| array
__syncthreads();
reduce_sum25(sdx, sdy, sdxabs, sdyabs, tid);
__syncthreads();
float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 2);
// write dx, dy, |dx|, |dy|
if (tid == 0)
{
descriptors_block[0] = sdx[0];
descriptors_block[1] = sdy[0];
descriptors_block[2] = sdxabs[0];
descriptors_block[3] = sdyabs[0];
}
}
}
__global__ void compute_descriptors128(PtrStepf descriptors, const float* featureX, const float* featureY, const float* featureSize, const float* featureDir)
{
// 2 floats (dx,dy) for each thread (5x5 sample points in each sub-region)
__shared__ float sdx[25];
__shared__ float sdy[25];
// sum (reduce) 5x5 area response
__shared__ float sd1[25];
__shared__ float sd2[25];
__shared__ float sdabs1[25];
__shared__ float sdabs2[25];
calc_dx_dy(sdx, sdy, featureX, featureY, featureSize, featureDir);
__syncthreads();
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
if (tid < 25)
{
if (sdy[tid] >= 0)
{
sd1[tid] = sdx[tid];
sdabs1[tid] = ::fabs(sdx[tid]);
sd2[tid] = 0;
sdabs2[tid] = 0;
}
else
{
sd1[tid] = 0;
sdabs1[tid] = 0;
sd2[tid] = sdx[tid];
sdabs2[tid] = ::fabs(sdx[tid]);
}
__syncthreads();
reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
__syncthreads();
float* descriptors_block = descriptors.ptr(blockIdx.x) + (blockIdx.y << 3);
// write dx (dy >= 0), |dx| (dy >= 0), dx (dy < 0), |dx| (dy < 0)
if (tid == 0)
{
descriptors_block[0] = sd1[0];
descriptors_block[1] = sdabs1[0];
descriptors_block[2] = sd2[0];
descriptors_block[3] = sdabs2[0];
}
__syncthreads();
if (sdx[tid] >= 0)
{
sd1[tid] = sdy[tid];
sdabs1[tid] = ::fabs(sdy[tid]);
sd2[tid] = 0;
sdabs2[tid] = 0;
}
else
{
sd1[tid] = 0;
sdabs1[tid] = 0;
sd2[tid] = sdy[tid];
sdabs2[tid] = ::fabs(sdy[tid]);
}
__syncthreads();
reduce_sum25(sd1, sd2, sdabs1, sdabs2, tid);
__syncthreads();
// write dy (dx >= 0), |dy| (dx >= 0), dy (dx < 0), |dy| (dx < 0)
if (tid == 0)
{
descriptors_block[4] = sd1[0];
descriptors_block[5] = sdabs1[0];
descriptors_block[6] = sd2[0];
descriptors_block[7] = sdabs2[0];
}
}
}
template <int BLOCK_DIM_X> __global__ void normalize_descriptors(PtrStepf descriptors)
{
// no need for thread ID
float* descriptor_base = descriptors.ptr(blockIdx.x);
// read in the unnormalized descriptor values (squared)
__shared__ float sqDesc[BLOCK_DIM_X];
const float lookup = descriptor_base[threadIdx.x];
sqDesc[threadIdx.x] = lookup * lookup;
__syncthreads();
if (BLOCK_DIM_X >= 128)
{
if (threadIdx.x < 64)
sqDesc[threadIdx.x] += sqDesc[threadIdx.x + 64];
__syncthreads();
}
// reduction to get total
if (threadIdx.x < 32)
{
volatile float* smem = sqDesc;
smem[threadIdx.x] += smem[threadIdx.x + 32];
smem[threadIdx.x] += smem[threadIdx.x + 16];
smem[threadIdx.x] += smem[threadIdx.x + 8];
smem[threadIdx.x] += smem[threadIdx.x + 4];
smem[threadIdx.x] += smem[threadIdx.x + 2];
smem[threadIdx.x] += smem[threadIdx.x + 1];
}
// compute length (square root)
__shared__ float len;
if (threadIdx.x == 0)
{
len = sqrtf(sqDesc[0]);
}
__syncthreads();
// normalize and store in output
descriptor_base[threadIdx.x] = lookup / len;
}
void compute_descriptors_gpu(const PtrStepSzf& descriptors,
const float* featureX, const float* featureY, const float* featureSize, const float* featureDir, int nFeatures)
{
// compute unnormalized descriptors, then normalize them - odd indexing since grid must be 2D
if (descriptors.cols == 64)
{
compute_descriptors64<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<64><<<dim3(nFeatures, 1, 1), dim3(64, 1, 1)>>>(descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
else
{
compute_descriptors128<<<dim3(nFeatures, 16, 1), dim3(6, 6, 1)>>>(descriptors, featureX, featureY, featureSize, featureDir);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
normalize_descriptors<128><<<dim3(nFeatures, 1, 1), dim3(128, 1, 1)>>>(descriptors);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
}
} // namespace surf
} // namespace device
} // namespace btl
|
c31d759a92d133ff68f2edef774f755ad6dbf5f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void BuildDpsGeometryVolumeForward(const int nthreads,
const T* img, const T* coord, const int* disp_channels,
const int num_batch, const int channels, const int height,
const int width, const int sep, const int interval, const int z_num, const int y_num, const int x_num,
T* volume) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int pw = index % x_num;
int ph = (index / x_num) % y_num;
int pd = (index / x_num / y_num) % z_num;
int c = (index / x_num / y_num/ z_num) % sep;
int n = index / x_num / y_num / z_num / sep;
T scale = (T)((x_num - 1) / interval * interval) / (x_num - 1.);
// shift channels by the ratio of pd/maxdisp
int c_shift = int( (T) (pw / interval * interval / scale) / (x_num - 1.) * (channels - sep + 1. - 1e-9) ); // 0 -> 32
// AT_ASSERTM(c_shift <= (channels - sep), "c_shift is (channels - sep) at max");
c_shift = disp_channels[c_shift];
// compute the separated channel
int sep_c = (c_shift / sep + 1) * sep;
int cc;
if ( c < c_shift + sep - sep_c )
cc = sep_c + c;
else
cc = sep_c - (sep - c);
int index_coord_x = (((n * z_num + pd) * y_num + ph) * x_num + pw) * 2;
int index_coord_y = index_coord_x + 1;
T coord_y = (coord[index_coord_y] + 1.) / 2. * (height - 1.);
T coord_x = (coord[index_coord_x] + 1.) / 2. * (width - 1.);
const T* offset_input = img + (n * channels + cc) * height * width;
volume[index] = bilinear_interpolate(offset_input, height, width, coord_y, coord_x);
}
}
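// The flat output index decomposes as (((n * sep + c) * z_num + pd) * y_num + ph) * x_num + pw,
// i.e. the volume is laid out as [N, sep, z, y, x]. coord stores normalized (x, y) pairs in
// [-1, 1], mapped to pixel coordinates via (v + 1) / 2 * (dim - 1) before bilinear sampling;
// the source channel is taken from disp_channels based on the quantized x position and then
// wrapped inside a window of `sep` consecutive channels starting at c_shift.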
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void BuildDpsGeometryVolumeBackwardFeature(const int nthreads,
const T* grad, const T* coord, const int* disp_channels,
const int num_batch, const int channels, const int height,
const int width, const int sep, const int interval, const int z_num, const int y_num, const int x_num,
T* grad_input) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int pw = index % x_num;
int ph = (index / x_num) % y_num;
int pd = (index / x_num / y_num) % z_num;
int c = (index / x_num / y_num/ z_num) % sep;
int n = index / x_num / y_num / z_num / sep;
T scale = (T)((x_num - 1) / interval * interval) / (x_num - 1.);
// shift channels by the ratio of pd/maxdisp
int c_shift = int( (T) (pw / interval * interval / scale) / (x_num - 1.) * (channels - sep + 1. - 1e-9) ); // 0 -> 32
// AT_ASSERTM(c_shift <= (channels - sep), "c_shift is (channels - sep) at max");
c_shift = disp_channels[c_shift];
// compute the separated channel
int sep_c = (c_shift / sep + 1) * sep;
int cc;
if ( c < c_shift + sep - sep_c )
cc = sep_c + c;
else
cc = sep_c - (sep - c);
int index_coord_x = (((n * z_num + pd) * y_num + ph) * x_num + pw) * 2;
int index_coord_y = index_coord_x + 1;
T coord_y = (coord[index_coord_y] + 1.) / 2. * (height - 1.);
T coord_x = (coord[index_coord_x] + 1.) / 2. * (width - 1.);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, coord_y, coord_x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high);
T top_diff_this_bin = grad[index];
T g1 = top_diff_this_bin * w1;
T g2 = top_diff_this_bin * w2;
T g3 = top_diff_this_bin * w3;
T g4 = top_diff_this_bin * w4;
T* offset_grad_input = grad_input + (n * channels + cc) * height * width;
if (w1 >= 1e-10)
atomicAdd(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
if (w2 >= 1e-10)
atomicAdd(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
if (w3 >= 1e-10)
atomicAdd(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
if (w4 >= 1e-10)
atomicAdd(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // CUDA_1D_KERNEL_LOOP
} // BuildDpsGeometryVolumeBackward
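// The backward kernel repeats the forward index/channel computation for every gradient element,
// recomputes the four bilinear weights, and scatters the incoming gradient to the corresponding
// corner pixels of the selected channel with atomicAdd (weights below 1e-10 are skipped).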
at::Tensor BuildDpsGeometryVolume_forward_cuda(const at::Tensor& img,
const at::Tensor& coord,
const at::Tensor& disp_channels,
const int sep,
const int interval) {
AT_ASSERTM(img.type().is_cuda(), "img must be a CUDA tensor");
AT_ASSERTM(coord.type().is_cuda(), "coord must be a CUDA tensor");
AT_ASSERTM((img.size(0) == coord.size(0)) && (coord.size(4) == 2), \
"Image and coord should of same batch.");
auto num_batch = img.size(0);
auto channels = img.size(1);
auto height = img.size(2);
auto width = img.size(3);
auto z_num = coord.size(1);
auto y_num = coord.size(2);
auto x_num = coord.size(3);
AT_ASSERTM(interval <= x_num - 1, "interval should be less than or equal to x_num - 1");
auto output = at::empty({num_batch, sep, z_num, y_num, x_num}, img.options());
auto output_size = num_batch * sep * z_num * y_num * x_num;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)(output_size), 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(img.type(), "BuildDpsGeometryVolume_forward", [&] {
hipLaunchKernelGGL(( BuildDpsGeometryVolumeForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
img.contiguous().data<scalar_t>(),
coord.contiguous().data<scalar_t>(),
disp_channels.contiguous().data<int>(),
num_batch,
channels,
height,
width,
sep,
interval,
z_num,
y_num,
x_num,
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return output;
}
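// A minimal calling sketch (hypothetical tensor names; shapes follow the checks above):
//   at::Tensor img   = ...;  // [B, C, H, W] feature map on the GPU
//   at::Tensor coord = ...;  // [B, D, Hv, Wv, 2] sampling grid, normalized to [-1, 1]
//   at::Tensor vol = BuildDpsGeometryVolume_forward_cuda(img, coord, disp_channels, sep, interval);
//   // vol has shape [B, sep, D, Hv, Wv]; each element is a bilinearly sampled image value.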
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor BuildDpsGeometryVolume_backward_cuda(const at::Tensor& grad,
const at::Tensor& coord,
const at::Tensor& disp_channels,
const int height,
const int width,
const int channels,
const int sep,
const int interval) {
AT_ASSERTM(coord.type().is_cuda(), "coord must be a CUDA tensor");
auto num_batch = grad.size(0);
auto z_num = grad.size(2);
auto y_num = grad.size(3);
auto x_num = grad.size(4);
auto grad_input = at::zeros({num_batch, channels, height, width}, grad.options());
AT_ASSERTM((z_num == coord.size(1)) && (y_num == coord.size(2)) && (x_num == coord.size(3)),
"grad shape is wrong");
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "BuildDpsGeometryVolume_backward", [&] {
hipLaunchKernelGGL(( BuildDpsGeometryVolumeBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
coord.contiguous().data<scalar_t>(),
disp_channels.contiguous().data<int>(),
num_batch,
channels,
height,
width,
sep,
interval,
z_num,
y_num,
x_num,
grad_input.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
| c31d759a92d133ff68f2edef774f755ad6dbf5f3.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void BuildDpsGeometryVolumeForward(const int nthreads,
const T* img, const T* coord, const int* disp_channels,
const int num_batch, const int channels, const int height,
const int width, const int sep, const int interval, const int z_num, const int y_num, const int x_num,
T* volume) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int pw = index % x_num;
int ph = (index / x_num) % y_num;
int pd = (index / x_num / y_num) % z_num;
int c = (index / x_num / y_num/ z_num) % sep;
int n = index / x_num / y_num / z_num / sep;
T scale = (T)((x_num - 1) / interval * interval) / (x_num - 1.);
// shift channels by the ratio of pd/maxdisp
int c_shift = int( (T) (pw / interval * interval / scale) / (x_num - 1.) * (channels - sep + 1. - 1e-9) ); // 0 -> 32
// AT_ASSERTM(c_shift <= (channels - sep), "c_shift is (channels - sep) at max");
c_shift = disp_channels[c_shift];
// compute the separated channel
int sep_c = (c_shift / sep + 1) * sep;
int cc;
if ( c < c_shift + sep - sep_c )
cc = sep_c + c;
else
cc = sep_c - (sep - c);
int index_coord_x = (((n * z_num + pd) * y_num + ph) * x_num + pw) * 2;
int index_coord_y = index_coord_x + 1;
T coord_y = (coord[index_coord_y] + 1.) / 2. * (height - 1.);
T coord_x = (coord[index_coord_x] + 1.) / 2. * (width - 1.);
const T* offset_input = img + (n * channels + cc) * height * width;
volume[index] = bilinear_interpolate(offset_input, height, width, coord_y, coord_x);
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void BuildDpsGeometryVolumeBackwardFeature(const int nthreads,
const T* grad, const T* coord, const int* disp_channels,
const int num_batch, const int channels, const int height,
const int width, const int sep, const int interval, const int z_num, const int y_num, const int x_num,
T* grad_input) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int pw = index % x_num;
int ph = (index / x_num) % y_num;
int pd = (index / x_num / y_num) % z_num;
int c = (index / x_num / y_num/ z_num) % sep;
int n = index / x_num / y_num / z_num / sep;
T scale = (T)((x_num - 1) / interval * interval) / (x_num - 1.);
// shift channels by the ratio of pd/maxdisp
int c_shift = int( (T) (pw / interval * interval / scale) / (x_num - 1.) * (channels - sep + 1. - 1e-9) ); // 0 -> 32
// AT_ASSERTM(c_shift <= (channels - sep), "c_shift is (channels - sep) at max");
c_shift = disp_channels[c_shift];
// compute the separated channel
int sep_c = (c_shift / sep + 1) * sep;
int cc;
if ( c < c_shift + sep - sep_c )
cc = sep_c + c;
else
cc = sep_c - (sep - c);
int index_coord_x = (((n * z_num + pd) * y_num + ph) * x_num + pw) * 2;
int index_coord_y = index_coord_x + 1;
T coord_y = (coord[index_coord_y] + 1.) / 2. * (height - 1.);
T coord_x = (coord[index_coord_x] + 1.) / 2. * (width - 1.);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, coord_y, coord_x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high);
T top_diff_this_bin = grad[index];
T g1 = top_diff_this_bin * w1;
T g2 = top_diff_this_bin * w2;
T g3 = top_diff_this_bin * w3;
T g4 = top_diff_this_bin * w4;
T* offset_grad_input = grad_input + (n * channels + cc) * height * width;
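    // Scatter the four bilinear taps back into the input feature map. atomicAdd is
    // required because several volume cells may sample the same input pixel.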
if (w1 >= 1e-10)
atomicAdd(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
if (w2 >= 1e-10)
atomicAdd(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
if (w3 >= 1e-10)
atomicAdd(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
if (w4 >= 1e-10)
atomicAdd(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // CUDA_1D_KERNEL_LOOP
} // BuildDpsGeometryVolumeBackward
at::Tensor BuildDpsGeometryVolume_forward_cuda(const at::Tensor& img,
const at::Tensor& coord,
const at::Tensor& disp_channels,
const int sep,
const int interval) {
AT_ASSERTM(img.type().is_cuda(), "img must be a CUDA tensor");
AT_ASSERTM(coord.type().is_cuda(), "coord must be a CUDA tensor");
  AT_ASSERTM((img.size(0) == coord.size(0)) && (coord.size(4) == 2), \
			"Image and coord should be of the same batch.");
auto num_batch = img.size(0);
auto channels = img.size(1);
auto height = img.size(2);
auto width = img.size(3);
auto z_num = coord.size(1);
auto y_num = coord.size(2);
auto x_num = coord.size(3);
  AT_ASSERTM(interval <= x_num - 1, "interval should be less than or equal to x_num - 1");
auto output = at::empty({num_batch, sep, z_num, y_num, x_num}, img.options());
auto output_size = num_batch * sep * z_num * y_num * x_num;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)(output_size), 512L), 4096L));
dim3 block(512);
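  // One thread per output element, capped at 4096 blocks; CUDA_1D_KERNEL_LOOP
  // grid-strides over any remaining elements.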
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(img.type(), "BuildDpsGeometryVolume_forward", [&] {
BuildDpsGeometryVolumeForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
img.contiguous().data<scalar_t>(),
coord.contiguous().data<scalar_t>(),
disp_channels.contiguous().data<int>(),
num_batch,
channels,
height,
width,
sep,
interval,
z_num,
y_num,
x_num,
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use its sizes instead -> save memory
at::Tensor BuildDpsGeometryVolume_backward_cuda(const at::Tensor& grad,
const at::Tensor& coord,
const at::Tensor& disp_channels,
const int height,
const int width,
const int channels,
const int sep,
const int interval) {
AT_ASSERTM(coord.type().is_cuda(), "coord must be a CUDA tensor");
auto num_batch = grad.size(0);
auto z_num = grad.size(2);
auto y_num = grad.size(3);
auto x_num = grad.size(4);
auto grad_input = at::zeros({num_batch, channels, height, width}, grad.options());
AT_ASSERTM((z_num == coord.size(1)) && (y_num == coord.size(2)) && (x_num == coord.size(3)),
"grad shape is wrong");
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.type(), "BuildDpsGeometryVolume_backward", [&] {
BuildDpsGeometryVolumeBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
coord.contiguous().data<scalar_t>(),
disp_channels.contiguous().data<int>(),
num_batch,
channels,
height,
width,
sep,
interval,
z_num,
y_num,
x_num,
grad_input.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
ac5f01649e7cd7b7bb2c1a33bd027bcf75f3cd25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define ROWS 4
#define COLUMNS 5
typedef struct mystruct
{
int a[ROWS];
int **data;
}mystruct;
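/* The struct stores a pointer to dynamically allocated host memory, so copying it to the
   device needs a deep copy: each row, the array of row pointers, and finally the struct. */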
__global__ void printKernel(mystruct *d_var)
{
int i, j;
for(i = 0; i < ROWS; i++)
{
for(j = 0; j < COLUMNS; j++)
{
printf("%d\t", d_var->data[i][j]);
}
printf("\n");
}
}
int main()
{
	int i, j, k = 1;
	mystruct *var, *d_var;
	int *d_rows[ROWS];   /* device row pointers, staged on the host */
	int **d_data;        /* device-side array of row pointers */
	/* Allocate and initialize a dynamic 2D array on CPU */
	var = (mystruct*)malloc(sizeof(mystruct));
	var->data = (int**)malloc(ROWS*sizeof(int*));
	for (i = 0; i < ROWS; i++)
		var->data[i] = (int*)malloc(COLUMNS*sizeof(int));
	for(i = 0; i < ROWS; i++)
	{
		var->a[i] = 2;
		for(j = 0; j < COLUMNS; j++)
		{
			var->data[i][j] = k++;
		}
	}
	/* Allocate device memory for every row and copy the host rows over */
	for (i = 0; i < ROWS; i++)
	{
		hipMalloc((void**)&d_rows[i], COLUMNS*sizeof(int));
		hipMemcpy(d_rows[i], var->data[i], COLUMNS*sizeof(int), hipMemcpyHostToDevice);
	}
	/* Copy the array of row pointers to the device */
	hipMalloc((void**)&d_data, ROWS*sizeof(int*));
	hipMemcpy(d_data, d_rows, ROWS*sizeof(int*), hipMemcpyHostToDevice);
	/* Copy the struct itself, then patch its data member with the device pointer */
	hipMalloc((void**)&d_var, sizeof(mystruct));
	hipMemcpy(d_var, var, sizeof(mystruct), hipMemcpyHostToDevice);
	hipMemcpy(&d_var->data, &d_data, sizeof(int**), hipMemcpyHostToDevice);
	hipLaunchKernelGGL(( printKernel), dim3(1),dim3(1), 0, 0, d_var);
	hipDeviceSynchronize();
	/* Release device and host memory */
	for (i = 0; i < ROWS; i++)
	{
		hipFree(d_rows[i]);
		free(var->data[i]);
	}
	hipFree(d_data);
	hipFree(d_var);
	free(var->data);
	free(var);
	return 0;
}
| ac5f01649e7cd7b7bb2c1a33bd027bcf75f3cd25.cu | #include <stdio.h>
#include <stdlib.h>
#define ROWS 4
#define COLUMNS 5
typedef struct mystruct
{
int a[ROWS];
int **data;
}mystruct;
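/* The struct stores a pointer to dynamically allocated host memory, so copying it to the
   device needs a deep copy: each row, the array of row pointers, and finally the struct. */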
__global__ void printKernel(mystruct *d_var)
{
int i, j;
for(i = 0; i < ROWS; i++)
{
for(j = 0; j < COLUMNS; j++)
{
printf("%d\t", d_var->data[i][j]);
}
printf("\n");
}
}
int main()
{
	int i, j, k = 1;
	mystruct *var, *d_var;
	int *d_rows[ROWS];   /* device row pointers, staged on the host */
	int **d_data;        /* device-side array of row pointers */
	/* Allocate and initialize a dynamic 2D array on CPU */
	var = (mystruct*)malloc(sizeof(mystruct));
	var->data = (int**)malloc(ROWS*sizeof(int*));
	for (i = 0; i < ROWS; i++)
		var->data[i] = (int*)malloc(COLUMNS*sizeof(int));
	for(i = 0; i < ROWS; i++)
	{
		var->a[i] = 2;
		for(j = 0; j < COLUMNS; j++)
		{
			var->data[i][j] = k++;
		}
	}
	/* Allocate device memory for every row and copy the host rows over */
	for (i = 0; i < ROWS; i++)
	{
		cudaMalloc((void**)&d_rows[i], COLUMNS*sizeof(int));
		cudaMemcpy(d_rows[i], var->data[i], COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
	}
	/* Copy the array of row pointers to the device */
	cudaMalloc((void**)&d_data, ROWS*sizeof(int*));
	cudaMemcpy(d_data, d_rows, ROWS*sizeof(int*), cudaMemcpyHostToDevice);
	/* Copy the struct itself, then patch its data member with the device pointer */
	cudaMalloc((void**)&d_var, sizeof(mystruct));
	cudaMemcpy(d_var, var, sizeof(mystruct), cudaMemcpyHostToDevice);
	cudaMemcpy(&d_var->data, &d_data, sizeof(int**), cudaMemcpyHostToDevice);
	printKernel<<<1,1>>>(d_var);
	cudaDeviceSynchronize();
	/* Release device and host memory */
	for (i = 0; i < ROWS; i++)
	{
		cudaFree(d_rows[i]);
		free(var->data[i]);
	}
	cudaFree(d_data);
	cudaFree(d_var);
	free(var->data);
	free(var);
	return 0;
}
|
11b772792d2f1d6e9647c20d58cda71f188a0d5f.hip | // !!! This is a file automatically generated by hipify!!!
//ulimit -s unlimited
//nvcc -lcusparse test.cu
//nvcc -lcublas -lcusparse -arch sm_20 test.cu && ./a.out
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "hip/device_functions.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "hipsparse.h"
#define MaxTTD_NNZ_ELEMENTS 300000000
#define TOTALTHREDSPERBLOCK 256
#define COLUMNLENGTH 4
#define NMAXITERKernel 100000
#define ROWSCALE 2
using namespace std;
void generateTTDProblem(int row, int col, int exampleType, int* mOut,
int* nOut, float** A, int** ColIDX, int ** RowIDX, int * nnzElements,
int* boundary, int boundarySize, int** nodeDescription, int rowScale);
void getBoundaryVector(int row, int col, int exampleType, int** boundary,
int* boudarySize);
void getForceVector(float ** d, int m, int r, int c, int exampleType);
int GCD(int a, int b);
__global__ void setup_kernel(hiprandState_t *state);
void checkCUDAError(const char *msg);
void scaleBetta(int* betta, float* beta, float scale);
int minimabs(int * betta);
int maximabs(int * betta);
void print_time_message(clock_t* t1, char message[200]) {
clock_t t2 = clock();
double diff = ((float) t2 - (float) (*t1)) / 1000000.0F;
printf("%s: %f sec.\n", message, diff);
*t1 = clock();
}
double getElapsetTime(clock_t* t1) {
clock_t t2 = clock();
double diff = ((float) t2 - (float) (*t1)) / 1000000.0F;
*t1 = clock();
return diff;
}
__global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals,
float*x, float * lambda, hiprandState_t* cstate) {
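	// Randomized coordinate descent: every thread repeatedly draws a random column,
	// forms the partial derivative from the residuals, applies the soft-threshold
	// (shrinkage) step for the L1 term, and updates x and the residuals atomically.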
int j, i, k;
float delta, tmp; //partialDetivative
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
hiprandState_t localState = cstate[id];
__shared__ float partialDetivative[TOTALTHREDSPERBLOCK];
float xLocal;
float LiLocal;
float ALocal[COLUMNLENGTH];
int cidx;
int RIDX[COLUMNLENGTH];
for (k = 0; k < NMAXITERKernel; k++) {
double d = hiprand_uniform_double(&localState);
int idx = (int) (d * n[0]);
// LOAD A, R, residuals
// float* residualsAddress[COLUMNLENGTH];
xLocal = x[idx];
// LiLocal = Li[idx];
cidx = idx * COLUMNLENGTH;
		partialDetivative[threadIdx.x] = 0;
		LiLocal = 0; /* reset the accumulated column norm for this coordinate */
// #pragma unroll COLUMNLENGTH
for (i = 0; i < COLUMNLENGTH; i++) {
j = cidx + i;
ALocal[i] = A[j];
LiLocal += ALocal[i] * ALocal[i];
RIDX[i] = R_Idx[j] - 1;
// residualsAddress[i] = &residuals[RIDX[i]];
partialDetivative[threadIdx.x] += ALocal[i] * residuals[RIDX[i]];
}
LiLocal = 1 / LiLocal;
tmp = LiLocal * (partialDetivative[threadIdx.x] + lambda[0]);
if (xLocal > tmp) {
delta = -tmp;
} else {
tmp = LiLocal * (partialDetivative[threadIdx.x] - lambda[0]);
if (xLocal < tmp) {
delta = -tmp;
} else {
delta = -xLocal;
}
}
atomicAdd(&x[idx], delta);
// x[idx]+=delta;
// atomicAdd(&x[idx], 1);
for (i = 0; i < COLUMNLENGTH; i++) {
atomicAdd(&residuals[RIDX[i]], ALocal[i] * delta);
// residuals[RIDX[i]]+= ALocal[i] * delta;
// atomicAdd(residualsAddress[i], ALocal[i] * delta);
}
}
// cstate[id] = localState;
}
void calculateTTDProblem() {
int dim1 = 14 * 2;
int dim2 = 1;
int totalThreads = TOTALTHREDSPERBLOCK;
int col, row, exampleType;
// cout << "Enter number of columns: ";
col = 115;
// cin >> col;
// cout << "Enter number of rows: ";
row = 115;
// cin >> row;
// cout << "Enter example type: ";
// cin >> exampleType;
exampleType = 2;
int m, n;
clock_t t1;//, t2;
float * A_TTD;
int * Row_IDX;
int * Col_IDX;
int nnzElements;
int* nodeDescription;
//-------------------------------GET BOUNDARY POINTS
int * boundary;
int boundarySize = 0;
getBoundaryVector(row, col, exampleType, &boundary, &boundarySize);
cout << "Boundary size is " << boundarySize << "\n";
//-------------------------------GENERATE PROBLEMS
t1 = clock();
generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX,
&Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription,
ROWSCALE);
print_time_message(&t1, "Getting problem dimension");
cout << "Dimension of your problem is " << m << " x " << n << "\n";
printf("Number of NNZ: %d\n", nnzElements);
//-------------------------------GET FORCE VECTORS
float* g;
getForceVector(&g, m, row, col, exampleType);
for (int i = 0; i < m; i++) {
g[i] = -g[i];
}
print_time_message(&t1, "Force vector obtained");
float x[n];
float Li[n];
for (int i = 0; i < n; i++) {
x[i] = 0;
Li[i] = 0;
for (int k = 4 * i; k < 4 * i + 4; k++) {
Li[i] += A_TTD[k] * A_TTD[k];
}
if (Li[i] > 0) {
Li[i] = 1 / Li[i];
}
}
//-------------------------------Initialization of the CuSparse library
/* allocate GPU memory and copy the matrix and vectors into it */
dim3 dimBlock( totalThreads);
dim3 dimGridRCDM( dim1, dim2);
int totalthreadsOnBlocks = dim1 * dim2 * totalThreads;
hiprandState_t *devStates;
hipMalloc((void **) &devStates, totalthreadsOnBlocks * sizeof(hiprandState_t));
hipLaunchKernelGGL(( setup_kernel), dim3(dimGridRCDM), dim3(dimBlock) , 0, 0, devStates);
checkCUDAError("Inicializacia ranom states");
float* A_d;
int* R_Idx_d;
hipMalloc((void**) &A_d, nnzElements * sizeof(float));
checkCUDAError("kernel execution Alloc A_d");
hipMalloc((void**) &R_Idx_d, nnzElements * sizeof(int));
checkCUDAError("kernel execution Alloc R_IDX");
int * n_d;
hipMalloc((void**) &n_d, 1 * sizeof(int));
checkCUDAError("kernel execution Alloc ...");
float* g_d;
hipMalloc((void**) &g_d, m * sizeof(float));
checkCUDAError("kernel execution Alloc g_d");
float* x_d;
hipMalloc((void**) &x_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc x_d");
float* lambda_d;
hipMalloc((void**) &lambda_d, 1 * sizeof(float));
checkCUDAError("kernel execution Alloc lambda_d");
// float* Li_d;
// hipMalloc((void**) &Li_d, n * sizeof(float));
// checkCUDAError("kernel execution Alloc Li_d");
print_time_message(&t1, "Device memory allocated");
//------------------------------- COPY DATA
hipMemcpy(A_d, A_TTD, nnzElements * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution Copy A_d");
hipMemcpy(R_Idx_d, Row_IDX, nnzElements * sizeof(int),
hipMemcpyHostToDevice);
checkCUDAError("kernel execution Copy R");
hipMemcpy(g_d, g, m * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
// hipMemcpy(Li_d, Li, n * sizeof(float), hipMemcpyHostToDevice);
// checkCUDAError("kernel execution compy ....");
print_time_message(&t1, "Data coppied");
//-------------------------------serial code
float time;
double serialPower;
float lambda = 0.0001;
double serialObjective = 0;
for (int i = 0; i < m; i++)
serialObjective += g[i] * g[i];
serialObjective = serialObjective * 0.5;
printf("Serial code execution. Objective at beggining: %f\n",
serialObjective);
int SerialContractor = 1;
getElapsetTime(&t1);
for (int i = 0; i < n / SerialContractor; i++) {
int idx = (int) (n * (rand() / (RAND_MAX + 1.0)));
float tmp = 0;
for (int j = idx * 4; j < idx * 4 + 4; j++) {
			tmp += A_TTD[j] * g[Row_IDX[j] - 1]; /* Row_IDX is 1-based */
}
float tmp1 = Li[idx] * (tmp + lambda);
if (x[idx] > tmp1) {
tmp = -tmp1;
} else {
tmp1 = Li[idx] * (tmp - lambda);
if (x[idx] < tmp1) {
tmp = -tmp1;
} else {
tmp = -x[idx];
}
}
x[idx] += tmp;
//update residuals:
for (int j = 4 * idx; j < 4 * idx + 4; j++) {
			g[Row_IDX[j] - 1] += tmp * A_TTD[j]; /* Row_IDX is 1-based */
}
}
time = getElapsetTime(&t1);
serialPower = n / time;
serialPower = (double) serialPower / SerialContractor;
printf("Serial code duration: %f, %f iter/sec\n", time, n / time);
serialObjective = 0;
for (int i = 0; i < m; i++)
serialObjective += g[i] * g[i];
double serialL1Norm = 0;
for (int j = 0; j < n; j++)
serialL1Norm += abs(x[j]);
serialObjective = 0.5 * serialObjective + lambda * serialL1Norm;
printf("Serial code execution. Objective after n iterations: %f\n",
serialObjective);
//-------------------------------Computation
int doworking = 1;
int iterations = 1;
while (doworking == 1) {
cout << "Current lambda is " << lambda
<< ". Do you want to change it? (y/n): ";
string doContinue;
cin >> doContinue;
if (doContinue == "y") {
cout << "Enter lambda: ";
cin >> lambda;
}
cout << "Enter number of iterations: ";
cin >> iterations;
float L1Norm = hipblasSasum(n, x_d, 1);
float residualsSum = hipblasSdot(m, g_d, 1, g_d, 1);
double objectiveValue = 0.5 * residualsSum;
objectiveValue += lambda * L1Norm;
printf("L1 norm = %f, redisuals=%f, objective=%1.16f\n", L1Norm,
residualsSum, objectiveValue);
hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice);
for (int i = 0; i < iterations; i++) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( RCDMKernel), dim3(dimGridRCDM), dim3(dimBlock) , 0, 0, A_d, R_Idx_d, n_d,
g_d, x_d, lambda_d, devStates);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
double parallelPower = totalthreadsOnBlocks;
parallelPower = parallelPower / time;
parallelPower = parallelPower * 1000 * NMAXITERKernel;
printf(
"Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n",
time, parallelPower, parallelPower / serialPower);
hipEventDestroy(start);
hipEventDestroy(stop);
L1Norm = hipblasSasum(n, x_d, 1);
residualsSum = hipblasSnrm2(m, g_d, 1);
objectiveValue = 0.5 * residualsSum * residualsSum;
objectiveValue += lambda * L1Norm;
printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i,
L1Norm, residualsSum, objectiveValue);
}
cout << "Save particular solution to file? (y/n): ";
cin >> doContinue;
if (doContinue == "y") {
float treshHold;
cout << "Enter treshhold for x: ";
cin >> treshHold;
int writtenBars = 0;
FILE *fp;
fp = fopen("/tmp/ttd.txt", "w");
hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError("kernel execution compy ....");
for (int i = 0; i < n; i++) {
if (abs(x[i]) > treshHold) {
writtenBars++;
fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4],
nodeDescription[i * 4 + 1], nodeDescription[i * 4
+ 2], nodeDescription[i * 4 + 3], x[i]);
}
}
fclose(fp);
printf("Number of written bars:%d\n", writtenBars);
}
cout << "Continue? (y/n): ";
cin >> doContinue;
if (doContinue == "n") {
doworking = 0;
}
}
print_time_message(&t1, "Computation");
//-------------------------------DeAllocation
hipFree(devStates);
hipFree(A_d);
hipFree(R_Idx_d);
hipFree(n_d);
hipFree(g_d);
hipFree(x_d);
hipFree(lambda_d);
// hipFree(Li_d);
print_time_message(&t1, "Device memory DeAllocated");
// cout << "Your name score is " << myName << "\n";
// cout << "Your weight in pounds is " << myWeight << "\n";
// cout << "Your height in inches is " << myHeight << "\n";
// cout << "Enter your height in inches: ";
// cin >> myHeight;
// string myName;
// cout << "Your name score is " << myName << "\n";
// cout << "Your weight in pounds is " << myWeight << "\n";
// cout << "Your height in inches is " << myHeight << "\n";
}
__global__ void IncreaseElement(float* x, int element, float* T) {
x[element] = x[element] + T[element];
}
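// ShrinkKernel evaluates, for every coordinate, the soft-threshold step t of the 1-D
// subproblem min_t 0.5*L*t^2 + derivative*t + lambda*|x + t|, storing t in T and the
// score t*t*L in E; E is later used to pick the greedy coordinate.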
__global__ void ShrinkKernel(float *T, float * E, float* derivatives, float* L,
float* x, float lambdaParameter, int n) {
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
if (id < n) {
float LLocal = L[id];
float Li = 1 / LLocal;
float xLocal = x[id];
float lambdaOverL = lambdaParameter * Li;
float alpha = derivatives[id] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
T[id] = t;
E[id] = t * t * LLocal;
}
}
__global__ void ShrinkKernelSubset(float *T, float * E, float* derivatives,
float* L, float* x, float lambdaParameter, int* ACounts, int* Indexes) {
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
if (id < ACounts[0]) {
id = Indexes[id];
float LLocal = L[id];
float Li = 1 / LLocal;
float xLocal = x[id];
float lambdaOverL = lambdaParameter * Li;
float alpha = derivatives[id] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
T[id] = t;
E[id] = t * t * LLocal;
}
}
void greedyTTD() {
int col, row, exampleType;
cout << "Enter number of columns: ";
col = 80;
cin >> col;
cout << "Enter number of rows: ";
row = 80;
cin >> row;
// cout << "Enter example type: ";
// cin >> exampleType;
exampleType = 7;
int m, n;
clock_t t1;//, t2;
float * A_TTD;
int * Row_IDX;
int * Col_IDX;
int nnzElements;
int* nodeDescription;
//-------------------------------GET BOUNDARY POINTS
int * boundary;
int boundarySize = 0;
getBoundaryVector(row, col, exampleType, &boundary, &boundarySize);
cout << "Boundary size is " << boundarySize << "\n";
//-------------------------------GENERATE PROBLEMS
t1 = clock();
generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX,
&Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription,
ROWSCALE);
print_time_message(&t1, "Getting problem dimension");
cout << "Dimension of your problem is " << m << " x " << n << "\n";
printf("Number of NNZ: %d\n", nnzElements);
//-------------------------------GET FORCE VECTORS
float* g;
getForceVector(&g, m, row, col, exampleType);
for (int i = 0; i < m; i++) {
g[i] = -g[i];
}
print_time_message(&t1, "Force vector obtained");
float x[n];
float L[n];
for (int i = 0; i < n; i++) {
x[i] = 0;
L[i] = 0;
for (int k = 4 * i; k < 4 * i + 4; k++) {
L[i] += A_TTD[k] * A_TTD[k];
}
}
//-------------------------------Initialization of the CuSparse library
/* allocate GPU memory and copy the matrix and vectors into it */
hipsparseStatus_t status;
hipsparseHandle_t handle = 0;
/* initialize cusparse library */
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed");
}
//-------------------------------Preparing A' in column-oriented storage
int ATcount[m];
int actualCount[m];
int rowCounts[m];
for (int i = 0; i < m; i++) {
ATcount[i] = 0;
actualCount[i] = 0;
}
for (int i = 0; i < nnzElements; i++) {
ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based
}
rowCounts[0] = ATcount[0];
for (int i = 1; i < m; i++) {
int tmpCount = ATcount[i];
rowCounts[i] = ATcount[i];
ATcount[i] += ATcount[i - 1];
actualCount[i] = ATcount[i] - tmpCount;
}
for (int i = 0; i < m; i++) {
ATcount[i] = actualCount[i];
}
float ATCB[nnzElements];
int ColAT[nnzElements];
for (int i = 0; i < n; i++) {
for (int j = 4 * i; j < 4 * i + 4; j++) {
int tmprow = Row_IDX[j] - 1;
ColAT[actualCount[tmprow]] = i;
ATCB[actualCount[tmprow]] = A_TTD[j];
actualCount[tmprow]++;
}
}
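	// ColAT/ATCB now hold a row-grouped (CSR-style) copy of A^T: for every residual row,
	// the column indices and values of the bars touching it, so updating one coordinate
	// only needs to revisit the columns that share its rows.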
//-------------------------------Allocation of device memory
float* A_d;
int* C_idx_d;
hipMalloc((void**) &A_d, nnzElements * sizeof(float));
checkCUDAError("kernel execution Alloc A_d");
hipMalloc((void**) &C_idx_d, nnzElements * sizeof(int));
checkCUDAError("kernel execution Alloc R_IDX");
int * n_d;
hipMalloc((void**) &n_d, 1 * sizeof(int));
checkCUDAError("kernel execution Alloc ...");
float* derivatives_d;
hipMalloc((void**) &derivatives_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc g_d");
float* L_d;
hipMalloc((void**) &L_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc Li_d");
int* ATcount_d;
hipMalloc((void**) &ATcount_d, m * sizeof(int));
checkCUDAError("kernel execution Alloc AtCount_d");
float* x_d;
hipMalloc((void**) &x_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc x_d");
float* lambda_d;
hipMalloc((void**) &lambda_d, 1 * sizeof(float));
checkCUDAError("kernel execution Alloc lambda_d");
// float* Li_d;
// hipMalloc((void**) &Li_d, n * sizeof(float));
// checkCUDAError("kernel execution Alloc Li_d");
print_time_message(&t1, "Device memory allocated");
//-------------------------------Copy data
hipMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float),
hipMemcpyHostToDevice);
checkCUDAError("kernel execution Copy A_d");
hipMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int),
hipMemcpyHostToDevice);
checkCUDAError("kernel execution Copy R");
hipMemcpy(derivatives_d, x, n * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
hipMemcpy(x_d, x, n * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
hipMemcpy(n_d, &n, 1 * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
hipMemcpy(L_d, L, n * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
for (int i = m - 1; i > 0; i--) {
actualCount[i] -= actualCount[i - 1];
}
// for (int i=0;i<m;i++)
//{
// printf("AT count[%d]=%d\n",i,actualCount[i]);
//}
hipMemcpy(ATcount_d, actualCount, m * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
print_time_message(&t1, "Data coppied");
//-------------------------------Prepare the derivatives vector on device
for (int i = 0; i < m; i++) {
if (g[i] != 0) {
hipsparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i],
g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]],
derivatives_d, HIPSPARSE_INDEX_BASE_ZERO);
if (copystatus != HIPSPARSE_STATUS_SUCCESS) {
printf("Nastala chyba!");
}
}
}
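	// derivatives_d now holds A^T * g, i.e. the gradient of the smooth part of the
	// objective at the starting point x = 0.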
print_time_message(&t1, "Derivatives vector on device");
//---------------------------------------------------------------------
float lambda = 0.0001;
//------------------------------Serial Benchmark-----------------------
float derivatives[n];
hipMemcpy(&derivatives[0], derivatives_d, n * sizeof(float),
hipMemcpyDeviceToHost);
print_time_message(&t1, "Initial Shrink Start");
int max_IDX = 0;
float max_VAL = 0;
float optimalStep = 0;
float TVALUES[n];
float EVALUES[n];
for (int j = 0; j < n; j++) {
float LLocal = L[j];
float Li = 1 / LLocal;
float xLocal = x[j];
float lambdaOverL = lambda * Li;
float alpha = derivatives[j] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
float tmpEnergy = t * t * LLocal;
TVALUES[j] = t;
EVALUES[j] = tmpEnergy;
if (tmpEnergy > max_VAL) {
optimalStep = t;
max_VAL = tmpEnergy;
max_IDX = j;
}
}
print_time_message(&t1, "Initial Shrink End");
print_time_message(&t1, "Start serial Code");
float setialExecutionTime = 0;
int setialItetaions = 10;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
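	// Serial greedy coordinate descent benchmark: take the coordinate with the largest
	// score E, apply its step, refresh derivatives/T/E only for the coordinates that
	// share a residual row with it, then rescan for the next maximum.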
for (int i = 0; i < setialItetaions; i++) {
// printf("optimal t_idx=%d, tval=%f\n", max_IDX, TVALUES[max_IDX]);
// Update
x[max_IDX] += TVALUES[max_IDX];
optimalStep = TVALUES[max_IDX];
for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) {
for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki]
- 1] + rowCounts[Row_IDX[ki] - 1]; jj++) {
int j = ColAT[jj];
derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj];
float LLocal = L[j];
float Li = 1 / LLocal;
float xLocal = x[j];
float lambdaOverL = lambda * Li;
float alpha = derivatives[j] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
TVALUES[j] = t;
EVALUES[j] = t * t * LLocal;
}
}
max_VAL = 0;
max_IDX = 0;
for (int j = 0; j < n; j++) {
if (EVALUES[j] > max_VAL) {
max_VAL = EVALUES[j];
max_IDX = j;
}
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&setialExecutionTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
float setialIterationPerSec = setialItetaions / setialExecutionTime * 1000;
printf("Serial execution time:%f ms = %f it/sec \n", setialExecutionTime,
setialIterationPerSec);
print_time_message(&t1, "Serial time for 10 iterations");
//-------------------------------Computation
float* T_dev;
float* E_dev;
hipblasAlloc(n, sizeof(float), (void**) &T_dev);
checkCUDAError("Alokacia T_dev");
hipblasAlloc(n, sizeof(float), (void**) &E_dev);
checkCUDAError("Alokacia E_dev");
int doworking = 1;
int iterations = 1;
float time;
hipsparseStatus_t copystatus;
while (doworking == 1) {
cout << "Current lambda is " << lambda
<< ". Do you want to change it? (y/n): ";
string doContinue;
cin >> doContinue;
if (doContinue == "y") {
cout << "Enter lambda: ";
cin >> lambda;
}
cout << "Enter number of iterations: ";
cin >> iterations;
hipMemcpy(lambda_d, &lambda, 1 * sizeof(float), hipMemcpyHostToDevice);
dim3 dimBlock( TOTALTHREDSPERBLOCK);
dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK));
float tPoint[1];
int maxIndex = 10;
hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d,
lambda, n);
int maxShrinkSubset = 0;
for (int i = 0; i < m; i++) {
if (rowCounts[i] > maxShrinkSubset)
maxShrinkSubset = rowCounts[i];
}
printf("Max shrink subset %d\n", maxShrinkSubset);
dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK));
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
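		// GPU greedy loop: pick the coordinate with the largest score via hipblasIsamax,
		// apply its step on the device, push the step into the derivatives with
		// cusparseSaxpyi, and re-shrink only the affected subset of coordinates.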
for (int i = 0; i < iterations; i++) {
// hipLaunchKernelGGL(( ShrinkKernel), dim3(dimGridRCDM) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d, L_d,x_d,
// lambda, n);
//maxIndex=10;
maxIndex = hipblasIsamax(n, E_dev, 1);
maxIndex = maxIndex - 1;
// printf("%d;Selected index:%d\n", i, maxIndex);
hipLaunchKernelGGL(( IncreaseElement), dim3(1) ,dim3(1) , 0, 0, x_d, maxIndex, T_dev);
checkCUDAError("IncreaseElement");
hipblasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1);
checkCUDAError("get T");
// printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]);
for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) {
// printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1);
copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1],
tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]],
&C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d,
HIPSPARSE_INDEX_BASE_ZERO);
if (copystatus != HIPSPARSE_STATUS_SUCCESS)
printf("Nastala chyba pri CuSparse!\n");
// hipDeviceSynchronize();
hipLaunchKernelGGL(( ShrinkKernelSubset), dim3(dimGridShrinkSubset) ,dim3(dimBlock) , 0, 0, T_dev, E_dev, derivatives_d,
L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]);
//
checkCUDAError("Shrink subset");
// hipDeviceSynchronize();
// ShrinkKernelSubset<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d,
// L_d, x_d, lambda, n, &C_idx_d[0]);
}
// hipDeviceSynchronize();
// hipMemcpy(x, derivatives_d, n * sizeof(float),
// hipMemcpyDeviceToHost);
// checkCUDAError("kernel execution : copy data Derivatives");
// for (int j = 0; j < n; j++) {
// printf("der[%d]=%f\n", j, x[j]);
// }
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("pridane %f \n", tPoint[0]);
printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, iterations
/ time * 1000, iterations / time * 1000 / setialIterationPerSec);
// double parallelPower = totalthreadsOnBlocks;
// parallelPower = parallelPower / time;
// parallelPower = parallelPower * 1000 * NMAXITERKernel;
// printf(
// "Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n",
// time, parallelPower, parallelPower / serialPower);
//
// L1Norm = hipblasSasum(n, x_d, 1);
// residualsSum = hipblasSnrm2(m, g_d, 1);
// objectiveValue = 0.5 * residualsSum * residualsSum;
// objectiveValue += lambda * L1Norm;
// printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i,
// L1Norm, residualsSum, objectiveValue);
cout << "Save particular solution to file? (y/n): ";
cin >> doContinue;
if (doContinue == "y") {
float treshHold;
cout << "Enter treshhold for x: ";
cin >> treshHold;
int writtenBars = 0;
FILE *fp;
fp = fopen("/tmp/ttd.txt", "w");
hipMemcpy(x, x_d, n * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError("kernel execution compy ....");
for (int i = 0; i < n; i++) {
if (abs(x[i]) > treshHold) {
writtenBars++;
fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4],
nodeDescription[i * 4 + 1], nodeDescription[i * 4
+ 2], nodeDescription[i * 4 + 3], x[i]);
}
}
fclose(fp);
printf("Number of written bars:%d\n", writtenBars);
}
cout << "Continue? (y/n): ";
cin >> doContinue;
if (doContinue == "n") {
doworking = 0;
}
}
print_time_message(&t1, "Computation");
//-------------------------------DeAllocation
hipFree(lambda_d);
hipFree(A_d);
hipFree(C_idx_d);
hipFree(n_d);
hipFree(x_d);
hipFree(derivatives_d);
hipFree(L_d);
hipFree(ATcount_d);
print_time_message(&t1, "Device memory DeAllocated");
}
int main(void) {
greedyTTD();
// calculateTTDProblem();
return 1;
}
void generateTTDProblem(int row, int col, int exampleType, int* mOut,
int* nOut, float** A, int** ColIDX, int ** RowIDX, int * nnzElements,
int* boundary, int boundarySize, int** nodeDescription, int rowScale) {
float kappa = 1;
float scale = sqrt(kappa);
int m = row * col;
float A_values[MaxTTD_NNZ_ELEMENTS];
int Rows_A[MaxTTD_NNZ_ELEMENTS];
int Cols_A[MaxTTD_NNZ_ELEMENTS];
*nodeDescription = new int[MaxTTD_NNZ_ELEMENTS*4];
int nodeDescriptionId = 0;
int nnz = 0;
int node = 0;
int tt = col;
if (row > tt)
tt = row;
int GSDS[tt + 1][tt + 1];
for (int i = 1; i <= tt; i++) {
for (int j = i; j <= tt; j++) {
GSDS[i][j] = GCD(i, j);
GSDS[j][i] = GCD(i, j);
}
}
int betta[2];
float beta[2];
for (int j = 1; j <= col; j++) {
for (int i = 1; i <= col; i++) {
for (int k = 1; k <= row; k++) {
for (int l = 1; l <= col - j; l++) {
betta[0] = rowScale * l;// for node (i,j) we add bars to all (k,j+i)
betta[1] = -k + i;
scaleBetta(betta, beta, scale);
betta[0] = l;
if (col == j)
continue;
if (l > 1) {
int skip = 0;
int ta = minimabs(betta);
int tb = maximabs(betta);
if (ta == 0) {
skip = 1;
} else {
if (GSDS[ta][tb] > 1) {
skip = 1;
}
}
if (skip)
continue;
}
int totalMatchA = 0;
int totalMatchB = 0;
for (int bi = 0; bi < boundarySize; bi++) {
if (boundary[bi] == row * (j - 1) + i)
totalMatchA++;
if (boundary[bi] == row * (j) + k + (l - 1) * row)
totalMatchB++;
}
if (totalMatchA + totalMatchB < 2) {
node++;
int tmp = row * (j - 1) + i;
A_values[nnz] = -(1 - totalMatchA) * beta[0];
Rows_A[nnz] = tmp;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = -(1 - totalMatchA) * beta[1];
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
tmp = row * (j) + k + (l - 1) * row;
A_values[nnz] = (1 - totalMatchB) * beta[0];
Rows_A[nnz] = tmp;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = (1 - totalMatchB) * beta[1];
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
(*nodeDescription)[nodeDescriptionId] = i;
(*nodeDescription)[nodeDescriptionId + 1] = j;
(*nodeDescription)[nodeDescriptionId + 2] = k;
(*nodeDescription)[nodeDescriptionId + 3] = l + j;
nodeDescriptionId += 4;
}
}
}
if (i < row) {
int tmp = i + (j - 1) * row;
int totalMatchA = 0;
int totalMatchB = 0;
for (int bi = 0; bi < boundarySize; bi++) {
if (boundary[bi] == tmp)
totalMatchA++;
if (boundary[bi] == tmp + 1)
totalMatchB++;
}
if (totalMatchA + totalMatchB < 2) {
node = node + 1;
A_values[nnz] = -(1 - totalMatchA);
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = 0; //fake node
Rows_A[nnz] = tmp + 1;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = 0; //fake node
Rows_A[nnz] = tmp + 2;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = (1 - totalMatchB);
Rows_A[nnz] = tmp + m + 1;
Cols_A[nnz] = node;
nnz++;
(*nodeDescription)[nodeDescriptionId] = i;
(*nodeDescription)[nodeDescriptionId + 1] = j;
(*nodeDescription)[nodeDescriptionId + 2] = i + 1;
(*nodeDescription)[nodeDescriptionId + 3] = j;
nodeDescriptionId += 4;
}
}
}
	}
	*A = new float[nnz];
*ColIDX = new int[nnz];
*RowIDX = new int[nnz];
for (int i = 0; i < nnz; i++) {
(*A)[i] = A_values[i];
(*ColIDX)[i] = Cols_A[i];
(*RowIDX)[i] = Rows_A[i];
}
*nOut = node;
*mOut = row * col * 2;
*nnzElements = nnz;
}
void getBoundaryVector(int row, int col, int exampleType, int** boundary,
int* boudarySize) {
switch (exampleType) {
case 1:
*boundary = new int[row];
for (int i = 0; i < row; i++) {
(*boundary)[i] = i + 1;
}
// boundaryIDX(1, i) = 1;
// boundaryIDX(2, i) = i;
*boudarySize = row;
break;
case 2:
*boundary = new int[4];
(*boundary)[0] = row;
(*boundary)[1] = row * col;
(*boundary)[2] = row * col - row * (col / 3);
(*boundary)[3] = row + row * (col / 3);
*boudarySize = 4;
break;
case 3:
*boundary = new int[4];
(*boundary)[0] = row - 5;
(*boundary)[1] = row * col - 5;
(*boundary)[2] = row * col - row * (col / 3);
(*boundary)[3] = row + row * (col / 3);
*boudarySize = 4;
break;
case 5:
*boundary = new int[0];
*boudarySize = 0;
break;
case 7:
*boundary = new int[2];
(*boundary)[0] = row;
(*boundary)[1] = row * col;
*boudarySize = 2;
break;
default:
break;
}
}
// if (boundarytype==3) %bridge
//
// boundary(1,1)=r;
// boundary(1, 4) = r + r * (floor(c / 3));
// boundary(1, 3) = r * c - r * floor(c / 3);
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// boundaryIDX(1, 3) = floor(c / 3) + 1;
// boundaryIDX(2, 3) = r;
// boundaryIDX(1, 4) = c - floor(c / 3);
// boundaryIDX(2, 4) = r;
//
// end
// if (boundarytype==4) %
//
// boundary(1,1)=r;
// % boundary(1,4)=r+r*(floor(c/5));
// % boundary(1,3)=r*c-r*floor(c/5);
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// % boundaryIDX(1,3)=floor(c/5)+1;
// % boundaryIDX(2,3)=r;
// % boundaryIDX(1,4)=c-floor(c/5);
// % boundaryIDX(2,4)=r;
//
// end
//
// if (boundarytype==5) %
//
// end
//
// if (boundarytype==6) %
//
// boundary(1,1)=r;
//
// boundary(1, 4) = r + r * (floor(c / 5));
// boundary(1, 3) = r * c - r * floor(c / 5);
// boundary(1, 5) = r + r * 2 * (floor(c / 5));
// boundary(1, 6) = r * c - r * 2 * floor(c / 5);
//
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// boundaryIDX(1, 3) = floor(c / 5) + 1;
// boundaryIDX(2, 3) = r;
// boundaryIDX(1, 4) = c - floor(c / 5);
// boundaryIDX(2, 4) = r;
//
// boundaryIDX(1, 5) = 2 * floor(c / 5) + 1;
// boundaryIDX(2, 5) = r;
// boundaryIDX(1, 6) = c - 2 * floor(c / 5);
// boundaryIDX(2, 6) = r;
//
// end
void scaleBetta(int* betta, float* beta, float scale) {
float tmp = scale / (betta[0] * betta[0] + betta[1] * betta[1]);
beta[0] = betta[0] * tmp;
beta[1] = betta[1] * tmp;
}
int GCD(int a, int b) {
while (1) {
a = a % b;
if (a == 0)
return b;
b = b % a;
if (b == 0)
return a;
}
}
void getForceVector(float ** d, int m, int r, int c, int exampleType) {
// int mid = r / 2;
// int midr =(r/2)+1;
// int midc = (c/2)+1;
*d=new float[m];
m = m / 2;
int tmp;
switch (exampleType) {
case 1:
// tmp = r * (c - 1) + mid + 1;
tmp = r * c;
(*d)[tmp + m - 1] = -1;
(*d)[tmp - 1] = 2;
break;
case 2:
for (int cc = 2; cc < c-1; cc++) {
(*d)[-2 + r * cc + m - 1] = -1;
}
break;
case 3:
for (int cc = 2; cc < c; cc++) {
(*d)[-2 + r * cc + m - 1] = -1;
}
break;
case 5:
break;
case 7:
for (int cc = 3; cc < c-3+1; cc++) {
(*d)[-5 + r * cc + m - 1] = -1;
}
break;
default:
break;
}
// if (boundarytype==3)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
// for cc=2:c-1
// d(-2+r*cc+m) = -1;
// end
// end
//
// if (boundarytype==6)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
// for cc=2:c-1
// d(-1+r*cc+m) = -1;
// end
// end
//
// if (boundarytype==4)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
//
//
// for cc=3:c-2
// d(-12+r*cc+m) = -1;
// end
//
// for asdf=1:13
// for cc=6:c-5
// %d(-8-asdf+r*cc+m) = -1;
// end
// end
//
//
// for asdf=1:17
// for cc=6:6
// d(-12-asdf+r*cc+m) = -1;
// if (asdf<17)
// d(-12-asdf+r*cc) = (-1)^asdf;
// end
// end
// end
//
//
// end
}
int maximabs(int * betta) {
if (abs(betta[0]) >= abs(betta[1]))
return abs(betta[0]);
else
return abs(betta[1]);
}
int minimabs(int * betta) {
if (abs(betta[0]) <= abs(betta[1]))
return abs(betta[0]);
else
return abs(betta[1]);
}
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(-1);
}
}
__global__ void setup_kernel(hiprandState_t *state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets different seed, a different sequence number, no offset */
hiprand_init(i, i, 0, &state[i]);
}
| 11b772792d2f1d6e9647c20d58cda71f188a0d5f.cu | //ulimit -s unlimited
//nvcc -lcusparse test.cu
//nvcc -lcublas -lcusparse -arch sm_20 test.cu && ./a.out
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include "device_functions.h"
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include "cublas.h"
#include "cusparse.h"
#define MaxTTD_NNZ_ELEMENTS 300000000
#define TOTALTHREDSPERBLOCK 256
#define COLUMNLENGTH 4
#define NMAXITERKernel 100000
#define ROWSCALE 2
using namespace std;
void generateTTDProblem(int row, int col, int exampleType, int* mOut,
int* nOut, float** A, int** ColIDX, int ** RowIDX, int * nnzElements,
int* boundary, int boundarySize, int** nodeDescription, int rowScale);
void getBoundaryVector(int row, int col, int exampleType, int** boundary,
int* boudarySize);
void getForceVector(float ** d, int m, int r, int c, int exampleType);
int GCD(int a, int b);
__global__ void setup_kernel(curandState *state);
void checkCUDAError(const char *msg);
void scaleBetta(int* betta, float* beta, float scale);
int minimabs(int * betta);
int maximabs(int * betta);
void print_time_message(clock_t* t1, char message[200]) {
clock_t t2 = clock();
double diff = ((float) t2 - (float) (*t1)) / 1000000.0F;
printf("%s: %f sec.\n", message, diff);
*t1 = clock();
}
double getElapsetTime(clock_t* t1) {
clock_t t2 = clock();
double diff = ((float) t2 - (float) (*t1)) / 1000000.0F;
*t1 = clock();
return diff;
}
__global__ void RCDMKernel(float *A, int*R_Idx, int* n, float*residuals,
float*x, float * lambda, curandState* cstate) {
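	// Randomized coordinate descent: every thread repeatedly draws a random column,
	// forms the partial derivative from the residuals, applies the soft-threshold
	// (shrinkage) step for the L1 term, and updates x and the residuals atomically.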
int j, i, k;
float delta, tmp; //partialDetivative
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
curandState localState = cstate[id];
__shared__ float partialDetivative[TOTALTHREDSPERBLOCK];
float xLocal;
float LiLocal;
float ALocal[COLUMNLENGTH];
int cidx;
int RIDX[COLUMNLENGTH];
for (k = 0; k < NMAXITERKernel; k++) {
double d = curand_uniform_double(&localState);
int idx = (int) (d * n[0]);
// LOAD A, R, residuals
// float* residualsAddress[COLUMNLENGTH];
xLocal = x[idx];
// LiLocal = Li[idx];
cidx = idx * COLUMNLENGTH;
		partialDetivative[threadIdx.x] = 0;
		LiLocal = 0; /* reset the accumulated column norm for this coordinate */
// #pragma unroll COLUMNLENGTH
for (i = 0; i < COLUMNLENGTH; i++) {
j = cidx + i;
ALocal[i] = A[j];
LiLocal += ALocal[i] * ALocal[i];
RIDX[i] = R_Idx[j] - 1;
// residualsAddress[i] = &residuals[RIDX[i]];
partialDetivative[threadIdx.x] += ALocal[i] * residuals[RIDX[i]];
}
LiLocal = 1 / LiLocal;
tmp = LiLocal * (partialDetivative[threadIdx.x] + lambda[0]);
if (xLocal > tmp) {
delta = -tmp;
} else {
tmp = LiLocal * (partialDetivative[threadIdx.x] - lambda[0]);
if (xLocal < tmp) {
delta = -tmp;
} else {
delta = -xLocal;
}
}
atomicAdd(&x[idx], delta);
// x[idx]+=delta;
// atomicAdd(&x[idx], 1);
for (i = 0; i < COLUMNLENGTH; i++) {
atomicAdd(&residuals[RIDX[i]], ALocal[i] * delta);
// residuals[RIDX[i]]+= ALocal[i] * delta;
// atomicAdd(residualsAddress[i], ALocal[i] * delta);
}
}
// cstate[id] = localState;
}
void calculateTTDProblem() {
int dim1 = 14 * 2;
int dim2 = 1;
int totalThreads = TOTALTHREDSPERBLOCK;
int col, row, exampleType;
// cout << "Enter number of columns: ";
col = 115;
// cin >> col;
// cout << "Enter number of rows: ";
row = 115;
// cin >> row;
// cout << "Enter example type: ";
// cin >> exampleType;
exampleType = 2;
int m, n;
clock_t t1;//, t2;
float * A_TTD;
int * Row_IDX;
int * Col_IDX;
int nnzElements;
int* nodeDescription;
//-------------------------------GET BOUNDARY POINTS
int * boundary;
int boundarySize = 0;
getBoundaryVector(row, col, exampleType, &boundary, &boundarySize);
cout << "Boundary size is " << boundarySize << "\n";
//-------------------------------GENERATE PROBLEMS
t1 = clock();
generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX,
&Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription,
ROWSCALE);
print_time_message(&t1, "Getting problem dimension");
cout << "Dimension of your problem is " << m << " x " << n << "\n";
printf("Number of NNZ: %d\n", nnzElements);
//-------------------------------GET FORCE VECTORS
float* g;
getForceVector(&g, m, row, col, exampleType);
for (int i = 0; i < m; i++) {
g[i] = -g[i];
}
print_time_message(&t1, "Force vector obtained");
float x[n];
float Li[n];
for (int i = 0; i < n; i++) {
x[i] = 0;
Li[i] = 0;
for (int k = 4 * i; k < 4 * i + 4; k++) {
Li[i] += A_TTD[k] * A_TTD[k];
}
if (Li[i] > 0) {
Li[i] = 1 / Li[i];
}
}
//-------------------------------Initialization of the CuSparse library
/* allocate GPU memory and copy the matrix and vectors into it */
dim3 dimBlock( totalThreads);
dim3 dimGridRCDM( dim1, dim2);
int totalthreadsOnBlocks = dim1 * dim2 * totalThreads;
curandState *devStates;
cudaMalloc((void **) &devStates, totalthreadsOnBlocks * sizeof(curandState));
setup_kernel<<< dimGridRCDM, dimBlock >>>(devStates);
checkCUDAError("Inicializacia ranom states");
float* A_d;
int* R_Idx_d;
cudaMalloc((void**) &A_d, nnzElements * sizeof(float));
checkCUDAError("kernel execution Alloc A_d");
cudaMalloc((void**) &R_Idx_d, nnzElements * sizeof(int));
checkCUDAError("kernel execution Alloc R_IDX");
int * n_d;
cudaMalloc((void**) &n_d, 1 * sizeof(int));
checkCUDAError("kernel execution Alloc ...");
float* g_d;
cudaMalloc((void**) &g_d, m * sizeof(float));
checkCUDAError("kernel execution Alloc g_d");
float* x_d;
cudaMalloc((void**) &x_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc x_d");
float* lambda_d;
cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
checkCUDAError("kernel execution Alloc lambda_d");
// float* Li_d;
// cudaMalloc((void**) &Li_d, n * sizeof(float));
// checkCUDAError("kernel execution Alloc Li_d");
print_time_message(&t1, "Device memory allocated");
//------------------------------- COPY DATA
cudaMemcpy(A_d, A_TTD, nnzElements * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution Copy A_d");
cudaMemcpy(R_Idx_d, Row_IDX, nnzElements * sizeof(int),
cudaMemcpyHostToDevice);
checkCUDAError("kernel execution Copy R");
cudaMemcpy(g_d, g, m * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
// cudaMemcpy(Li_d, Li, n * sizeof(float), cudaMemcpyHostToDevice);
// checkCUDAError("kernel execution compy ....");
print_time_message(&t1, "Data coppied");
//-------------------------------serial code
float time;
double serialPower;
float lambda = 0.0001;
double serialObjective = 0;
for (int i = 0; i < m; i++)
serialObjective += g[i] * g[i];
serialObjective = serialObjective * 0.5;
printf("Serial code execution. Objective at beggining: %f\n",
serialObjective);
int SerialContractor = 1;
getElapsetTime(&t1);
for (int i = 0; i < n / SerialContractor; i++) {
int idx = (int) (n * (rand() / (RAND_MAX + 1.0)));
float tmp = 0;
for (int j = idx * 4; j < idx * 4 + 4; j++) {
			tmp += A_TTD[j] * g[Row_IDX[j] - 1]; /* Row_IDX is 1-based */
}
float tmp1 = Li[idx] * (tmp + lambda);
if (x[idx] > tmp1) {
tmp = -tmp1;
} else {
tmp1 = Li[idx] * (tmp - lambda);
if (x[idx] < tmp1) {
tmp = -tmp1;
} else {
tmp = -x[idx];
}
}
x[idx] += tmp;
//update residuals:
for (int j = 4 * idx; j < 4 * idx + 4; j++) {
			g[Row_IDX[j] - 1] += tmp * A_TTD[j]; /* Row_IDX is 1-based */
}
}
time = getElapsetTime(&t1);
serialPower = n / time;
serialPower = (double) serialPower / SerialContractor;
printf("Serial code duration: %f, %f iter/sec\n", time, n / time);
serialObjective = 0;
for (int i = 0; i < m; i++)
serialObjective += g[i] * g[i];
double serialL1Norm = 0;
for (int j = 0; j < n; j++)
serialL1Norm += abs(x[j]);
serialObjective = 0.5 * serialObjective + lambda * serialL1Norm;
printf("Serial code execution. Objective after n iterations: %f\n",
serialObjective);
//-------------------------------Computation
int doworking = 1;
int iterations = 1;
while (doworking == 1) {
cout << "Current lambda is " << lambda
<< ". Do you want to change it? (y/n): ";
string doContinue;
cin >> doContinue;
if (doContinue == "y") {
cout << "Enter lambda: ";
cin >> lambda;
}
cout << "Enter number of iterations: ";
cin >> iterations;
float L1Norm = cublasSasum(n, x_d, 1);
float residualsSum = cublasSdot(m, g_d, 1, g_d, 1);
double objectiveValue = 0.5 * residualsSum;
objectiveValue += lambda * L1Norm;
printf("L1 norm = %f, redisuals=%f, objective=%1.16f\n", L1Norm,
residualsSum, objectiveValue);
cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < iterations; i++) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
RCDMKernel<<< dimGridRCDM, dimBlock >>>(A_d, R_Idx_d, n_d,
g_d, x_d, lambda_d, devStates);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
double parallelPower = totalthreadsOnBlocks;
parallelPower = parallelPower / time;
parallelPower = parallelPower * 1000 * NMAXITERKernel;
printf(
"Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n",
time, parallelPower, parallelPower / serialPower);
cudaEventDestroy(start);
cudaEventDestroy(stop);
L1Norm = cublasSasum(n, x_d, 1);
residualsSum = cublasSnrm2(m, g_d, 1);
objectiveValue = 0.5 * residualsSum * residualsSum;
objectiveValue += lambda * L1Norm;
printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i,
L1Norm, residualsSum, objectiveValue);
}
cout << "Save particular solution to file? (y/n): ";
cin >> doContinue;
if (doContinue == "y") {
float treshHold;
cout << "Enter treshhold for x: ";
cin >> treshHold;
int writtenBars = 0;
FILE *fp;
fp = fopen("/tmp/ttd.txt", "w");
cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError("kernel execution compy ....");
for (int i = 0; i < n; i++) {
if (abs(x[i]) > treshHold) {
writtenBars++;
fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4],
nodeDescription[i * 4 + 1], nodeDescription[i * 4
+ 2], nodeDescription[i * 4 + 3], x[i]);
}
}
fclose(fp);
printf("Number of written bars:%d\n", writtenBars);
}
cout << "Continue? (y/n): ";
cin >> doContinue;
if (doContinue == "n") {
doworking = 0;
}
}
print_time_message(&t1, "Computation");
//-------------------------------DeAllocation
cudaFree(devStates);
cudaFree(A_d);
cudaFree(R_Idx_d);
cudaFree(n_d);
cudaFree(g_d);
cudaFree(x_d);
cudaFree(lambda_d);
// cudaFree(Li_d);
print_time_message(&t1, "Device memory DeAllocated");
// cout << "Your name score is " << myName << "\n";
// cout << "Your weight in pounds is " << myWeight << "\n";
// cout << "Your height in inches is " << myHeight << "\n";
// cout << "Enter your height in inches: ";
// cin >> myHeight;
// string myName;
// cout << "Your name score is " << myName << "\n";
// cout << "Your weight in pounds is " << myWeight << "\n";
// cout << "Your height in inches is " << myHeight << "\n";
}
__global__ void IncreaseElement(float* x, int element, float* T) {
x[element] = x[element] + T[element];
}
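// ShrinkKernel evaluates, for every coordinate, the soft-threshold step t of the 1-D
// subproblem min_t 0.5*L*t^2 + derivative*t + lambda*|x + t|, storing t in T and the
// score t*t*L in E; E is later used to pick the greedy coordinate.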
__global__ void ShrinkKernel(float *T, float * E, float* derivatives, float* L,
float* x, float lambdaParameter, int n) {
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
if (id < n) {
float LLocal = L[id];
float Li = 1 / LLocal;
float xLocal = x[id];
float lambdaOverL = lambdaParameter * Li;
float alpha = derivatives[id] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
T[id] = t;
E[id] = t * t * LLocal;
}
}
__global__ void ShrinkKernelSubset(float *T, float * E, float* derivatives,
float* L, float* x, float lambdaParameter, int* ACounts, int* Indexes) {
int id = (blockIdx.y * blockDim.x * blockDim.y + blockIdx.x * blockDim.x
+ threadIdx.x);
if (id < ACounts[0]) {
id = Indexes[id];
float LLocal = L[id];
float Li = 1 / LLocal;
float xLocal = x[id];
float lambdaOverL = lambdaParameter * Li;
float alpha = derivatives[id] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
T[id] = t;
E[id] = t * t * LLocal;
}
}
void greedyTTD() {
int col, row, exampleType;
cout << "Enter number of columns: ";
col = 80;
cin >> col;
cout << "Enter number of rows: ";
row = 80;
cin >> row;
// cout << "Enter example type: ";
// cin >> exampleType;
exampleType = 7;
int m, n;
clock_t t1;//, t2;
float * A_TTD;
int * Row_IDX;
int * Col_IDX;
int nnzElements;
int* nodeDescription;
//-------------------------------GET BOUNDARY POINTS
int * boundary;
int boundarySize = 0;
getBoundaryVector(row, col, exampleType, &boundary, &boundarySize);
cout << "Boundary size is " << boundarySize << "\n";
//-------------------------------GENERATE PROBLEMS
t1 = clock();
generateTTDProblem(row, col, exampleType, &m, &n, &A_TTD, &Col_IDX,
&Row_IDX, &nnzElements, boundary, boundarySize, &nodeDescription,
ROWSCALE);
print_time_message(&t1, "Getting problem dimension");
cout << "Dimension of your problem is " << m << " x " << n << "\n";
printf("Number of NNZ: %d\n", nnzElements);
//-------------------------------GET FORCE VECTORS
float* g;
getForceVector(&g, m, row, col, exampleType);
for (int i = 0; i < m; i++) {
g[i] = -g[i];
}
print_time_message(&t1, "Force vector obtained");
float x[n];
float L[n];
for (int i = 0; i < n; i++) {
x[i] = 0;
L[i] = 0;
for (int k = 4 * i; k < 4 * i + 4; k++) {
L[i] += A_TTD[k] * A_TTD[k];
}
}
//-------------------------------Initialization of the CuSparse library
/* allocate GPU memory and copy the matrix and vectors into it */
cusparseStatus_t status;
cusparseHandle_t handle = 0;
/* initialize cusparse library */
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed");
}
//-------------------------------Preparing A' in column-oriented storage
int ATcount[m];
int actualCount[m];
int rowCounts[m];
for (int i = 0; i < m; i++) {
ATcount[i] = 0;
actualCount[i] = 0;
}
for (int i = 0; i < nnzElements; i++) {
ATcount[Row_IDX[i] - 1]++; // Shift from 1-based to 0 based
}
rowCounts[0] = ATcount[0];
for (int i = 1; i < m; i++) {
int tmpCount = ATcount[i];
rowCounts[i] = ATcount[i];
ATcount[i] += ATcount[i - 1];
actualCount[i] = ATcount[i] - tmpCount;
}
for (int i = 0; i < m; i++) {
ATcount[i] = actualCount[i];
}
float ATCB[nnzElements];
int ColAT[nnzElements];
for (int i = 0; i < n; i++) {
for (int j = 4 * i; j < 4 * i + 4; j++) {
int tmprow = Row_IDX[j] - 1;
ColAT[actualCount[tmprow]] = i;
ATCB[actualCount[tmprow]] = A_TTD[j];
actualCount[tmprow]++;
}
}
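	// ColAT/ATCB now hold a row-grouped (CSR-style) copy of A^T: for every residual row,
	// the column indices and values of the bars touching it, so updating one coordinate
	// only needs to revisit the columns that share its rows.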
//-------------------------------Allocation of device memory
float* A_d;
int* C_idx_d;
cudaMalloc((void**) &A_d, nnzElements * sizeof(float));
checkCUDAError("kernel execution Alloc A_d");
cudaMalloc((void**) &C_idx_d, nnzElements * sizeof(int));
checkCUDAError("kernel execution Alloc R_IDX");
int * n_d;
cudaMalloc((void**) &n_d, 1 * sizeof(int));
checkCUDAError("kernel execution Alloc ...");
float* derivatives_d;
cudaMalloc((void**) &derivatives_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc g_d");
float* L_d;
cudaMalloc((void**) &L_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc Li_d");
int* ATcount_d;
cudaMalloc((void**) &ATcount_d, m * sizeof(int));
checkCUDAError("kernel execution Alloc AtCount_d");
float* x_d;
cudaMalloc((void**) &x_d, n * sizeof(float));
checkCUDAError("kernel execution Alloc x_d");
float* lambda_d;
cudaMalloc((void**) &lambda_d, 1 * sizeof(float));
checkCUDAError("kernel execution Alloc lambda_d");
// float* Li_d;
// cudaMalloc((void**) &Li_d, n * sizeof(float));
// checkCUDAError("kernel execution Alloc Li_d");
print_time_message(&t1, "Device memory allocated");
//-------------------------------Copy data
cudaMemcpy(A_d, &ATCB[0], nnzElements * sizeof(float),
cudaMemcpyHostToDevice);
checkCUDAError("kernel execution Copy A_d");
cudaMemcpy(C_idx_d, &ColAT[0], nnzElements * sizeof(int),
cudaMemcpyHostToDevice);
checkCUDAError("kernel execution Copy R");
cudaMemcpy(derivatives_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
cudaMemcpy(x_d, x, n * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
cudaMemcpy(n_d, &n, 1 * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
cudaMemcpy(L_d, L, n * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
for (int i = m - 1; i > 0; i--) {
actualCount[i] -= actualCount[i - 1];
}
// for (int i=0;i<m;i++)
//{
// printf("AT count[%d]=%d\n",i,actualCount[i]);
//}
cudaMemcpy(ATcount_d, actualCount, m * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("kernel execution compy ....");
print_time_message(&t1, "Data coppied");
//-------------------------------Prepare the derivatives vector on device
for (int i = 0; i < m; i++) {
if (g[i] != 0) {
cusparseStatus_t copystatus = cusparseSaxpyi(handle, rowCounts[i],
g[i], &A_d[ATcount[i]], &C_idx_d[ATcount[i]],
derivatives_d, CUSPARSE_INDEX_BASE_ZERO);
if (copystatus != CUSPARSE_STATUS_SUCCESS) {
printf("Nastala chyba!");
}
}
}
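	// derivatives_d now holds A^T * g, i.e. the gradient of the smooth part of the
	// objective at the starting point x = 0.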
print_time_message(&t1, "Derivatives vector on device");
//---------------------------------------------------------------------
float lambda = 0.0001;
//------------------------------Serial Benchmark-----------------------
float derivatives[n];
cudaMemcpy(&derivatives[0], derivatives_d, n * sizeof(float),
cudaMemcpyDeviceToHost);
print_time_message(&t1, "Initial Shrink Start");
int max_IDX = 0;
float max_VAL = 0;
float optimalStep = 0;
float TVALUES[n];
float EVALUES[n];
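	// Serial reference shrink step: for each coordinate j, t below is the soft-thresholded
	// coordinate-descent step of the L1-regularized problem (threshold lambda / L[j]), and
	// EVALUES[j] = L[j] * t^2 is the greedy score used to pick the coordinate with the
	// largest estimated decrease.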
for (int j = 0; j < n; j++) {
float LLocal = L[j];
float Li = 1 / LLocal;
float xLocal = x[j];
float lambdaOverL = lambda * Li;
float alpha = derivatives[j] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
float tmpEnergy = t * t * LLocal;
TVALUES[j] = t;
EVALUES[j] = tmpEnergy;
if (tmpEnergy > max_VAL) {
optimalStep = t;
max_VAL = tmpEnergy;
max_IDX = j;
}
}
print_time_message(&t1, "Initial Shrink End");
print_time_message(&t1, "Start serial Code");
	float serialExecutionTime = 0;
	int serialIterations = 10;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
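	// Greedy serial coordinate descent: apply the best step found so far, then walk the four
	// matrix rows touched by the chosen bar, update the derivatives of every coordinate
	// appearing in those rows, re-shrink only those coordinates, and finally rescan EVALUES
	// for the next best coordinate.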
	for (int i = 0; i < serialIterations; i++) {
// printf("optimal t_idx=%d, tval=%f\n", max_IDX, TVALUES[max_IDX]);
// Update
x[max_IDX] += TVALUES[max_IDX];
optimalStep = TVALUES[max_IDX];
for (int ki = max_IDX * 4; ki < max_IDX * 4 + 4; ki++) {
for (int jj = ATcount[Row_IDX[ki] - 1]; jj < ATcount[Row_IDX[ki]
- 1] + rowCounts[Row_IDX[ki] - 1]; jj++) {
int j = ColAT[jj];
derivatives[j] += optimalStep * A_TTD[ki] * ATCB[jj];
float LLocal = L[j];
float Li = 1 / LLocal;
float xLocal = x[j];
float lambdaOverL = lambda * Li;
float alpha = derivatives[j] * Li;
float deltaL = xLocal - alpha - lambdaOverL;
float deltaR = xLocal - alpha + lambdaOverL;
float t;
if (deltaL > 0)
t = deltaL - xLocal;
else if (deltaR < 0)
t = deltaR - xLocal;
else
t = -xLocal;
TVALUES[j] = t;
EVALUES[j] = t * t * LLocal;
}
}
max_VAL = 0;
max_IDX = 0;
for (int j = 0; j < n; j++) {
if (EVALUES[j] > max_VAL) {
max_VAL = EVALUES[j];
max_IDX = j;
}
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
	cudaEventElapsedTime(&serialExecutionTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
	float serialIterationPerSec = serialIterations / serialExecutionTime * 1000;
	printf("Serial execution time: %f ms = %f it/sec\n", serialExecutionTime,
			serialIterationPerSec);
print_time_message(&t1, "Serial time for 10 iterations");
//-------------------------------Computation
float* T_dev;
float* E_dev;
cublasAlloc(n, sizeof(float), (void**) &T_dev);
checkCUDAError("Alokacia T_dev");
cublasAlloc(n, sizeof(float), (void**) &E_dev);
checkCUDAError("Alokacia E_dev");
int doworking = 1;
int iterations = 1;
float time;
cusparseStatus_t copystatus;
while (doworking == 1) {
cout << "Current lambda is " << lambda
<< ". Do you want to change it? (y/n): ";
string doContinue;
cin >> doContinue;
if (doContinue == "y") {
cout << "Enter lambda: ";
cin >> lambda;
}
cout << "Enter number of iterations: ";
cin >> iterations;
cudaMemcpy(lambda_d, &lambda, 1 * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimBlock( TOTALTHREDSPERBLOCK);
dim3 dimGridRCDM( 1, 1+n/(TOTALTHREDSPERBLOCK));
float tPoint[1];
int maxIndex = 10;
ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d,
lambda, n);
int maxShrinkSubset = 0;
for (int i = 0; i < m; i++) {
if (rowCounts[i] > maxShrinkSubset)
maxShrinkSubset = rowCounts[i];
}
printf("Max shrink subset %d\n", maxShrinkSubset);
dim3 dimGridShrinkSubset( 1, 1+maxShrinkSubset/(TOTALTHREDSPERBLOCK));
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
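		// GPU greedy iteration: cublasIsamax returns the (1-based) index of the largest entry
		// of E_dev, IncreaseElement (defined earlier in this file) applies the selected step to
		// x_d on the device, and for each of the four rows touched by that bar cusparseSaxpyi
		// updates the derivatives while ShrinkKernelSubset re-shrinks only the affected
		// coordinates.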
for (int i = 0; i < iterations; i++) {
// ShrinkKernel<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d, L_d,x_d,
// lambda, n);
//maxIndex=10;
maxIndex = cublasIsamax(n, E_dev, 1);
maxIndex = maxIndex - 1;
// printf("%d;Selected index:%d\n", i, maxIndex);
IncreaseElement<<< 1 ,1 >>>(x_d, maxIndex, T_dev);
checkCUDAError("IncreaseElement");
cublasGetVector(1, sizeof(float), &T_dev[maxIndex], 1, tPoint, 1);
checkCUDAError("get T");
// printf("optimal t_idx=%d and value = %f\n", maxIndex, tPoint[0]);
for (int ki = maxIndex * 4; ki < maxIndex * 4 + 4; ki++) {
// printf("pridat %f %f %d\n", tPoint[0], A_TTD[ki],Row_IDX[ki]-1);
copystatus = cusparseSaxpyi(handle, rowCounts[Row_IDX[ki] - 1],
tPoint[0] * A_TTD[ki], &A_d[ATcount[Row_IDX[ki] - 1]],
&C_idx_d[ATcount[Row_IDX[ki] - 1]], derivatives_d,
CUSPARSE_INDEX_BASE_ZERO);
if (copystatus != CUSPARSE_STATUS_SUCCESS)
printf("Nastala chyba pri CuSparse!\n");
// cudaThreadSynchronize();
ShrinkKernelSubset<<< dimGridShrinkSubset ,dimBlock >>>(T_dev, E_dev, derivatives_d,
L_d, x_d, lambda, &ATcount_d[Row_IDX[ki] - 1], &C_idx_d[ATcount[Row_IDX[ki] - 1]]);
//
checkCUDAError("Shrink subset");
// cudaThreadSynchronize();
// ShrinkKernelSubset<<< dimGridRCDM ,dimBlock >>>(T_dev, E_dev, derivatives_d,
// L_d, x_d, lambda, n, &C_idx_d[0]);
}
// cudaThreadSynchronize();
// cudaMemcpy(x, derivatives_d, n * sizeof(float),
// cudaMemcpyDeviceToHost);
// checkCUDAError("kernel execution : copy data Derivatives");
// for (int j = 0; j < n; j++) {
// printf("der[%d]=%f\n", j, x[j]);
// }
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("pridane %f \n", tPoint[0]);
printf("trvanie %f ms , %f iter/sec speedUp:%f \n", time, iterations
/ time * 1000, iterations / time * 1000 / setialIterationPerSec);
// double parallelPower = totalthreadsOnBlocks;
// parallelPower = parallelPower / time;
// parallelPower = parallelPower * 1000 * NMAXITERKernel;
// printf(
// "Elapset time: %f ms; %1.2f iterations/sec; speedup: %1.4f\n",
// time, parallelPower, parallelPower / serialPower);
//
// L1Norm = cublasSasum(n, x_d, 1);
// residualsSum = cublasSnrm2(m, g_d, 1);
// objectiveValue = 0.5 * residualsSum * residualsSum;
// objectiveValue += lambda * L1Norm;
// printf("%d:L1 norm = %f, redisuals=%f, objective=%1.16f\n", i,
// L1Norm, residualsSum, objectiveValue);
cout << "Save particular solution to file? (y/n): ";
cin >> doContinue;
if (doContinue == "y") {
float treshHold;
cout << "Enter treshhold for x: ";
cin >> treshHold;
int writtenBars = 0;
FILE *fp;
fp = fopen("/tmp/ttd.txt", "w");
cudaMemcpy(x, x_d, n * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError("kernel execution compy ....");
for (int i = 0; i < n; i++) {
if (abs(x[i]) > treshHold) {
writtenBars++;
fprintf(fp, "%d,%d,%d,%d,%f\n", nodeDescription[i * 4],
nodeDescription[i * 4 + 1], nodeDescription[i * 4
+ 2], nodeDescription[i * 4 + 3], x[i]);
}
}
fclose(fp);
printf("Number of written bars:%d\n", writtenBars);
}
cout << "Continue? (y/n): ";
cin >> doContinue;
if (doContinue == "n") {
doworking = 0;
}
}
print_time_message(&t1, "Computation");
//-------------------------------DeAllocation
cudaFree(lambda_d);
cudaFree(A_d);
cudaFree(C_idx_d);
cudaFree(n_d);
cudaFree(x_d);
cudaFree(derivatives_d);
cudaFree(L_d);
cudaFree(ATcount_d);
print_time_message(&t1, "Device memory DeAllocated");
}
int main(void) {
greedyTTD();
// calculateTTDProblem();
return 1;
}
void generateTTDProblem(int row, int col, int exampleType, int* mOut,
int* nOut, float** A, int** ColIDX, int ** RowIDX, int * nnzElements,
int* boundary, int boundarySize, int** nodeDescription, int rowScale) {
float kappa = 1;
float scale = sqrt(kappa);
int m = row * col;
float A_values[MaxTTD_NNZ_ELEMENTS];
int Rows_A[MaxTTD_NNZ_ELEMENTS];
int Cols_A[MaxTTD_NNZ_ELEMENTS];
*nodeDescription = new int[MaxTTD_NNZ_ELEMENTS*4];
int nodeDescriptionId = 0;
int nnz = 0;
int node = 0;
int tt = col;
if (row > tt)
tt = row;
int GSDS[tt + 1][tt + 1];
for (int i = 1; i <= tt; i++) {
for (int j = i; j <= tt; j++) {
GSDS[i][j] = GCD(i, j);
GSDS[j][i] = GCD(i, j);
}
}
int betta[2];
float beta[2];
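	// Enumerate candidate bars of the row x col node grid: node (i,j) is connected to the
	// nodes (k, j+l) in the columns to its right; for l > 1 the gcd test skips bars that
	// presumably would pass exactly through an intermediate grid node, and bars whose two
	// endpoints both lie on the boundary are dropped. Vertical bars inside a column are added
	// in the separate (i < row) branch below.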
for (int j = 1; j <= col; j++) {
for (int i = 1; i <= col; i++) {
for (int k = 1; k <= row; k++) {
for (int l = 1; l <= col - j; l++) {
					betta[0] = rowScale * l; // for node (i,j) we add bars to all nodes (k, j+l)
betta[1] = -k + i;
scaleBetta(betta, beta, scale);
betta[0] = l;
if (col == j)
continue;
if (l > 1) {
int skip = 0;
int ta = minimabs(betta);
int tb = maximabs(betta);
if (ta == 0) {
skip = 1;
} else {
if (GSDS[ta][tb] > 1) {
skip = 1;
}
}
if (skip)
continue;
}
int totalMatchA = 0;
int totalMatchB = 0;
for (int bi = 0; bi < boundarySize; bi++) {
if (boundary[bi] == row * (j - 1) + i)
totalMatchA++;
if (boundary[bi] == row * (j) + k + (l - 1) * row)
totalMatchB++;
}
if (totalMatchA + totalMatchB < 2) {
node++;
int tmp = row * (j - 1) + i;
A_values[nnz] = -(1 - totalMatchA) * beta[0];
Rows_A[nnz] = tmp;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = -(1 - totalMatchA) * beta[1];
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
tmp = row * (j) + k + (l - 1) * row;
A_values[nnz] = (1 - totalMatchB) * beta[0];
Rows_A[nnz] = tmp;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = (1 - totalMatchB) * beta[1];
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
(*nodeDescription)[nodeDescriptionId] = i;
(*nodeDescription)[nodeDescriptionId + 1] = j;
(*nodeDescription)[nodeDescriptionId + 2] = k;
(*nodeDescription)[nodeDescriptionId + 3] = l + j;
nodeDescriptionId += 4;
}
}
}
if (i < row) {
int tmp = i + (j - 1) * row;
int totalMatchA = 0;
int totalMatchB = 0;
for (int bi = 0; bi < boundarySize; bi++) {
if (boundary[bi] == tmp)
totalMatchA++;
if (boundary[bi] == tmp + 1)
totalMatchB++;
}
if (totalMatchA + totalMatchB < 2) {
node = node + 1;
A_values[nnz] = -(1 - totalMatchA);
Rows_A[nnz] = tmp + m;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = 0; //fake node
Rows_A[nnz] = tmp + 1;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = 0; //fake node
Rows_A[nnz] = tmp + 2;
Cols_A[nnz] = node;
nnz++;
A_values[nnz] = (1 - totalMatchB);
Rows_A[nnz] = tmp + m + 1;
Cols_A[nnz] = node;
nnz++;
(*nodeDescription)[nodeDescriptionId] = i;
(*nodeDescription)[nodeDescriptionId + 1] = j;
(*nodeDescription)[nodeDescriptionId + 2] = i + 1;
(*nodeDescription)[nodeDescriptionId + 3] = j;
nodeDescriptionId += 4;
}
}
}
	}
	*A = new float[nnz];
*ColIDX = new int[nnz];
*RowIDX = new int[nnz];
for (int i = 0; i < nnz; i++) {
(*A)[i] = A_values[i];
(*ColIDX)[i] = Cols_A[i];
(*RowIDX)[i] = Rows_A[i];
}
*nOut = node;
*mOut = row * col * 2;
*nnzElements = nnz;
}
void getBoundaryVector(int row, int col, int exampleType, int** boundary,
int* boudarySize) {
switch (exampleType) {
case 1:
*boundary = new int[row];
for (int i = 0; i < row; i++) {
(*boundary)[i] = i + 1;
}
// boundaryIDX(1, i) = 1;
// boundaryIDX(2, i) = i;
*boudarySize = row;
break;
case 2:
*boundary = new int[4];
(*boundary)[0] = row;
(*boundary)[1] = row * col;
(*boundary)[2] = row * col - row * (col / 3);
(*boundary)[3] = row + row * (col / 3);
*boudarySize = 4;
break;
case 3:
*boundary = new int[4];
(*boundary)[0] = row - 5;
(*boundary)[1] = row * col - 5;
(*boundary)[2] = row * col - row * (col / 3);
(*boundary)[3] = row + row * (col / 3);
*boudarySize = 4;
break;
case 5:
*boundary = new int[0];
*boudarySize = 0;
break;
case 7:
*boundary = new int[2];
(*boundary)[0] = row;
(*boundary)[1] = row * col;
*boudarySize = 2;
break;
default:
break;
}
}
// if (boundarytype==3) %bridge
//
// boundary(1,1)=r;
// boundary(1, 4) = r + r * (floor(c / 3));
// boundary(1, 3) = r * c - r * floor(c / 3);
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// boundaryIDX(1, 3) = floor(c / 3) + 1;
// boundaryIDX(2, 3) = r;
// boundaryIDX(1, 4) = c - floor(c / 3);
// boundaryIDX(2, 4) = r;
//
// end
// if (boundarytype==4) %
//
// boundary(1,1)=r;
// % boundary(1,4)=r+r*(floor(c/5));
// % boundary(1,3)=r*c-r*floor(c/5);
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// % boundaryIDX(1,3)=floor(c/5)+1;
// % boundaryIDX(2,3)=r;
// % boundaryIDX(1,4)=c-floor(c/5);
// % boundaryIDX(2,4)=r;
//
// end
//
// if (boundarytype==5) %
//
// end
//
// if (boundarytype==6) %
//
// boundary(1,1)=r;
//
// boundary(1, 4) = r + r * (floor(c / 5));
// boundary(1, 3) = r * c - r * floor(c / 5);
// boundary(1, 5) = r + r * 2 * (floor(c / 5));
// boundary(1, 6) = r * c - r * 2 * floor(c / 5);
//
// boundary(1, 2) = r * c;
//
// boundaryIDX(1, 1) = 1;
// boundaryIDX(2, 1) = r;
// boundaryIDX(1, 2) = c;
// boundaryIDX(2, 2) = r;
//
// boundaryIDX(1, 3) = floor(c / 5) + 1;
// boundaryIDX(2, 3) = r;
// boundaryIDX(1, 4) = c - floor(c / 5);
// boundaryIDX(2, 4) = r;
//
// boundaryIDX(1, 5) = 2 * floor(c / 5) + 1;
// boundaryIDX(2, 5) = r;
// boundaryIDX(1, 6) = c - 2 * floor(c / 5);
// boundaryIDX(2, 6) = r;
//
// end
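// Scales the integer direction vector betta by scale / ||betta||^2, i.e.
// beta = scale * betta / (betta[0]^2 + betta[1]^2).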
void scaleBetta(int* betta, float* beta, float scale) {
float tmp = scale / (betta[0] * betta[0] + betta[1] * betta[1]);
beta[0] = betta[0] * tmp;
beta[1] = betta[1] * tmp;
}
int GCD(int a, int b) {
while (1) {
a = a % b;
if (a == 0)
return b;
b = b % a;
if (b == 0)
return a;
}
}
void getForceVector(float ** d, int m, int r, int c, int exampleType) {
// int mid = r / 2;
// int midr =(r/2)+1;
// int midc = (c/2)+1;
	*d = new float[m](); // value-initialize so that unloaded entries are zero
m = m / 2;
int tmp;
switch (exampleType) {
case 1:
// tmp = r * (c - 1) + mid + 1;
tmp = r * c;
(*d)[tmp + m - 1] = -1;
(*d)[tmp - 1] = 2;
break;
case 2:
for (int cc = 2; cc < c-1; cc++) {
(*d)[-2 + r * cc + m - 1] = -1;
}
break;
case 3:
for (int cc = 2; cc < c; cc++) {
(*d)[-2 + r * cc + m - 1] = -1;
}
break;
case 5:
break;
case 7:
for (int cc = 3; cc < c-3+1; cc++) {
(*d)[-5 + r * cc + m - 1] = -1;
}
break;
default:
break;
}
// if (boundarytype==3)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
// for cc=2:c-1
// d(-2+r*cc+m) = -1;
// end
// end
//
// if (boundarytype==6)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
// for cc=2:c-1
// d(-1+r*cc+m) = -1;
// end
// end
//
// if (boundarytype==4)
// midr = floor(r/2)+1
// midc = floor(c/2)+1
//
//
//
// for cc=3:c-2
// d(-12+r*cc+m) = -1;
// end
//
// for asdf=1:13
// for cc=6:c-5
// %d(-8-asdf+r*cc+m) = -1;
// end
// end
//
//
// for asdf=1:17
// for cc=6:6
// d(-12-asdf+r*cc+m) = -1;
// if (asdf<17)
// d(-12-asdf+r*cc) = (-1)^asdf;
// end
// end
// end
//
//
// end
}
int maximabs(int * betta) {
if (abs(betta[0]) >= abs(betta[1]))
return abs(betta[0]);
else
return abs(betta[1]);
}
int minimabs(int * betta) {
if (abs(betta[0]) <= abs(betta[1]))
return abs(betta[0]);
else
return abs(betta[1]);
}
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(-1);
}
}
__global__ void setup_kernel(curandState *state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets different seed, a different sequence number, no offset */
curand_init(i, i, 0, &state[i]);
}
|
32c0b400452c0d2c4b677d05f50ca8a45b2e64c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "saber/funcs/impl/cuda/saber_concat.h"
#include "saber/funcs/impl/cuda/reorder.h"
#include "saber/funcs/calibrate.h"
namespace anakin{
namespace saber{
const int BLOCK_SIZE = 32;
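// Two element-copy kernels are used below: concat_impl_cuda runs a 1-D grid-stride loop over
// all elements of one input tensor, while concat_impl_2d_impl maps a 2-D thread block onto
// (inner offset, concat slice); dispatch() picks the 2-D version when the two extents are of
// comparable size (the is_balance heuristic, 0.1 < ratio < 10).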
template <typename dtype>
__global__ void concat_impl_cuda(const int nthreads, const dtype* in_data,
const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
out_data[top_index] = in_data[index];
}
}
template <typename dtype>
__global__ void concat_impl_2d_impl(const int inner_size, const int num_concats,
const dtype* in_data, const int concat_size,
const int out_concat_axis,
const int offset_concat_axis, dtype* out_data) {
int idx_inner = threadIdx.x + blockIdx.x * blockDim.x;
int idx_outer = threadIdx.y + blockIdx.y * blockDim.y;
if (idx_inner < inner_size && idx_outer < num_concats) {
int idx_input = idx_outer * inner_size + idx_inner;
int idx_output = (idx_outer * out_concat_axis + offset_concat_axis) * \
concat_size + idx_inner;
out_data[idx_output] = in_data[idx_input];
}
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::create(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV>& ctx) {
_num_concats = inputs[0]->count_valid(0, param.axis);
_concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::init(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) {
hipStream_t stream = this->_ctx->get_compute_stream();
int input_size = inputs.size();
//! get output data, valid shape and stride shape
OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data();
int offset_concat_axis = 0;
Shape out_shape = outputs[0]->valid_shape();
const int out_concat_axis = out_shape[param.axis];
bool out_cont_flag = outputs[0]->is_continue_mem();
bool in_cont_flag = inputs[0]->is_continue_mem();
for (int i = 1; i < input_size; ++i) {
in_cont_flag &= inputs[i]->is_continue_mem();
}
//! inputs and outputs are all with continuous memory
if (in_cont_flag && out_cont_flag){
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
//std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]};
const OpDataType* in_data = (const OpDataType*)inputs[i]->data();
const int in_concat_axis = in_shape[param.axis];
const int in_concat_size = in_concat_axis * _concat_input_size;
const int nthreads = in_concat_size * _num_concats;
float ratio = (float)in_concat_size / _num_concats;
bool is_balance = (ratio > 0.1 && ratio < 10);
if (is_balance) {
int block_x = BLOCK_SIZE;
int block_y = BLOCK_SIZE;
int grid_x = (in_concat_size + block_x - 1) / block_x;
int grid_y = (_num_concats + block_y - 1) / block_y;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
hipLaunchKernelGGL(( concat_impl_2d_impl<OpDataType>), dim3(grid), dim3(block), 0, stream,
in_concat_size, _num_concats, in_data, _concat_input_size,
out_concat_axis, offset_concat_axis, out_data
);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( concat_impl_cuda<OpDataType>), dim3(CUDA_GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, stream, \
nthreads, in_data, _num_concats, _concat_input_size, \
out_concat_axis, in_concat_axis, offset_concat_axis, out_data);
}
offset_concat_axis += in_concat_axis;
}
} else { //! inputs or outputs memory is not continuous
Shape offset_out = outputs[0]->offset();
Tensor<NV> tsub;
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
tsub.share_sub_buffer(*outputs[0], in_shape, offset_out);
offset_out[param.axis] += in_shape[param.axis];
tsub.async_copy_from(*inputs[i], stream);
}
}
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::create(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV>& ctx) {
_num_concats = inputs[0]->count_valid(0, param.axis);
_concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
_input_v.resize(inputs.size());
for (int i = 0; i < inputs.size(); ++i) {
if (inputs[i]->get_dtype() == AK_FLOAT) {
_input_v[i].re_alloc(inputs[i]->valid_shape(), AK_INT8);
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) {
Shape new_shape = Shape({inputs[i]->num(), inputs[i]->channel(),
inputs[i]->height(), inputs[i]->width()}, Layout_NCHW);
_input_v[i].re_alloc(new_shape, AK_INT8);
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) {
// good, nothing to do
} else {
            LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
_output.re_alloc(outputs[0]->valid_shape(), AK_INT8);
_output.set_scale(outputs[0]->get_scale());
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
Shape new_shape = outputs[0]->valid_shape();
new_shape.set_layout(Layout_NCHW);
_output.re_alloc(new_shape, AK_INT8);
_output.set_scale(outputs[0]->get_scale());
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
// good, nothing to do.
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::init(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::dispatch(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) {
hipStream_t stream = this->_ctx->get_compute_stream();
int input_size = inputs.size();
//! get output data, valid shape and stride shape
char* out_data = nullptr;
if (outputs[0]->get_dtype() == AK_FLOAT) {
out_data = (char*)_output.mutable_data();
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
out_data = (char*)_output.mutable_data();
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
out_data = (char*)outputs[0]->mutable_data();
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
int offset_concat_axis = 0;
Shape out_shape = outputs[0]->valid_shape();
const int out_concat_axis = out_shape[param.axis];
//! inputs and outputs are all with continuous memory
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
//std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]};
const char* in_data = nullptr;
if (inputs[i]->get_dtype() == AK_FLOAT) {
flatten_calibrate<NV, char, float> (_input_v[i], *inputs[i], *_ctx);
in_data = (char*)_input_v[i].mutable_data();
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) {
convert_nchwc4_to_nchw<NV>(_input_v[i], *inputs[i], *_ctx);
in_data = (char*)_input_v[i].mutable_data();
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) {
in_data = (char*)inputs[i]->mutable_data();
} else {
            LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
const int in_concat_axis = in_shape[param.axis];
const int in_concat_size = in_concat_axis * _concat_input_size;
const int nthreads = in_concat_size * _num_concats;
float ratio = (float)in_concat_size / _num_concats;
bool is_balance = (ratio > 0.1 && ratio < 10);
if (is_balance) {
int block_x = BLOCK_SIZE;
int block_y = BLOCK_SIZE;
int grid_x = (in_concat_size + block_x - 1) / block_x;
int grid_y = (_num_concats + block_y - 1) / block_y;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
hipLaunchKernelGGL(( concat_impl_2d_impl<char>), dim3(grid), dim3(block), 0, stream,
in_concat_size, _num_concats, in_data, _concat_input_size,
out_concat_axis, offset_concat_axis, out_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( concat_impl_cuda<char>), dim3(CUDA_GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, stream,
nthreads, in_data, _num_concats, _concat_input_size,
out_concat_axis, in_concat_axis, offset_concat_axis, out_data);
}
offset_concat_axis += in_concat_axis;
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
flatten_calibrate<NV, float, char>(*outputs[0], _output, *_ctx);
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
convert_nchw_to_nchwc4<NV>(*outputs[0], _output, *_ctx);
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
// good, nothing to be done;
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_HALF);
} //namespace saber
} //namespace anakin
| 32c0b400452c0d2c4b677d05f50ca8a45b2e64c2.cu | #include "saber/funcs/impl/cuda/saber_concat.h"
#include "saber/funcs/impl/cuda/reorder.h"
#include "saber/funcs/calibrate.h"
namespace anakin{
namespace saber{
const int BLOCK_SIZE = 32;
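// Two element-copy kernels are used below: concat_impl_cuda runs a 1-D grid-stride loop over
// all elements of one input tensor, while concat_impl_2d_impl maps a 2-D thread block onto
// (inner offset, concat slice); dispatch() picks the 2-D version when the two extents are of
// comparable size (the is_balance heuristic, 0.1 < ratio < 10).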
template <typename dtype>
__global__ void concat_impl_cuda(const int nthreads, const dtype* in_data,
const int num_concats, const int concat_size,
const int top_concat_axis, const int bottom_concat_axis,
const int offset_concat_axis, dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_concat_size = concat_size * bottom_concat_axis;
const int concat_num = index / total_concat_size;
const int concat_index = index % total_concat_size;
const int top_index = concat_index +
(concat_num * top_concat_axis + offset_concat_axis) * concat_size;
out_data[top_index] = in_data[index];
}
}
template <typename dtype>
__global__ void concat_impl_2d_impl(const int inner_size, const int num_concats,
const dtype* in_data, const int concat_size,
const int out_concat_axis,
const int offset_concat_axis, dtype* out_data) {
int idx_inner = threadIdx.x + blockIdx.x * blockDim.x;
int idx_outer = threadIdx.y + blockIdx.y * blockDim.y;
if (idx_inner < inner_size && idx_outer < num_concats) {
int idx_input = idx_outer * inner_size + idx_inner;
int idx_output = (idx_outer * out_concat_axis + offset_concat_axis) * \
concat_size + idx_inner;
out_data[idx_output] = in_data[idx_input];
}
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::create(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV>& ctx) {
_num_concats = inputs[0]->count_valid(0, param.axis);
_concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::init(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberConcat<NV, AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
int input_size = inputs.size();
//! get output data, valid shape and stride shape
OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data();
int offset_concat_axis = 0;
Shape out_shape = outputs[0]->valid_shape();
const int out_concat_axis = out_shape[param.axis];
bool out_cont_flag = outputs[0]->is_continue_mem();
bool in_cont_flag = inputs[0]->is_continue_mem();
for (int i = 1; i < input_size; ++i) {
in_cont_flag &= inputs[i]->is_continue_mem();
}
//! inputs and outputs are all with continuous memory
if (in_cont_flag && out_cont_flag){
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
//std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]};
const OpDataType* in_data = (const OpDataType*)inputs[i]->data();
const int in_concat_axis = in_shape[param.axis];
const int in_concat_size = in_concat_axis * _concat_input_size;
const int nthreads = in_concat_size * _num_concats;
float ratio = (float)in_concat_size / _num_concats;
bool is_balance = (ratio > 0.1 && ratio < 10);
if (is_balance) {
int block_x = BLOCK_SIZE;
int block_y = BLOCK_SIZE;
int grid_x = (in_concat_size + block_x - 1) / block_x;
int grid_y = (_num_concats + block_y - 1) / block_y;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
concat_impl_2d_impl<OpDataType><<<grid, block, 0, stream>>>(
in_concat_size, _num_concats, in_data, _concat_input_size,
out_concat_axis, offset_concat_axis, out_data
);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
concat_impl_cuda<OpDataType><<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( \
nthreads, in_data, _num_concats, _concat_input_size, \
out_concat_axis, in_concat_axis, offset_concat_axis, out_data);
}
offset_concat_axis += in_concat_axis;
}
} else { //! inputs or outputs memory is not continuous
Shape offset_out = outputs[0]->offset();
Tensor<NV> tsub;
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
tsub.share_sub_buffer(*outputs[0], in_shape, offset_out);
offset_out[param.axis] += in_shape[param.axis];
tsub.async_copy_from(*inputs[i], stream);
}
}
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::create(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV>& ctx) {
_num_concats = inputs[0]->count_valid(0, param.axis);
_concat_input_size = inputs[0]->count_valid(param.axis + 1, inputs[0]->dims());
_input_v.resize(inputs.size());
for (int i = 0; i < inputs.size(); ++i) {
if (inputs[i]->get_dtype() == AK_FLOAT) {
_input_v[i].re_alloc(inputs[i]->valid_shape(), AK_INT8);
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) {
Shape new_shape = Shape({inputs[i]->num(), inputs[i]->channel(),
inputs[i]->height(), inputs[i]->width()}, Layout_NCHW);
_input_v[i].re_alloc(new_shape, AK_INT8);
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) {
// good, nothing to do
} else {
            LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
_output.re_alloc(outputs[0]->valid_shape(), AK_INT8);
_output.set_scale(outputs[0]->get_scale());
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
Shape new_shape = outputs[0]->valid_shape();
new_shape.set_layout(Layout_NCHW);
_output.re_alloc(new_shape, AK_INT8);
_output.set_scale(outputs[0]->get_scale());
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
// good, nothing to do.
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
return SaberSuccess;
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::init(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs,
ConcatParam<NV>& param,
Context<NV> &ctx) {
// get context
this->_ctx = &ctx;
return create(inputs, outputs, param, ctx);
}
template <>
SaberStatus SaberConcat<NV, AK_INT8>::dispatch(const std::vector<Tensor<NV> *>& inputs,
std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) {
cudaStream_t stream = this->_ctx->get_compute_stream();
int input_size = inputs.size();
//! get output data, valid shape and stride shape
char* out_data = nullptr;
if (outputs[0]->get_dtype() == AK_FLOAT) {
out_data = (char*)_output.mutable_data();
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
out_data = (char*)_output.mutable_data();
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
out_data = (char*)outputs[0]->mutable_data();
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
int offset_concat_axis = 0;
Shape out_shape = outputs[0]->valid_shape();
const int out_concat_axis = out_shape[param.axis];
//! inputs and outputs are all with continuous memory
for (int i = 0; i < input_size; ++i) {
Shape in_shape = inputs[i]->valid_shape();
//std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]};
const char* in_data = nullptr;
if (inputs[i]->get_dtype() == AK_FLOAT) {
flatten_calibrate<NV, char, float> (_input_v[i], *inputs[i], *_ctx);
in_data = (char*)_input_v[i].mutable_data();
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW_C4) {
convert_nchwc4_to_nchw<NV>(_input_v[i], *inputs[i], *_ctx);
in_data = (char*)_input_v[i].mutable_data();
} else if (inputs[i]->get_dtype() == AK_INT8 && inputs[i]->get_layout() == Layout_NCHW) {
in_data = (char*)inputs[i]->mutable_data();
} else {
            LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
const int in_concat_axis = in_shape[param.axis];
const int in_concat_size = in_concat_axis * _concat_input_size;
const int nthreads = in_concat_size * _num_concats;
float ratio = (float)in_concat_size / _num_concats;
bool is_balance = (ratio > 0.1 && ratio < 10);
if (is_balance) {
int block_x = BLOCK_SIZE;
int block_y = BLOCK_SIZE;
int grid_x = (in_concat_size + block_x - 1) / block_x;
int grid_y = (_num_concats + block_y - 1) / block_y;
dim3 block(block_x, block_y);
dim3 grid(grid_x, grid_y);
concat_impl_2d_impl<char><<<grid, block, 0, stream>>>(
in_concat_size, _num_concats, in_data, _concat_input_size,
out_concat_axis, offset_concat_axis, out_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
concat_impl_cuda<char><<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>(
nthreads, in_data, _num_concats, _concat_input_size,
out_concat_axis, in_concat_axis, offset_concat_axis, out_data);
}
offset_concat_axis += in_concat_axis;
}
if (outputs[0]->get_dtype() == AK_FLOAT) {
flatten_calibrate<NV, float, char>(*outputs[0], _output, *_ctx);
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW_C4) {
convert_nchw_to_nchwc4<NV>(*outputs[0], _output, *_ctx);
} else if (outputs[0]->get_dtype() == AK_INT8 && outputs[0]->get_layout() == Layout_NCHW) {
// good, nothing to be done;
} else {
        LOG(FATAL) << "This situation is not supported; please contact the R&D team.";
}
return SaberSuccess;
}
DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_HALF);
} //namespace saber
} //namespace anakin
|
028b00303012732bab60397bb904c6c70197830c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <THH/THHAtomics.cuh>
#define EPS 1e-7
namespace kaolin {
template<typename scalar_t>
__global__ void dibr_soft_mask_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ close_face_prob,
int64_t* __restrict__ close_face_idx,
uint8_t* __restrict__ close_face_dist_type,
scalar_t* __restrict__ soft_mask,
int batch_size,
int height,
int width,
int num_faces,
int knum,
float sigmainv,
float multiplier) {
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width) {
return;
}
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// which face it belongs to?
// face begins from 1
// convert it into int, use round!
int fidxint = selected_face_idx[totalidx1];
    // fidxint >= 0: the pixel is covered by a face and gets a hard mask of 1;
    // uncovered pixels are handled in the else branch by searching nearby faces
    if (fidxint >= 0) {
soft_mask[totalidx1] = 1.0;
}
// pixels not covered by any faces
else {
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
int kid = 0;
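        // For an uncovered pixel, scan every face whose bounding box contains the pixel, take
        // the squared distance to the triangle as the minimum over the three edge
        // (perpendicular) distances and the three vertex distances, turn it into a probability
        // exp(-sigmainv * d^2 / multiplier^2), and record up to knum close faces; the soft
        // mask is then 1 - prod_k(1 - p_k).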
for (int fidxint = 0; fidxint < num_faces; fidxint++) {
// which face it belongs to
const int shift1 = bidx * num_faces + fidxint;
const int shift4 = shift1 * 4;
const int shift6 = shift1 * 6;
///////////////////////////////////////////////////////////////
// will this pixel is influenced by this face?
scalar_t xmin = face_bboxes[shift4 + 0];
scalar_t ymin = face_bboxes[shift4 + 1];
scalar_t xmax = face_bboxes[shift4 + 2];
scalar_t ymax = face_bboxes[shift4 + 3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
//////////////////////////////////////////////////////////
scalar_t pdis[6];
            // perpendicular distances to the three edges (pdis[0..2])
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((i + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
// is it a bad triangle?
scalar_t x3 = B * B * x0 - A * B * y0 - A * C;
scalar_t y3 = A * A * y0 - A * B * x0 - B * C;
x3 = x3 / (down + EPS);
y3 = y3 / (down + EPS);
scalar_t direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
if (direct > 0) {
// bad triangle
pdis[i] = 4 * multiplier * multiplier;
} else {
// perpendicular distance
pdis[i] = up * up / (down + EPS);
}
}
////////////////////////////////////////////////////////////
// point distance
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
pdis[i + 3] = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
}
int edgeid = 0;
scalar_t dissquare = pdis[0];
for (int i = 1; i < 6; i++) {
if (dissquare > pdis[i]) {
dissquare = pdis[i];
edgeid = i;
}
}
scalar_t z = sigmainv * dissquare / multiplier / multiplier;
scalar_t prob = exp(-z);
close_face_prob[totalidxk + kid] = prob;
close_face_idx[totalidxk + kid] = fidxint;
close_face_dist_type[totalidxk + kid] = edgeid + 1;
kid++;
if (kid >= knum)
break;
}
scalar_t allprob = 1.0;
for (int i = 0; i < kid; i++) {
scalar_t prob = close_face_prob[totalidxk + i];
allprob *= (1.0 - prob);
}
// final result
allprob = 1.0 - allprob;
soft_mask[totalidx1] = allprob;
}
}
void dibr_soft_mask_forward_cuda_impl(
const at::Tensor face_vertices_image,
const at::Tensor face_large_bboxes,
const at::Tensor selected_face_idx,
at::Tensor close_face_prob,
at::Tensor close_face_idx,
at::Tensor close_face_dist_type,
at::Tensor soft_mask,
const float sigmainv,
const float multiplier) {
const int batch_size = face_vertices_image.size(0);
const int num_faces = face_vertices_image.size(1);
const int height = selected_face_idx.size(1);
const int width = selected_face_idx.size(2);
const int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_forward_cuda", [&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(face_vertices_image));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int block_size = 512;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
hipLaunchKernelGGL(( dibr_soft_mask_forward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
face_vertices_image.data_ptr<scalar_t>(),
face_large_bboxes.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
soft_mask.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, knum, sigmainv, multiplier);
});
return;
}
template<typename scalar_t>
__global__ void dibr_soft_mask_backward_cuda_kernel(
const scalar_t* __restrict__ grad_soft_mask,
const scalar_t* __restrict__ soft_mask,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ close_face_prob,
const int64_t* __restrict__ close_face_idx,
const uint8_t* __restrict__ close_face_dist_type,
const scalar_t* __restrict__ face_vertices_image,
scalar_t* __restrict__ grad_face_vertices_image,
int batch_size, int height, int width, int num_faces,
int knum, float sigmainv, float multiplier) {
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// coordinates
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
int fidxint = selected_face_idx[totalidx1];
// not covered by any faces
if (fidxint < 0) {
scalar_t dLdp = grad_soft_mask[totalidx1];
scalar_t allprob = soft_mask[totalidx1];
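        // Gradients flow only through uncovered pixels: since soft_mask = 1 - prod_k(1 - p_k)
        // with p_k = exp(-z_k), the partial derivative w.r.t. z_k is
        // -p_k * prod_{j != k}(1 - p_j) = -p_k * (1 - allprob) / (1 - p_k); dLdz below carries
        // this factor (times sigmainv) and is then routed to a vertex-distance or an
        // edge-distance gradient depending on the stored dist type.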
for (int kid = 0; kid < knum; kid++) {
int fidxint = close_face_idx[totalidxk + kid];
if (fidxint < 0)
break;
const int shift1 = bidx * num_faces + fidxint;
const int shift6 = shift1 * 6;
scalar_t prob = close_face_prob[totalidxk + kid];
scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob)
/ (1.0 - prob + EPS) * prob;
int edgecase = close_face_dist_type[totalidxk + kid];
int edgeid = edgecase - 1;
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + EPS);
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + EPS);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + EPS);
scalar_t dzdC = 2 * up / (down + EPS);
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 0,
dLdx2 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 1,
dLdy2 / multiplier);
}
}
}
return;
}
void dibr_soft_mask_backward_cuda_impl(
const at::Tensor grad_soft_mask,
const at::Tensor soft_mask,
const at::Tensor selected_face_idx,
const at::Tensor close_face_prob,
const at::Tensor close_face_idx,
const at::Tensor close_face_dist_type,
const at::Tensor face_vertices_image,
at::Tensor grad_face_vertices_image,
const float sigmainv,
const float multiplier) {
int batch_size = face_vertices_image.size(0);
int num_faces = face_vertices_image.size(1);
int height = selected_face_idx.size(1);
int width = selected_face_idx.size(2);
int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_backward_cuda", [&] {
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(at::device_of(face_vertices_image));
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int block_size = 1024;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
hipLaunchKernelGGL(( dibr_soft_mask_backward_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
grad_soft_mask.data_ptr<scalar_t>(),
soft_mask.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
face_vertices_image.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
batch_size, height, width, num_faces,
knum, sigmainv, multiplier);
});
return;
}
}
| 028b00303012732bab60397bb904c6c70197830c.cu | // Copyright (c) 2019,20-21-22 NVIDIA CORPORATION & AFFILIATES.
// All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <ATen/ATen.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
#define EPS 1e-7
namespace kaolin {
template<typename scalar_t>
__global__ void dibr_soft_mask_forward_cuda_kernel(
const scalar_t* __restrict__ face_vertices_image,
const scalar_t* __restrict__ face_bboxes,
const int64_t* __restrict__ selected_face_idx,
scalar_t* __restrict__ close_face_prob,
int64_t* __restrict__ close_face_idx,
uint8_t* __restrict__ close_face_dist_type,
scalar_t* __restrict__ soft_mask,
int batch_size,
int height,
int width,
int num_faces,
int knum,
float sigmainv,
float multiplier) {
// bidx * height * width + heiidx * width + wididx
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width) {
return;
}
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// which face it belongs to?
// face begins from 1
// convert it into int, use round!
int fidxint = selected_face_idx[totalidx1];
    // fidxint >= 0: the pixel is covered by a face and gets a hard mask of 1;
    // uncovered pixels are handled in the else branch by searching nearby faces
    if (fidxint >= 0) {
soft_mask[totalidx1] = 1.0;
}
// pixels not covered by any faces
else {
// pixel coordinate
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
int kid = 0;
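        // For an uncovered pixel, scan every face whose bounding box contains the pixel, take
        // the squared distance to the triangle as the minimum over the three edge
        // (perpendicular) distances and the three vertex distances, turn it into a probability
        // exp(-sigmainv * d^2 / multiplier^2), and record up to knum close faces; the soft
        // mask is then 1 - prod_k(1 - p_k).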
for (int fidxint = 0; fidxint < num_faces; fidxint++) {
// which face it belongs to
const int shift1 = bidx * num_faces + fidxint;
const int shift4 = shift1 * 4;
const int shift6 = shift1 * 6;
///////////////////////////////////////////////////////////////
// will this pixel is influenced by this face?
scalar_t xmin = face_bboxes[shift4 + 0];
scalar_t ymin = face_bboxes[shift4 + 1];
scalar_t xmax = face_bboxes[shift4 + 2];
scalar_t ymax = face_bboxes[shift4 + 3];
// not covered by this face!
if (x0 < xmin || x0 >= xmax || y0 < ymin || y0 >= ymax) {
continue;
}
//////////////////////////////////////////////////////////
scalar_t pdis[6];
            // perpendicular distances to the three edges (pdis[0..2])
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((i + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
// is it a bad triangle?
scalar_t x3 = B * B * x0 - A * B * y0 - A * C;
scalar_t y3 = A * A * y0 - A * B * x0 - B * C;
x3 = x3 / (down + EPS);
y3 = y3 / (down + EPS);
scalar_t direct = (x3 - x1) * (x3 - x2) + (y3 - y1) * (y3 - y2);
if (direct > 0) {
// bad triangle
pdis[i] = 4 * multiplier * multiplier;
} else {
// perpendicular distance
pdis[i] = up * up / (down + EPS);
}
}
////////////////////////////////////////////////////////////
// point distance
for (int i = 0; i < 3; i++) {
int pshift = shift6 + i * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
pdis[i + 3] = (x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1);
}
int edgeid = 0;
scalar_t dissquare = pdis[0];
for (int i = 1; i < 6; i++) {
if (dissquare > pdis[i]) {
dissquare = pdis[i];
edgeid = i;
}
}
scalar_t z = sigmainv * dissquare / multiplier / multiplier;
scalar_t prob = exp(-z);
close_face_prob[totalidxk + kid] = prob;
close_face_idx[totalidxk + kid] = fidxint;
close_face_dist_type[totalidxk + kid] = edgeid + 1;
kid++;
if (kid >= knum)
break;
}
scalar_t allprob = 1.0;
for (int i = 0; i < kid; i++) {
scalar_t prob = close_face_prob[totalidxk + i];
allprob *= (1.0 - prob);
}
// final result
allprob = 1.0 - allprob;
soft_mask[totalidx1] = allprob;
}
}
void dibr_soft_mask_forward_cuda_impl(
const at::Tensor face_vertices_image,
const at::Tensor face_large_bboxes,
const at::Tensor selected_face_idx,
at::Tensor close_face_prob,
at::Tensor close_face_idx,
at::Tensor close_face_dist_type,
at::Tensor soft_mask,
const float sigmainv,
const float multiplier) {
const int batch_size = face_vertices_image.size(0);
const int num_faces = face_vertices_image.size(1);
const int height = selected_face_idx.size(1);
const int width = selected_face_idx.size(2);
const int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_forward_cuda", [&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(face_vertices_image));
auto stream = at::cuda::getCurrentCUDAStream();
const int block_size = 512;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
dibr_soft_mask_forward_cuda_kernel<scalar_t><<<blocks, threads>>>(
face_vertices_image.data_ptr<scalar_t>(),
face_large_bboxes.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
soft_mask.data_ptr<scalar_t>(),
batch_size, height, width, num_faces, knum, sigmainv, multiplier);
});
return;
}
template<typename scalar_t>
__global__ void dibr_soft_mask_backward_cuda_kernel(
const scalar_t* __restrict__ grad_soft_mask,
const scalar_t* __restrict__ soft_mask,
const int64_t* __restrict__ selected_face_idx,
const scalar_t* __restrict__ close_face_prob,
const int64_t* __restrict__ close_face_idx,
const uint8_t* __restrict__ close_face_dist_type,
const scalar_t* __restrict__ face_vertices_image,
scalar_t* __restrict__ grad_face_vertices_image,
int batch_size, int height, int width, int num_faces,
int knum, float sigmainv, float multiplier) {
int presentthread = blockIdx.x * blockDim.x + threadIdx.x;
int wididx = presentthread % width;
presentthread = (presentthread - wididx) / width;
int heiidx = presentthread % height;
int bidx = (presentthread - heiidx) / height;
if (bidx >= batch_size || heiidx >= height || wididx >= width)
return;
// which pixel it belongs to
const int totalidx1 = bidx * height * width + heiidx * width + wididx;
const int totalidxk = totalidx1 * knum;
// coordinates
scalar_t x0 = multiplier / width * (2 * wididx + 1 - width);
scalar_t y0 = multiplier / height * (height - 2 * heiidx - 1);
// which face it belongs to?
int fidxint = selected_face_idx[totalidx1];
// not covered by any faces
if (fidxint < 0) {
scalar_t dLdp = grad_soft_mask[totalidx1];
scalar_t allprob = soft_mask[totalidx1];
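        // Gradients flow only through uncovered pixels: since soft_mask = 1 - prod_k(1 - p_k)
        // with p_k = exp(-z_k), the partial derivative w.r.t. z_k is
        // -p_k * prod_{j != k}(1 - p_j) = -p_k * (1 - allprob) / (1 - p_k); dLdz below carries
        // this factor (times sigmainv) and is then routed to a vertex-distance or an
        // edge-distance gradient depending on the stored dist type.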
for (int kid = 0; kid < knum; kid++) {
int fidxint = close_face_idx[totalidxk + kid];
if (fidxint < 0)
break;
const int shift1 = bidx * num_faces + fidxint;
const int shift6 = shift1 * 6;
scalar_t prob = close_face_prob[totalidxk + kid];
scalar_t dLdz = -1.0 * sigmainv * dLdp * (1.0 - allprob)
/ (1.0 - prob + EPS) * prob;
int edgecase = close_face_dist_type[totalidxk + kid];
int edgeid = edgecase - 1;
if (edgeid >= 3) {
// point distance
int pshift = shift6 + (edgeid - 3) * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
scalar_t dLdx1 = dLdz * 2 * (x1 - x0);
scalar_t dLdy1 = dLdz * 2 * (y1 - y0);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
} else {
// perpendicular distance
int pshift = shift6 + edgeid * 2;
scalar_t x1 = face_vertices_image[pshift + 0];
scalar_t y1 = face_vertices_image[pshift + 1];
int pshift2 = shift6 + ((edgeid + 1) % 3) * 2;
scalar_t x2 = face_vertices_image[pshift2 + 0];
scalar_t y2 = face_vertices_image[pshift2 + 1];
// ax + by + c = 0
scalar_t A = y2 - y1;
scalar_t B = x1 - x2;
scalar_t C = x2 * y1 - x1 * y2;
// dissquare = d^2 = (ax+by+c)^2 / (a^2+b^2)
// up = ax + by + c
// down = a^2 + b^2
// dissquare = up^2 / down
scalar_t up = A * x0 + B * y0 + C;
scalar_t down = A * A + B * B;
scalar_t dissquare = up * up / (down + EPS);
scalar_t dzdA = 2 * (x0 * up - dissquare * A) / (down + EPS);
scalar_t dzdB = 2 * (y0 * up - dissquare * B) / (down + EPS);
scalar_t dzdC = 2 * up / (down + EPS);
scalar_t dLdx1 = dLdz * (dzdB - y2 * dzdC);
scalar_t dLdy1 = dLdz * (x2 * dzdC - dzdA);
scalar_t dLdx2 = dLdz * (y1 * dzdC - dzdB);
scalar_t dLdy2 = dLdz * (dzdA - x1 * dzdC);
atomicAdd(grad_face_vertices_image + pshift + 0,
dLdx1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift + 1,
dLdy1 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 0,
dLdx2 / multiplier);
atomicAdd(grad_face_vertices_image + pshift2 + 1,
dLdy2 / multiplier);
}
}
}
return;
}
void dibr_soft_mask_backward_cuda_impl(
const at::Tensor grad_soft_mask,
const at::Tensor soft_mask,
const at::Tensor selected_face_idx,
const at::Tensor close_face_prob,
const at::Tensor close_face_idx,
const at::Tensor close_face_dist_type,
const at::Tensor face_vertices_image,
at::Tensor grad_face_vertices_image,
const float sigmainv,
const float multiplier) {
int batch_size = face_vertices_image.size(0);
int num_faces = face_vertices_image.size(1);
int height = selected_face_idx.size(1);
int width = selected_face_idx.size(2);
int knum = close_face_idx.size(3);
const int num_pixels = batch_size * height * width;
AT_DISPATCH_FLOATING_TYPES(face_vertices_image.scalar_type(),
"dibr_soft_mask_backward_cuda", [&] {
const at::cuda::OptionalCUDAGuard device_guard(at::device_of(face_vertices_image));
auto stream = at::cuda::getCurrentCUDAStream();
const int block_size = 1024;
const int grid_size = (num_pixels + block_size - 1) / block_size;
const dim3 threads(block_size, 1, 1);
const dim3 blocks(grid_size, 1, 1);
dibr_soft_mask_backward_cuda_kernel<scalar_t><<<blocks, threads>>>(
grad_soft_mask.data_ptr<scalar_t>(),
soft_mask.data_ptr<scalar_t>(),
selected_face_idx.data_ptr<int64_t>(),
close_face_prob.data_ptr<scalar_t>(),
close_face_idx.data_ptr<int64_t>(),
close_face_dist_type.data_ptr<uint8_t>(),
face_vertices_image.data_ptr<scalar_t>(),
grad_face_vertices_image.data_ptr<scalar_t>(),
batch_size, height, width, num_faces,
knum, sigmainv, multiplier);
});
return;
}
}
|
a507bfe3c20e643f597f15c0df2ec5fcd23ca075.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "decrementalColouringNew.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *vertexArray = NULL;
hipMalloc(&vertexArray, XSIZE*YSIZE*sizeof(int));
int *neighbourArray = NULL;
hipMalloc(&neighbourArray, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int m = 2;
int *decrementalArray = NULL;
hipMalloc(&decrementalArray, XSIZE*YSIZE*sizeof(int));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
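// Round the problem size up to a multiple of the block dimensions so the grid tiles it exactly.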
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( decrementalColouringNew), dim3(gridBlock), dim3(threadBlock), 0, 0, vertexArray, neighbourArray, n, m, decrementalArray, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( decrementalColouringNew), dim3(gridBlock), dim3(threadBlock), 0, 0, vertexArray, neighbourArray, n, m, decrementalArray, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( decrementalColouringNew), dim3(gridBlock), dim3(threadBlock), 0, 0, vertexArray, neighbourArray, n, m, decrementalArray, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a507bfe3c20e643f597f15c0df2ec5fcd23ca075.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "decrementalColouringNew.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *vertexArray = NULL;
cudaMalloc(&vertexArray, XSIZE*YSIZE*sizeof(int));
int *neighbourArray = NULL;
cudaMalloc(&neighbourArray, XSIZE*YSIZE*sizeof(int));
int n = XSIZE*YSIZE;
int m = 2;
int *decrementalArray = NULL;
cudaMalloc(&decrementalArray, XSIZE*YSIZE*sizeof(int));
int size = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
decrementalColouringNew<<<gridBlock,threadBlock>>>(vertexArray,neighbourArray,n,m,decrementalArray,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
decrementalColouringNew<<<gridBlock,threadBlock>>>(vertexArray,neighbourArray,n,m,decrementalArray,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
decrementalColouringNew<<<gridBlock,threadBlock>>>(vertexArray,neighbourArray,n,m,decrementalArray,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
994f64da74f1e2c48aafacaca9f6fa498015d503.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <cmath>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define GLOBAL_MAX_EDGES_PER_SHARD 67108864 //8388608
#define NUM_STREAMS 2
#define ERR 0.01
void safe_call(hipError_t ret, int line)
{
if(ret!=hipSuccess)
{
printf("Error at line %d : %s\n",line,hipGetErrorString(ret));
exit(-1);
}
}
typedef int VertexId;
typedef int VertexVal;
typedef VertexVal EdgeVal;
typedef struct __interval
{
VertexId start;
VertexId end;
} interval_t;
typedef struct __edge
{
VertexId src;
VertexId dest;
EdgeVal val;
} edge_t;
typedef struct __vertex
{
int numInEdges;
int numOutEdges;
VertexVal val;
// int shardId;
} vertex_t;
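// A shard owns the contiguous vertex range [Vstart, Vend]. inEdgesMap/outEdgesMap hold
// per-vertex running edge counts (CSR-style end offsets) into inEdges/outEdges.
// inUpdates/outUpdates are per-edge scratch buffers written in the gather phase and read
// in the apply phase; changed marks vertices whose value shrank in the current iteration.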
typedef struct __shard
{
int Ein;
int Eout;
VertexId Vstart;
VertexId Vend;
VertexId * inEdgesMap;
VertexId * outEdgesMap;
edge_t * inEdges;
edge_t * outEdges;
bool * changed;
VertexVal * inUpdates;
VertexVal * outUpdates;
} shard_t;
/*__device__ double atomicAdd(VertexVal* address, VertexVal val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
*/
#ifndef __GATHER__
#define __GATHER__
__global__ void gather_in(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
VertexId s;
if(id < shard->Ein)
{
s = shard->inEdges[id].src;
shard->inUpdates[id] = vertices[s].val;
}
}
/*__global__ void gather_out(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
VertexId d;
if(id < shard->Eout)
{
d = shard->outEdges[id].dest;
shard->outUpdates[id] = vertices[d].val;
}
}
*/
#endif
#ifndef __APPLY__
#define __APPLY__
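// apply: one thread per vertex of the shard. Each vertex takes the minimum value seen over
// its gathered in-edge updates (min-label propagation for connected components); changed
// records vertices whose label dropped so find_frontier can activate their out-neighbours.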
__global__ void apply(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
int vid = id + shard->Vstart;
if(vid <= shard->Vend)
{
int i;
VertexVal min=INT_MAX,newval;
shard->changed[id] = false;
// in-updates
if(id == 0)
i=0;
else
i=shard->inEdgesMap[id-1];
for(; i < shard->inEdgesMap[id]; i++)
{
newval = shard->inUpdates[i];
min = (min > newval) ? newval : min;
}
// out-updates
/* if(id == 0)
i=0;
else
i=shard->outEdgesMap[id-1];
for(; i < shard->outEdgesMap[id]; i++)
{
newval = shard->outUpdates[i];
min = (min > newval) ? newval : min;
}
*/
if(vertices[vid].val > min)
{
shard->changed[id] = true;
vertices[vid].val = min;
}
}
}
__global__ void find_frontier(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->Eout)
{
VertexId s=shard->outEdges[id].src;
VertexId d=shard->outEdges[id].dest;
if(shard->changed[s - shard->Vstart] == true)
{
frontier_next[d] = true;
}
}
}
#endif
/*
#ifndef __SCATTER__
#define __SCATTER__
__global__ void scatter(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
}
#endif
*/
__global__ void reset_frontier(bool * frontier, int V)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < V)
{
frontier[id] = false;
}
}
bool costIn(edge_t a, edge_t b)
{
return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest));
}
bool costOut(edge_t a, edge_t b)
{
return ((a.dest < b.dest) || (a.dest == b.dest && a.src < b.src));
}
int main(int argc, char * argv[])
{
struct timeval t1,t2;
static char * filename;
if(argc!=2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else
{
filename = argv[1];
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(hipSetDevice(0));
printf("Begin file reading...\n");
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j, k;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
//We are always going to have at least 2 shards to allow double buffering
int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD;
int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the number of edges across the shards
//Array of vectors. vector i contains the out edges of vertex i
vector< vector<edge_t> > inEdges(num_vertices);
vector< vector<edge_t> > outEdges(num_vertices);
int * prefixVIn = (int *) calloc(num_vertices,sizeof(int));
int * prefixVOut = (int *) calloc(num_vertices,sizeof(int));
int s,d;
// It will contain the current component label of each vertex
vertex_t *vertices;
vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t));
CUDA_SAFE_CALL(hipMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));
//Initialise the vertices
for(i=0; i<num_vertices; i++)
{
vertices_host[i].numInEdges = 0;
vertices_host[i].numOutEdges = 0;
vertices_host[i].val = i;
}
srand((unsigned) time(0));
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
edge_t e,e2;
e.src=s;
e.dest=d;
e.val=rand()%10+1;
e2.src=d;
e2.dest=s;
e2.val=e.val;
inEdges[d].push_back(e);
outEdges[s].push_back(e);
inEdges[s].push_back(e2);
outEdges[d].push_back(e2);
vertices_host[s].numOutEdges +=2;
vertices_host[d].numInEdges +=2;
}
printf("Finished file reading.\n");
printf("\nBegin interval construction...\n");
// Construction of intervals
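// Greedily grow each interval of consecutive vertices until its accumulated in-edge count
// exceeds MAX_EDGES_PER_SHARD, then start the next interval at the following vertex.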
gettimeofday(&t1,NULL);
int num_intervals = 0, add = 1;
vector<int> startInter;
prefixVIn[0] = inEdges[0].size();
prefixVOut[0] = outEdges[0].size();
if(prefixVIn[0] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(0);
num_intervals++;
add = 0;
}
for(i=1; i<num_vertices; i++)
{
prefixVIn[i] = inEdges[i].size();
prefixVOut[i] = outEdges[i].size();
if(add==1)
{
prefixVIn[i] += prefixVIn[i-1];
prefixVOut[i] += prefixVOut[i-1];
}
if(prefixVIn[i] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(i);
num_intervals++;
add = 0;
}
else
add = 1;
}
if(add==1)
{
startInter.push_back(i-1);
num_intervals++;
}
interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t));
for(i=0; i<num_intervals; i++)
{
interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1);
interval[i].end = startInter[i];
/* for(j=interval[i].start; j<=interval[i].end; j++)
vertices_host[j].shardId = i;*/
}
gettimeofday(&t2,NULL);
printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("\nBegin shard construction...\n");
//Construction of shard
gettimeofday(&t1,NULL);
shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t));
//Finding the max number of edges in a shard
// We will allocate space for that many edges to each shard to maintain consistency
int MAX_NUM_INEDGES_SHARD = INT_MIN;
int MAX_NUM_OUTEDGES_SHARD = INT_MIN;
int MAX_NUM_VERTICES_SHARD = INT_MIN;
for(i=0; i<num_intervals; i++)
{
int t = prefixVIn[interval[i].end];
if(t > MAX_NUM_INEDGES_SHARD)
MAX_NUM_INEDGES_SHARD = t;
int z = prefixVOut[interval[i].end];
if(z > MAX_NUM_OUTEDGES_SHARD)
MAX_NUM_OUTEDGES_SHARD = z;
int q = interval[i].end-interval[i].start+1;
if(q > MAX_NUM_VERTICES_SHARD)
MAX_NUM_VERTICES_SHARD = q;
}
for(i=0; i<num_intervals; i++)
{
shard[i].Ein = prefixVIn[interval[i].end];
shard[i].Eout = prefixVOut[interval[i].end];
// first and last vertices in shard
shard[i].Vstart = interval[i].start;
shard[i].Vend = interval[i].end;
shard[i].inEdgesMap = (VertexId *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(VertexId));
shard[i].outEdgesMap = (VertexId *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(VertexId));
shard[i].inEdges = (edge_t *) malloc(MAX_NUM_INEDGES_SHARD*sizeof(edge_t));
shard[i].outEdges = (edge_t *) malloc(MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t));
shard[i].inUpdates = (VertexVal *) malloc(MAX_NUM_INEDGES_SHARD*sizeof(VertexVal));
shard[i].outUpdates = (VertexVal *) malloc(MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal));
shard[i].changed = (bool *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(bool));
}
for(i=0; i<num_intervals; i++)
{
int v = 0, e1 = 0, e2 = 0;
for(j=interval[i].start; j<=interval[i].end; j++)
{
sort(inEdges[j].begin(),inEdges[j].end(),costIn);
shard[i].inEdgesMap[v] = inEdges[j].size();
if(v!=0)
shard[i].inEdgesMap[v] += shard[i].inEdgesMap[v-1];
for(vector<edge_t>::iterator it=inEdges[j].begin(); it!=inEdges[j].end(); ++it)
{
shard[i].inEdges[e1++] = (*it);
}
sort(outEdges[j].begin(),outEdges[j].end(),costOut);
shard[i].outEdgesMap[v] = outEdges[j].size();
if(v!=0)
shard[i].outEdgesMap[v] += shard[i].outEdgesMap[v-1];
for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it)
{
shard[i].outEdges[e2++] = (*it);
}
v++;
}
}
gettimeofday(&t2,NULL);
printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
// It will contain the vertices in the next frontier
bool *frontier_cur, *frontier_next;
bool *frontier_host = (bool *) malloc(num_vertices*sizeof(bool));
for(i=0; i<num_vertices; i++)
frontier_host[i] = true;
CUDA_SAFE_CALL(hipMalloc((void **)&frontier_cur, num_vertices*sizeof(bool)));
CUDA_SAFE_CALL(hipMalloc((void **)&frontier_next, num_vertices*sizeof(bool)));
/*
Allocating shards on the device
*/
shard_t * shard_dev[NUM_STREAMS];
VertexId * inEdgesMap_dev[NUM_STREAMS];
VertexId * outEdgesMap_dev[NUM_STREAMS];
edge_t * inEdges_dev[NUM_STREAMS];
edge_t * outEdges_dev[NUM_STREAMS];
VertexVal * inUpdates_dev[NUM_STREAMS];
VertexVal * outUpdates_dev[NUM_STREAMS];
bool * changed_dev[NUM_STREAMS];
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(hipMalloc((void **)&shard_dev[i], sizeof(shard_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&inEdgesMap_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(VertexId)));
CUDA_SAFE_CALL(hipMalloc((void **)&outEdgesMap_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(VertexId)));
CUDA_SAFE_CALL(hipMalloc((void **)&inEdges_dev[i], MAX_NUM_INEDGES_SHARD*sizeof(edge_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&outEdges_dev[i], MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t)));
CUDA_SAFE_CALL(hipMalloc((void **)&inUpdates_dev[i], MAX_NUM_INEDGES_SHARD*sizeof(VertexVal)));
CUDA_SAFE_CALL(hipMalloc((void **)&outUpdates_dev[i], MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal)));
CUDA_SAFE_CALL(hipMalloc((void **)&changed_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(bool)));
}
// Declaring cuda Streams and events
hipStream_t * str;
hipEvent_t * start;
hipEvent_t * stop;
str = (hipStream_t *) malloc(NUM_STREAMS * sizeof(hipStream_t));
start = (hipEvent_t *) malloc(NUM_STREAMS * sizeof(hipEvent_t));
stop = (hipEvent_t *) malloc(NUM_STREAMS * sizeof(hipEvent_t));
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(hipStreamCreate(&(str[i])));
CUDA_SAFE_CALL(hipEventCreate(&(start[i])));
CUDA_SAFE_CALL(hipEventCreate(&(stop[i])));
}
double time = 0;
float diff;
/*
Grid and block dimensions for gather phase (edge centric)
*/
int num_of_blocks = 1;
int MAX_THREADS = MAX_NUM_INEDGES_SHARD;
int num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_inedge( num_of_blocks, 1, 1);
dim3 threads_inedge( num_of_threads_per_block, 1, 1);
/*
Grid and block dimensions for apply phase (vertex centric)
*/
num_of_blocks = 1;
MAX_THREADS = MAX_NUM_VERTICES_SHARD;
num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_vertex( num_of_blocks, 1, 1);
dim3 threads_vertex( num_of_threads_per_block, 1, 1);
/*
Grid and block dimensions for scatter phase (edge centric)
*/
num_of_blocks = 1;
MAX_THREADS = MAX_NUM_OUTEDGES_SHARD;
num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_outedge( num_of_blocks, 1, 1);
dim3 threads_outedge( num_of_threads_per_block, 1, 1);
printf("Begin kernel\n");
CUDA_SAFE_CALL(hipMemcpy(vertices,vertices_host,num_vertices*sizeof(vertex_t),hipMemcpyHostToDevice));
int over, sid;
k=0;
gettimeofday(&t1,NULL);
do
{
double tempTime;
CUDA_SAFE_CALL(hipMemcpy(frontier_cur,frontier_host,num_vertices*sizeof(bool),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( reset_frontier), dim3(((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK)),dim3(MAX_THREADS_PER_BLOCK), 0, 0, frontier_next, num_vertices);
CUDA_SAFE_CALL(hipDeviceSynchronize());
#ifdef __GATHER__
/*
GATHER PHASE BEGINS
*/
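// Shards are processed NUM_STREAMS at a time, one per stream: each stream's device buffers
// are refilled with the current shard's data and the pointer members of the device-side
// shard struct are patched to reference them before the kernel is launched on that stream.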
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->changed), &(changed_dev[j]), sizeof(bool *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipEventRecord(start[j],str[j]));
hipLaunchKernelGGL(( gather_in), dim3(grid_inedge), dim3(threads_inedge), 0, str[j], shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
// gather_out<<<grid_outedge, threads_outedge, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[j]));
CUDA_SAFE_CALL(hipEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[j]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
CUDA_SAFE_CALL(hipMemcpyAsync(shard[sid].inUpdates, inUpdates_dev[j], MAX_NUM_INEDGES_SHARD*sizeof(VertexVal),hipMemcpyDeviceToHost,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(shard[sid].outUpdates, outUpdates_dev[j], MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal),hipMemcpyDeviceToHost,str[j]));
//CUDA_SAFE_CALL(hipMemcpyAsync(shard[sid].changed, changed_dev[j], MAX_NUM_VERTICES_SHARD*sizeof(bool),hipMemcpyDeviceToHost,str[j]));
}
time += tempTime;
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
/*
GATHER PHASE ENDS
*/
#endif
#ifdef __APPLY__
/*
APPLY PHASE BEGINS
*/
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inUpdates_dev[j], shard[sid].inUpdates, MAX_NUM_INEDGES_SHARD*sizeof(VertexVal),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outUpdates_dev[j], shard[sid].outUpdates, MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
//CUDA_SAFE_CALL(hipMemcpyAsync(changed_dev[j], shard[sid].changed, MAX_NUM_VERTICES_SHARD*sizeof(bool),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->changed), &(changed_dev[j]), sizeof(bool *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipEventRecord(start[j],str[j]));
hipLaunchKernelGGL(( apply), dim3(grid_vertex), dim3(threads_vertex), 0, str[j], shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
hipLaunchKernelGGL(( find_frontier), dim3(grid_outedge), dim3(threads_outedge), 0, str[j], shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[j]));
CUDA_SAFE_CALL(hipEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[j]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
}
time += tempTime;
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
/*
APPLY PHASE ENDS
*/
#endif
#ifdef __SCATTER__
/*
SCATTER PHASE BEGINS
*/
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(hipMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),hipMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(hipEventRecord(start[j],str[j]));
hipLaunchKernelGGL(( scatter), dim3(grid_outedge), dim3(threads_outedge), 0, str[j], shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(hipStreamSynchronize(str[j]));
CUDA_SAFE_CALL(hipEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(hipEventSynchronize(stop[j]));
CUDA_SAFE_CALL(hipEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
}
time += tempTime;
}
CUDA_SAFE_CALL(hipDeviceSynchronize());
/*
SCATTER PHASE ENDS
*/
#endif
CUDA_SAFE_CALL(hipMemcpy(frontier_host,frontier_next,num_vertices*sizeof(bool),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipDeviceSynchronize());
over=false;
for(i=0; i<num_vertices; i++)
{
if(frontier_host[i])
{
over=true;
break;
}
}
k++;
}while(over);
CUDA_SAFE_CALL(hipDeviceSynchronize());
gettimeofday(&t2,NULL);
printf("Time to Connected components : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("Number of iterations : %d\n",k);
/*CUDA_SAFE_CALL(hipMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), hipMemcpyDeviceToHost));
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Component %d\n",i,vertices_host[i].val);
}*/
printf("Time: %f ms\n",time);
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(hipStreamDestroy(str[i]));
CUDA_SAFE_CALL(hipEventDestroy(start[i]));
CUDA_SAFE_CALL(hipEventDestroy(stop[i]));
CUDA_SAFE_CALL(hipFree(inEdgesMap_dev[i]));
CUDA_SAFE_CALL(hipFree(outEdgesMap_dev[i]));
CUDA_SAFE_CALL(hipFree(inEdges_dev[i]));
CUDA_SAFE_CALL(hipFree(outEdges_dev[i]));
CUDA_SAFE_CALL(hipFree(inUpdates_dev[i]));
CUDA_SAFE_CALL(hipFree(outUpdates_dev[i]));
CUDA_SAFE_CALL(hipFree(changed_dev[i]));
CUDA_SAFE_CALL(hipFree(shard_dev[i]));
}
free(interval);
for(i=0; i<num_intervals; i++)
{
free(shard[i].inEdgesMap);
free(shard[i].outEdgesMap);
free(shard[i].inEdges);
free(shard[i].outEdges);
free(shard[i].inUpdates);
free(shard[i].outUpdates);
free(shard[i].changed);
}
free(shard);
free(vertices_host);
free(frontier_host);
CUDA_SAFE_CALL(hipFree(vertices));
CUDA_SAFE_CALL(hipFree(frontier_cur));
CUDA_SAFE_CALL(hipFree(frontier_next));
return 0;
}
| 994f64da74f1e2c48aafacaca9f6fa498015d503.cu | #include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include <cmath>
using namespace std;
#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define GLOBAL_MAX_EDGES_PER_SHARD 67108864 //8388608
#define NUM_STREAMS 2
#define ERR 0.01
void safe_call(cudaError_t ret, int line)
{
if(ret!=cudaSuccess)
{
printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
exit(-1);
}
}
typedef int VertexId;
typedef int VertexVal;
typedef VertexVal EdgeVal;
typedef struct __interval
{
VertexId start;
VertexId end;
} interval_t;
typedef struct __edge
{
VertexId src;
VertexId dest;
EdgeVal val;
} edge_t;
typedef struct __vertex
{
int numInEdges;
int numOutEdges;
VertexVal val;
// int shardId;
} vertex_t;
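// A shard owns the contiguous vertex range [Vstart, Vend]. inEdgesMap/outEdgesMap hold
// per-vertex running edge counts (CSR-style end offsets) into inEdges/outEdges.
// inUpdates/outUpdates are per-edge scratch buffers written in the gather phase and read
// in the apply phase; changed marks vertices whose value shrank in the current iteration.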
typedef struct __shard
{
int Ein;
int Eout;
VertexId Vstart;
VertexId Vend;
VertexId * inEdgesMap;
VertexId * outEdgesMap;
edge_t * inEdges;
edge_t * outEdges;
bool * changed;
VertexVal * inUpdates;
VertexVal * outUpdates;
} shard_t;
/*__device__ double atomicAdd(VertexVal* address, VertexVal val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
*/
#ifndef __GATHER__
#define __GATHER__
__global__ void gather_in(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
VertexId s;
if(id < shard->Ein)
{
s = shard->inEdges[id].src;
shard->inUpdates[id] = vertices[s].val;
}
}
/*__global__ void gather_out(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
VertexId d;
if(id < shard->Eout)
{
d = shard->outEdges[id].dest;
shard->outUpdates[id] = vertices[d].val;
}
}
*/
#endif
#ifndef __APPLY__
#define __APPLY__
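// apply: one thread per vertex of the shard. Each vertex takes the minimum value seen over
// its gathered in-edge updates (min-label propagation for connected components); changed
// records vertices whose label dropped so find_frontier can activate their out-neighbours.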
__global__ void apply(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
int vid = id + shard->Vstart;
if(vid <= shard->Vend)
{
int i;
VertexVal min=INT_MAX,newval;
shard->changed[id] = false;
// in-updates
if(id == 0)
i=0;
else
i=shard->inEdgesMap[id-1];
for(; i < shard->inEdgesMap[id]; i++)
{
newval = shard->inUpdates[i];
min = (min > newval) ? newval : min;
}
// out-updates
/* if(id == 0)
i=0;
else
i=shard->outEdgesMap[id-1];
for(; i < shard->outEdgesMap[id]; i++)
{
newval = shard->outUpdates[i];
min = (min > newval) ? newval : min;
}
*/
if(vertices[vid].val > min)
{
shard->changed[id] = true;
vertices[vid].val = min;
}
}
}
__global__ void find_frontier(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < shard->Eout)
{
VertexId s=shard->outEdges[id].src;
VertexId d=shard->outEdges[id].dest;
if(shard->changed[s - shard->Vstart] == true)
{
frontier_next[d] = true;
}
}
}
#endif
/*
#ifndef __SCATTER__
#define __SCATTER__
__global__ void scatter(const shard_t * shard, vertex_t * vertices, bool * frontier_cur, bool * frontier_next, int num_vertices, int current_depth)
{
}
#endif
*/
__global__ void reset_frontier(bool * frontier, int V)
{
int id = blockDim.x*blockIdx.x + threadIdx.x;
if(id < V)
{
frontier[id] = false;
}
}
bool costIn(edge_t a, edge_t b)
{
return ((a.src < b.src) || (a.src == b.src && a.dest < b.dest));
}
bool costOut(edge_t a, edge_t b)
{
return ((a.dest < b.dest) || (a.dest == b.dest && a.src < b.src));
}
int main(int argc, char * argv[])
{
struct timeval t1,t2;
static char * filename;
if(argc!=2)
{
printf("./a.out <filename>\n");
exit(-1);
}
else
{
filename = argv[1];
}
FILE * fp = fopen(filename,"r");
if(!fp)
{
printf("Error reading file.\n");
exit(-1);
}
/* Set cuda device to K40 */
CUDA_SAFE_CALL(cudaSetDevice(0));
printf("Begin file reading...\n");
/* Get graph from file into CPU memory */
int num_vertices, num_edges, i, j, k;
fscanf(fp,"%d %d",&num_vertices,&num_edges);
//We are always going to have at least 2 shards to allow double buffering
int ns = num_edges / GLOBAL_MAX_EDGES_PER_SHARD;
int MAX_EDGES_PER_SHARD = (ns == 0) ? (num_edges + 1)/2 : (num_edges + 1)/(ns + 1); //We do this to balance the number of edges across the shards
//Array of vectors. vector i contains the out edges of vertex i
vector< vector<edge_t> > inEdges(num_vertices);
vector< vector<edge_t> > outEdges(num_vertices);
int * prefixVIn = (int *) calloc(num_vertices,sizeof(int));
int * prefixVOut = (int *) calloc(num_vertices,sizeof(int));
int s,d;
// It will contain the current component label of each vertex
vertex_t *vertices;
vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t));
CUDA_SAFE_CALL(cudaMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));
//Initialise the vertices
for(i=0; i<num_vertices; i++)
{
vertices_host[i].numInEdges = 0;
vertices_host[i].numOutEdges = 0;
vertices_host[i].val = i;
}
srand((unsigned) time(0));
for(i=0; i<num_edges; i++)
{
fscanf(fp,"%d",&s);
fscanf(fp,"%d",&d);
edge_t e,e2;
e.src=s;
e.dest=d;
e.val=rand()%10+1;
e2.src=d;
e2.dest=s;
e2.val=e.val;
inEdges[d].push_back(e);
outEdges[s].push_back(e);
inEdges[s].push_back(e2);
outEdges[d].push_back(e2);
vertices_host[s].numOutEdges +=2;
vertices_host[d].numInEdges +=2;
}
printf("Finished file reading.\n");
printf("\nBegin interval construction...\n");
// Construction of intervals
gettimeofday(&t1,NULL);
int num_intervals = 0, add = 1;
vector<int> startInter;
prefixVIn[0] = inEdges[0].size();
prefixVOut[0] = outEdges[0].size();
if(prefixVIn[0] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(0);
num_intervals++;
add = 0;
}
for(i=1; i<num_vertices; i++)
{
prefixVIn[i] = inEdges[i].size();
prefixVOut[i] = outEdges[i].size();
if(add==1)
{
prefixVIn[i] += prefixVIn[i-1];
prefixVOut[i] += prefixVOut[i-1];
}
if(prefixVIn[i] > MAX_EDGES_PER_SHARD)
{
startInter.push_back(i);
num_intervals++;
add = 0;
}
else
add = 1;
}
if(add==1)
{
startInter.push_back(i-1);
num_intervals++;
}
interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t));
for(i=0; i<num_intervals; i++)
{
interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1);
interval[i].end = startInter[i];
/* for(j=interval[i].start; j<=interval[i].end; j++)
vertices_host[j].shardId = i;*/
}
gettimeofday(&t2,NULL);
printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("\nBegin shard construction...\n");
//Construction of shard
gettimeofday(&t1,NULL);
shard_t * shard = (shard_t *) malloc(num_intervals*sizeof(shard_t));
//Finding the max number of edges in a shard
// We will allocate space for that many edges to each shard to maintain consistency
int MAX_NUM_INEDGES_SHARD = INT_MIN;
int MAX_NUM_OUTEDGES_SHARD = INT_MIN;
int MAX_NUM_VERTICES_SHARD = INT_MIN;
for(i=0; i<num_intervals; i++)
{
int t = prefixVIn[interval[i].end];
if(t > MAX_NUM_INEDGES_SHARD)
MAX_NUM_INEDGES_SHARD = t;
int z = prefixVOut[interval[i].end];
if(z > MAX_NUM_OUTEDGES_SHARD)
MAX_NUM_OUTEDGES_SHARD = z;
int q = interval[i].end-interval[i].start+1;
if(q > MAX_NUM_VERTICES_SHARD)
MAX_NUM_VERTICES_SHARD = q;
}
for(i=0; i<num_intervals; i++)
{
shard[i].Ein = prefixVIn[interval[i].end];
shard[i].Eout = prefixVOut[interval[i].end];
// first and last vertices in shard
shard[i].Vstart = interval[i].start;
shard[i].Vend = interval[i].end;
shard[i].inEdgesMap = (VertexId *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(VertexId));
shard[i].outEdgesMap = (VertexId *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(VertexId));
shard[i].inEdges = (edge_t *) malloc(MAX_NUM_INEDGES_SHARD*sizeof(edge_t));
shard[i].outEdges = (edge_t *) malloc(MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t));
shard[i].inUpdates = (VertexVal *) malloc(MAX_NUM_INEDGES_SHARD*sizeof(VertexVal));
shard[i].outUpdates = (VertexVal *) malloc(MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal));
shard[i].changed = (bool *) malloc(MAX_NUM_VERTICES_SHARD*sizeof(bool));
}
for(i=0; i<num_intervals; i++)
{
int v = 0, e1 = 0, e2 = 0;
for(j=interval[i].start; j<=interval[i].end; j++)
{
sort(inEdges[j].begin(),inEdges[j].end(),costIn);
shard[i].inEdgesMap[v] = inEdges[j].size();
if(v!=0)
shard[i].inEdgesMap[v] += shard[i].inEdgesMap[v-1];
for(vector<edge_t>::iterator it=inEdges[j].begin(); it!=inEdges[j].end(); ++it)
{
shard[i].inEdges[e1++] = (*it);
}
sort(outEdges[j].begin(),outEdges[j].end(),costOut);
shard[i].outEdgesMap[v] = outEdges[j].size();
if(v!=0)
shard[i].outEdgesMap[v] += shard[i].outEdgesMap[v-1];
for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it)
{
shard[i].outEdges[e2++] = (*it);
}
v++;
}
}
gettimeofday(&t2,NULL);
printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
// It will contain the vertices in the next frontier
bool *frontier_cur, *frontier_next;
bool *frontier_host = (bool *) malloc(num_vertices*sizeof(bool));
for(i=0; i<num_vertices; i++)
frontier_host[i] = true;
CUDA_SAFE_CALL(cudaMalloc((void **)&frontier_cur, num_vertices*sizeof(bool)));
CUDA_SAFE_CALL(cudaMalloc((void **)&frontier_next, num_vertices*sizeof(bool)));
/*
Allocating shards on the device
*/
shard_t * shard_dev[NUM_STREAMS];
VertexId * inEdgesMap_dev[NUM_STREAMS];
VertexId * outEdgesMap_dev[NUM_STREAMS];
edge_t * inEdges_dev[NUM_STREAMS];
edge_t * outEdges_dev[NUM_STREAMS];
VertexVal * inUpdates_dev[NUM_STREAMS];
VertexVal * outUpdates_dev[NUM_STREAMS];
bool * changed_dev[NUM_STREAMS];
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(cudaMalloc((void **)&shard_dev[i], sizeof(shard_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&inEdgesMap_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(VertexId)));
CUDA_SAFE_CALL(cudaMalloc((void **)&outEdgesMap_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(VertexId)));
CUDA_SAFE_CALL(cudaMalloc((void **)&inEdges_dev[i], MAX_NUM_INEDGES_SHARD*sizeof(edge_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&outEdges_dev[i], MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t)));
CUDA_SAFE_CALL(cudaMalloc((void **)&inUpdates_dev[i], MAX_NUM_INEDGES_SHARD*sizeof(VertexVal)));
CUDA_SAFE_CALL(cudaMalloc((void **)&outUpdates_dev[i], MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal)));
CUDA_SAFE_CALL(cudaMalloc((void **)&changed_dev[i], MAX_NUM_VERTICES_SHARD*sizeof(bool)));
}
// Declaring cuda Streams and events
cudaStream_t * str;
cudaEvent_t * start;
cudaEvent_t * stop;
str = (cudaStream_t *) malloc(NUM_STREAMS * sizeof(cudaStream_t));
start = (cudaEvent_t *) malloc(NUM_STREAMS * sizeof(cudaEvent_t));
stop = (cudaEvent_t *) malloc(NUM_STREAMS * sizeof(cudaEvent_t));
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(cudaStreamCreate(&(str[i])));
CUDA_SAFE_CALL(cudaEventCreate(&(start[i])));
CUDA_SAFE_CALL(cudaEventCreate(&(stop[i])));
}
double time = 0;
float diff;
/*
Grid and block dimensions for gather phase (edge centric)
*/
int num_of_blocks = 1;
int MAX_THREADS = MAX_NUM_INEDGES_SHARD;
int num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_inedge( num_of_blocks, 1, 1);
dim3 threads_inedge( num_of_threads_per_block, 1, 1);
/*
Grid and block dimensions for apply phase (vertex centric)
*/
num_of_blocks = 1;
MAX_THREADS = MAX_NUM_VERTICES_SHARD;
num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_vertex( num_of_blocks, 1, 1);
dim3 threads_vertex( num_of_threads_per_block, 1, 1);
/*
Grid and block dimensions for scatter phase (edge centric)
*/
num_of_blocks = 1;
MAX_THREADS = MAX_NUM_OUTEDGES_SHARD;
num_of_threads_per_block = MAX_THREADS;
if(MAX_THREADS>MAX_THREADS_PER_BLOCK)
{
num_of_blocks = (int)ceil(MAX_THREADS/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
dim3 grid_outedge( num_of_blocks, 1, 1);
dim3 threads_outedge( num_of_threads_per_block, 1, 1);
printf("Begin kernel\n");
CUDA_SAFE_CALL(cudaMemcpy(vertices,vertices_host,num_vertices*sizeof(vertex_t),cudaMemcpyHostToDevice));
int over, sid;
k=0;
gettimeofday(&t1,NULL);
do
{
double tempTime;
CUDA_SAFE_CALL(cudaMemcpy(frontier_cur,frontier_host,num_vertices*sizeof(bool),cudaMemcpyHostToDevice));
reset_frontier<<<((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK),MAX_THREADS_PER_BLOCK>>> (frontier_next, num_vertices);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
#ifdef __GATHER__
/*
GATHER PHASE BEGINS
*/
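// Shards are processed NUM_STREAMS at a time, one per stream: each stream's device buffers
// are refilled with the current shard's data and the pointer members of the device-side
// shard struct are patched to reference them before the kernel is launched on that stream.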
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->changed), &(changed_dev[j]), sizeof(bool *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaEventRecord(start[j],str[j]));
gather_in<<<grid_inedge, threads_inedge, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
// gather_out<<<grid_outedge, threads_outedge, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[j]));
CUDA_SAFE_CALL(cudaEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[j]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
CUDA_SAFE_CALL(cudaMemcpyAsync(shard[sid].inUpdates, inUpdates_dev[j], MAX_NUM_INEDGES_SHARD*sizeof(VertexVal),cudaMemcpyDeviceToHost,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(shard[sid].outUpdates, outUpdates_dev[j], MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal),cudaMemcpyDeviceToHost,str[j]));
//CUDA_SAFE_CALL(cudaMemcpyAsync(shard[sid].changed, changed_dev[j], MAX_NUM_VERTICES_SHARD*sizeof(bool),cudaMemcpyDeviceToHost,str[j]));
}
time += tempTime;
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
/*
GATHER PHASE ENDS
*/
#endif
#ifdef __APPLY__
/*
APPLY PHASE BEGINS
*/
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inUpdates_dev[j], shard[sid].inUpdates, MAX_NUM_INEDGES_SHARD*sizeof(VertexVal),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outUpdates_dev[j], shard[sid].outUpdates, MAX_NUM_OUTEDGES_SHARD*sizeof(VertexVal),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
//CUDA_SAFE_CALL(cudaMemcpyAsync(changed_dev[j], shard[sid].changed, MAX_NUM_VERTICES_SHARD*sizeof(bool),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->changed), &(changed_dev[j]), sizeof(bool *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaEventRecord(start[j],str[j]));
apply<<<grid_vertex, threads_vertex, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
find_frontier<<<grid_outedge, threads_outedge, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[j]));
CUDA_SAFE_CALL(cudaEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[j]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
}
time += tempTime;
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
/*
APPLY PHASE ENDS
*/
#endif
#ifdef __SCATTER__
/*
SCATTER PHASE BEGINS
*/
for(i=0; i<num_intervals; i+=NUM_STREAMS)
{
tempTime = 0;
for(j=0; (j<NUM_STREAMS && (i+j)<num_intervals); j++)
{
sid = i+j;
CUDA_SAFE_CALL(cudaMemcpyAsync(shard_dev[j], &shard[sid], sizeof(shard_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdgesMap_dev[j], shard[sid].inEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdgesMap), &(inEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdgesMap_dev[j], shard[sid].outEdgesMap, MAX_NUM_VERTICES_SHARD*sizeof(VertexId),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdgesMap), &(outEdgesMap_dev[j]), sizeof(VertexId *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(inEdges_dev[j], shard[sid].inEdges, MAX_NUM_INEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inEdges), &(inEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(outEdges_dev[j], shard[sid].outEdges, MAX_NUM_OUTEDGES_SHARD*sizeof(edge_t),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outEdges), &(outEdges_dev[j]), sizeof(edge_t *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->inUpdates), &(inUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaMemcpyAsync(&((shard_dev[j])->outUpdates), &(outUpdates_dev[j]), sizeof(VertexVal *),cudaMemcpyHostToDevice,str[j]));
CUDA_SAFE_CALL(cudaEventRecord(start[j],str[j]));
scatter<<<grid_outedge, threads_outedge, 0, str[j]>>> (shard_dev[j], vertices, frontier_cur, frontier_next, num_vertices, k);
CUDA_SAFE_CALL(cudaStreamSynchronize(str[j]));
CUDA_SAFE_CALL(cudaEventRecord(stop[j],str[j]));
CUDA_SAFE_CALL(cudaEventSynchronize(stop[j]));
CUDA_SAFE_CALL(cudaEventElapsedTime(&diff,start[j],stop[j]));
tempTime += diff;
}
time += tempTime;
}
CUDA_SAFE_CALL(cudaDeviceSynchronize());
/*
SCATTER PHASE ENDS
*/
#endif
CUDA_SAFE_CALL(cudaMemcpy(frontier_host,frontier_next,num_vertices*sizeof(bool),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
over=false;
for(i=0; i<num_vertices; i++)
{
if(frontier_host[i])
{
over=true;
break;
}
}
k++;
}while(over);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
gettimeofday(&t2,NULL);
printf("Time to Connected components : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));
printf("Number of iterations : %d\n",k);
/*CUDA_SAFE_CALL(cudaMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), cudaMemcpyDeviceToHost));
for(int i = 0; i < num_vertices; i++)
{
printf("Vertex %d Component %d\n",i,vertices_host[i].val);
}*/
printf("Time: %f ms\n",time);
for(int i = 0; i < NUM_STREAMS; i++)
{
CUDA_SAFE_CALL(cudaStreamDestroy(str[i]));
CUDA_SAFE_CALL(cudaEventDestroy(start[i]));
CUDA_SAFE_CALL(cudaEventDestroy(stop[i]));
CUDA_SAFE_CALL(cudaFree(inEdgesMap_dev[i]));
CUDA_SAFE_CALL(cudaFree(outEdgesMap_dev[i]));
CUDA_SAFE_CALL(cudaFree(inEdges_dev[i]));
CUDA_SAFE_CALL(cudaFree(outEdges_dev[i]));
CUDA_SAFE_CALL(cudaFree(inUpdates_dev[i]));
CUDA_SAFE_CALL(cudaFree(outUpdates_dev[i]));
CUDA_SAFE_CALL(cudaFree(changed_dev[i]));
CUDA_SAFE_CALL(cudaFree(shard_dev[i]));
}
free(interval);
for(i=0; i<num_intervals; i++)
{
free(shard[i].inEdgesMap);
free(shard[i].outEdgesMap);
free(shard[i].inEdges);
free(shard[i].outEdges);
free(shard[i].inUpdates);
free(shard[i].outUpdates);
free(shard[i].changed);
}
free(shard);
free(vertices_host);
free(frontier_host);
CUDA_SAFE_CALL(cudaFree(vertices));
CUDA_SAFE_CALL(cudaFree(frontier_cur));
CUDA_SAFE_CALL(cudaFree(frontier_next));
return 0;
}
|
72bf24144b23ebc0535c2a2b5d35ee95a303bbc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_dense_stereo.h"
#include "launch_utils.h"
#include "MatUtils.h"
#include "patch_score.h"
#include "disparity.h"
#include "InvalidValue.h"
#include "ImageApron.h"
namespace roo
{
const int MinDisparity = 0;
const int DefaultRad = 1;
//typedef SSNDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
typedef SANDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
//typedef SinglePixelSqPatchScore<float,ImgAccessRaw> DefaultSafeScoreType;
//////////////////////////////////////////////////////
// Cost Volume minimum
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
Tdisp bestd = 0;
Tvol bestc = vol(x,y,0);
const int maxDisp = min(maxDispVal, x+1);
for(int d=1; d < maxDisp; ++d) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
disp(x,y) = bestd;
}
template<typename Tdisp, typename Tvol>
void CostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDisp)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
hipLaunchKernelGGL(( KernCostVolMinimum<Tdisp,Tvol>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol,maxDisp);
}
template void CostVolMinimum<>(Image<char>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned short>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned char>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<unsigned short>,unsigned);
//////////////////////////////////////////////////////
// Cost Volume minimum subpix refinement
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSubpix(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal, float sd)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < disp.w && y < disp.h ) {
Tdisp bestd = 0;
Tvol bestc = 1E10;
for(int d=0; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
if( 0 < bestxr && bestxr < vol.w-1) {
// Fit parabola to neighbours
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = vol(x,y,dl);
const float sr = vol(x,y,dr);
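// Vertex of the parabola through (bestd-1, sl), (bestd, bestc), (bestd+1, sr):
// d* = bestd - (sr - sl) / (2*(sr - 2*bestc + sl)).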
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Minima of parabola
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
disp(x,y) = out;
}
}
void CostVolMinimumSubpix(Image<float> disp, Volume<float> vol, unsigned maxDisp, float sd)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
hipLaunchKernelGGL(( KernCostVolMinimumSubpix<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol,maxDisp,sd);
}
//////////////////////////////////////////////////////
// Cost Volume minimum square penalty subpix refinement
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSquarePenaltySubpix(Image<Tdisp> imga, Volume<Tvol> vol, Image<float> imgd, unsigned maxDispVal, float sd, float lambda, float theta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imga.w && y < imga.h ) {
const float lastd = imgd(x,y);
const float inv2theta = 1.0f / (2.0f*theta);
Tdisp bestd = 0;
Tvol bestc = inv2theta*lastd*lastd + lambda * vol(x,y,0);
for(int d=1; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const float ddif = lastd - d;
const Tvol c = inv2theta*ddif*ddif + lambda * vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
if( 0 < bestxr && bestxr < vol.w-1) {
// Newton Step
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = inv2theta*(lastd-dl)*(lastd-dl) + lambda * vol(x,y,dl); //vol(x,y,d3);
const float sr = inv2theta*(lastd-dr)*(lastd-dr) + lambda * vol(x,y,dr); //vol(x,y,d1);
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Check that minima is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
imga(x,y) = out;
}
}
void CostVolMinimumSquarePenaltySubpix(Image<float> imga, Volume<float> vol, Image<float> imgd, unsigned maxDisp, float sd, float lambda, float theta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imga);
hipLaunchKernelGGL(( KernCostVolMinimumSquarePenaltySubpix<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, imga,vol,imgd,maxDisp,sd,lambda,theta);
}
//////////////////////////////////////////////////////
// Edge Weight
//////////////////////////////////////////////////////
__global__ void KernExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imgi.w && y < imgi.h ) {
float2 grad = make_float2(0,0);
if(0<x && x<imgi.w-1) grad.x = imgi.GetCentralDiffDx<float>(x,y);
if(0<y && y<imgi.h-1) grad.y = imgi.GetCentralDiffDy<float>(x,y);
// if(0<x && x<imgi.w) grad.x = imgi.GetBackwardDiffDx<float>(x,y);
// if(0<y && y<imgi.h) grad.y = imgi.GetBackwardDiffDy<float>(x,y);
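// Edge-aware weight w = exp(-alpha * |grad I|^beta): close to 1 in flat regions, near 0 across strong edges.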
const float w = expf( -alpha * powf(sqrt(grad.x*grad.x + grad.y*grad.y),beta) );
imgw(x,y) = w;
}
}
void ExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imgw);
hipLaunchKernelGGL(( KernExponentialEdgeWeight), dim3(gridDim),dim3(blockDim), 0, 0, imgw,imgi,alpha,beta);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernDenseStereo(
Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
TD bestDisp = InvalidValue<TD>::Value();
if( Score::width <= x && x < (dCamLeft.w - Score::width) &&
Score::height <= y && y < (dCamLeft.h - Score::height) )
{
// Search for best matching pixel
float bestScore = 1E+36;
TD sndBestDisp = InvalidValue<TD>::Value();
float sndBestScore = 1E+37;
TD minDisp = min(maxDispVal, (TD)0);
TD maxDisp = max((TD)0, maxDispVal);
minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
maxDisp = min((int)maxDisp, (int)(x + Score::width));
for(TD c = minDisp; c <= maxDisp; c += dispStep ) {
const float score = Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
if(score < bestScore) {
sndBestDisp = bestDisp;
sndBestScore = bestScore;
bestDisp = c;
bestScore = score;
}else if( score <= sndBestScore) {
sndBestDisp = c;
sndBestScore = score;
}
}
if(abs(bestDisp-sndBestDisp) > 1) {
const float cd = (sndBestScore - bestScore) / bestScore;
if( cd < acceptThresh ) {
bestDisp = InvalidValue<TD>::Value();
}
}
}
dDisp(x,y) = bestDisp;
}
const int MAXBW = 512;
//template<typename TD, typename TI, typename Score>
//__global__ void KernDenseStereo(
// Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
//) {
// const int x = blockIdx.x*blockDim.x + threadIdx.x;
// const int y = blockIdx.y*blockDim.y + threadIdx.y;
// const int W = Score::width;
// const int RAD = W / 2;
//// TI patch[W*W];
// // only enough shared mem to cache right image
//// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_l;
// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_r;
// ///////////////////////////////////
//// // Cache line of right/left image +/- RAD
//// apron_l.CacheImage(dCamLeft);
// apron_r.CacheImage(dCamRight);
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of colums for norm
////// int colsuml = 0;
//// int colsumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// colsuml += apron_l.GetRelThread(0,i);
//// colsumr += apron_r.GetRelThread(0,i);
//// }
////// col_avg_l.GetRelThread(0,0) = colsuml / W;
//// col_avg_r.GetRelThread(0,0) = colsumr / W;
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of block for norm
////// int suml = 0;
//// int sumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// suml += col_avg_l.GetRelThreadClampX(i,0);
//// sumr += col_avg_r.GetRelThreadClampX(i,0);
//// }
////// avg_l.GetRelThread(0,0) = suml / W;
//// avg_r.GetRelThread(0,0) = sumr / W;
// ///////////////////////////////////
// // Cache left patch, compute mean
////// int sum_l = 0;
//// for(int r=-RAD; r<= RAD; ++r) {
////#pragma unroll
//// for(int c=-RAD; c<=RAD; ++c) {
//// const TI val = dCamLeft.GetWithClampedRange(x+c, y+r);
//// patch[(RAD+r)*W+(RAD+c)] = val;
////// sum_l += val;
//// }
//// }
//// const TI avg_l = sum_l / (W*W);
// __syncthreads();
// TD bestDisp = InvalidValue<TD>::Value();
// if( maxDispVal+Score::width <= x && x < (dCamLeft.w - Score::width) &&
// Score::height <= y && y < (dCamLeft.h - Score::height) )
// {
// // Search for best matching pixel
// float bestScore = 1E+36;
//// TD sndBestDisp = InvalidValue<TD>::Value();
//// float sndBestScore = 1E+37;
//// TD minDisp = min(maxDispVal, (TD)0);
//// TD maxDisp = max((TD)0, maxDispVal);
//// minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
//// maxDisp = min((int)maxDisp, (int)(x + Score::width));
// for(TD c = 0; c <= maxDispVal; c += 1 ) {
// float score = 0;
// for(int ky=-RAD; ky <= RAD; ++ky ) {
//#pragma unroll
// for(int kx=-RAD; kx <= RAD; ++kx ) {
//// const int pl = apron_l.GetRelThread(kx,ky);
// const int pl = 0;//patch[(RAD+ky)*W+(RAD+kx)];
// const int pr = apron_r.GetRelThread(kx-c,ky);
// score += abs(pl - pr);
// }
// }
////// Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
// if(score < bestScore) {
//// sndBestDisp = bestDisp;
//// sndBestScore = bestScore;
// bestDisp = c;
// bestScore = score;
//// }else if( score <= sndBestScore) {
//// sndBestDisp = c;
//// sndBestScore = score;
// }
// }
//// if(abs(bestDisp-sndBestDisp) > 1) {
//// const float cd = (sndBestScore - bestScore) / bestScore;
//// if( cd < acceptThresh ) {
//// bestDisp = InvalidValue<TD>::Value();
//// }
//// }
// }
// dDisp(x,y) = bestDisp;
//}
template<typename TDisp, typename TImg>
void DenseStereo(
Image<TDisp> dDisp, const Image<TImg> dCamLeft, const Image<TImg> dCamRight,
TDisp maxDisp, float acceptThresh, int score_rad
) {
dim3 blockDim(dDisp.w, 1);
dim3 gridDim(1, dDisp.h);
// InitDimFromOutputImageOver(blockDim,gridDim,dDisp);
const TDisp dispStep = 1;
if( score_rad == 0 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SinglePixelSqPatchScore<float,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 1 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,1,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 2 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,2,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 3 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,3,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 4 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,4,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 5 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,5,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 6 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,6,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 7 ) {
hipLaunchKernelGGL(( KernDenseStereo<TDisp, TImg, SANDPatchScore<float,7,ImgAccessRaw > >), dim3(gridDim),dim3(blockDim), 0, 0, dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}
}
template void DenseStereo<unsigned char, unsigned char>(Image<unsigned char>, const Image<unsigned char>, const Image<unsigned char>, unsigned char, float, int);
template void DenseStereo<char, unsigned char>(Image<char>, const Image<unsigned char>, const Image<unsigned char>, char, float, int);
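// Host-side usage sketch (illustrative only; buffer setup and parameter values are
// assumed, not prescribed by this file):
// roo::Image<unsigned char> dLeft = ..., dRight = ...; // rectified device images
// roo::Image<char> dDisp = ...; // device disparity output
// roo::DenseStereo<char, unsigned char>(dDisp, dLeft, dRight,
// (char)64 /*maxDisp*/, 0.1f /*acceptThresh*/, 3 /*score_rad*/);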
void DenseStereoSubpix(
Image<float> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, float maxDisp, float dispStep, float acceptThresh, int score_rad, bool score_normed
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
// if(score_normed) {
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 6 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,6,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 7 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,7,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }else{
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
const int RAD = 3;
const int W = 2*RAD+1;
__global__ void KernDenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int x = threadIdx.x;
const int y = blockIdx.y;
__shared__ unsigned char cache_l[W][MAXBW];
__shared__ unsigned char cache_r[W][MAXBW+1];
#pragma unroll
for(int r=0; r<W; ++r ) {
cache_l[r][x] = dCamLeft.Get(x,y+r-RAD);
cache_r[r][x] = dCamRight.Get(x,y+r-RAD);
}
__syncthreads();
int bestScore = 0xFFFFF;
int bestDisp = 0;
const int maxClipDisp = min(x-RAD,maxDisp);
for(int d=0; d<maxClipDisp; ++d)
{
const int xd = x-d;
int score = 0;
#pragma unroll
for(int r=0; r<W; ++r) {
score += abs(cache_l[r][x] - cache_r[r][xd]);
// const int yr = y-RAD+r;
// score += abs(dCamLeft(x,yr) - dCamRight(xd,yr));
}
if(score < bestScore) {
bestScore = score;
bestDisp = d;
}
}
dDisp(x,y) = bestDisp;
}
void DenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int w = dDisp.w;
const int h = dDisp.h - 2*RAD;
const int x = 0;
const int y = RAD;
dim3 blockDim(w, 1);
dim3 gridDim(1, h);
hipLaunchKernelGGL(( KernDenseStereoTest), dim3(gridDim),dim3(blockDim), 0, 0, dDisp.SubImage(x,y,w,h), dCamLeft.SubImage(x,y,w,h), dCamRight.SubImage(x,y,w,h), maxDisp);
}
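// Note: KernDenseStereoTest is launched with one block per image row and one thread
// per column, and its shared-memory caches are statically sized to MAXBW (512)
// columns, so this test path assumes dDisp.w <= MAXBW.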
//////////////////////////////////////////////////////
// Check Left and Right disparity images match
//////////////////////////////////////////////////////
template<typename TD>
__global__ void KernLeftRightCheck(
Image<TD> dispL, Image<TD> dispR, float sd, float maxDiff
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( dispL.InBounds(x,y) ) {
const TD dl = dispL(x,y);
const TD xr = x + sd*dl;
if( 0 <= xr && xr < dispR.w) {
const TD dr = dispR(xr, y);
if(!InvalidValue<TD>::IsValid(dr) || abs(dl - dr) > maxDiff) {
dispL(x,y) = InvalidValue<TD>::Value();
}
}else{
dispL(x,y) = InvalidValue<TD>::Value();
}
}
}
void LeftRightCheck(Image<char> dispL, Image<char> dispR, int sd, int maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
hipLaunchKernelGGL(( KernLeftRightCheck<char>), dim3(gridDim),dim3(blockDim), 0, 0, dispL, dispR, sd, maxDiff);
}
void LeftRightCheck(Image<float> dispL, Image<float> dispR, float sd, float maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
hipLaunchKernelGGL(( KernLeftRightCheck<float>), dim3(gridDim),dim3(blockDim), 0, 0, dispL, dispR, sd, maxDiff);
}
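// Consistency check, by example (sd = -1 is an assumed sign convention): if
// dispL(10,y) = 4 then the corresponding right-image column is xr = 10 - 4 = 6,
// and the left disparity is kept only when dispR(6,y) is valid and
// |dispL(10,y) - dispR(6,y)| <= maxDiff; otherwise dispL(10,y) is invalidated.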
//////////////////////////////////////////////////////
// Visualise cross section of disparity image
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernDisparityImageCrossSection(
Image<TD> dScore, Image<unsigned char> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, int y
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int c = blockIdx.y*blockDim.y + threadIdx.y;
const int rx = x-c;
const float score = ( 0<= rx && rx < dCamRight.w ) ? Score::Score(dCamLeft, x,y, dCamRight, rx, y) : 0;
const unsigned char mindisp = dDisp(x,y);
const float show = sqrt(score / Score::area) / 255.0f;
dScore(x,c) = show * make_float4( 1,1,1,1);
}
void DisparityImageCrossSection(
Image<float4> dScore, Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
hipLaunchKernelGGL(( KernDisparityImageCrossSection<float4, unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dScore, dDisp, dCamLeft, dCamRight, y);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo sub-pixel refinement
//////////////////////////////////////////////////////
template<typename TDo, typename TDi, typename TI, typename Score>
__global__ void KernDenseStereoSubpixelRefine(
Image<TDo> dDispOut, const Image<TDi> dDisp, const Image<TI> dCamLeft, const Image<TI> dCamRight
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int bestDisp = dDisp(x,y);
// Ignore things at infinity
if(bestDisp < MinDisparity) {
dDispOut(x,y) = InvalidValue<TDo>::Value();
return;
}
// Fit parabola to neighbours
const float d1 = bestDisp+1;
const float d2 = bestDisp;
const float d3 = bestDisp-1;
const float s1 = Score::Score(dCamLeft, x,y, dCamRight, x-d1,y);
const float s2 = Score::Score(dCamLeft, x,y, dCamRight, x-d2,y);
const float s3 = Score::Score(dCamLeft, x,y, dCamRight, x-d3,y);
// Coefficients of the parabola through (d1,s1),(d2,s2),(d3,s3)
const float denom = (d1 - d2)*(d1 - d3)*(d2 - d3);
const float A = (d3 * (s2 - s1) + d2 * (s1 - s3) + d1 * (s3 - s2)) / denom;
const float B = (d3*d3 * (s1 - s2) + d2*d2 * (s3 - s1) + d1*d1 * (s2 - s3)) / denom;
// const float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom;
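// For reference: with s(d) = A*d*d + B*d + C the minimum is at d = -B/(2*A); since
// d1,d2,d3 are unit-spaced this equals the three-point formula
// d2 - (s1 - s3) / (2*(s1 - 2*s2 + s3)).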
// Minimum of the parabola
const float newDisp = -B / (2*A);
// Check that the minimum is sensible. Otherwise assume bad data.
if( d3 < newDisp && newDisp < d1 ) {
dDispOut(x,y) = newDisp;
}else{
// dDisp(x,y) = bestDisp / maxDisp;
dDispOut(x,y) = InvalidValue<TDo>::Value();
}
}
void DenseStereoSubpixelRefine(
Image<float> dDispOut, const Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
hipLaunchKernelGGL(( KernDenseStereoSubpixelRefine<float,unsigned char,unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dDispOut, dDisp, dCamLeft, dCamRight);
}
//////////////////////////////////////////////////////
// Upgrade disparity image to vertex array
//////////////////////////////////////////////////////
__global__ void KernDisparityImageToVbo(
Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
dVbo(u,v) = DepthFromDisparity(u,v, dDisp(u,v), baseline, fu, fv, u0, v0, MinDisparity);
}
void DisparityImageToVbo(Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0)
{
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dVbo);
hipLaunchKernelGGL(( KernDisparityImageToVbo), dim3(gridDim),dim3(blockDim), 0, 0, dVbo, dDisp, baseline, fu, fv, u0, v0);
}
//////////////////////////////////////////////////////
// Cost Volume
//////////////////////////////////////////////////////
void CostVolumeZero(Volume<CostVolElem> costvol )
{
CostVolElem initial;
initial.sum = 0;
initial.n = 0;
costvol.Fill(initial);
}
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernCostVolumeFromStereo(
Volume<CostVolElem> dvol, Image<TI> dimgl, Image<TI> dimgr
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
if( u-d >= (int)Score::rad) {
CostVolElem elem;
elem.sum = Score::Score(dimgl, u,v, dimgr, u-d, v) / Score::area;
elem.n = 1;
dvol(u,v,d) = elem;
}
}
void CostVolumeFromStereo(Volume<CostVolElem> dvol, Image<unsigned char> dimgl, Image<unsigned char> dimgr )
{
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernCostVolumeFromStereo<unsigned char, unsigned char, DefaultSafeScoreType>), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgl,dimgr);
}
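// Note: the integer grid divisions used here (and in the CostVolumeAdd variants below)
// truncate, so the volume dimensions are assumed to be multiples of the 8x8x8 block
// size; compare the ceil()-based grid in CostVolumeFromStereoTruncatedAbsAndGrad.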
//////////////////////////////////////////////////////
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(
Volume<CostVolElem> dvol, const Image<TI> dimgv,
const Image<TI> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline
){
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
float3 Pv;
Pv.z = fu * baseline / d;
Pv.x = Pv.z * (u-u0) / fu;
Pv.y = Pv.z * (v-v0) / fv;
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
if( KPc.z > 0 && dimgc.InBounds(pc.x, pc.y,5) ) {
// vol(u,v,d) = 1.0f;
const float score = Score::Score(dimgv, u,v, dimgc, pc.x, pc.y) / (float)(Score::area);
// const float score = (dimgv(u,v) - dimgc.template GetBilinear<float>(pc)) / 255.0f;
CostVolElem elem = dvol(u,v,d);
elem.sum += score;
elem.n += 1;
dvol(u,v,d) = elem;
}
}
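// In the kernel above each disparity index d acts as a depth hypothesis:
// Pv.z = fu*baseline/d converts d to metric depth, (u,v) is back-projected to the
// 3D point Pv in the reference view, KT_cv maps it into the second camera, and the
// patch score at the projected position is accumulated into the cost volume.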
__host__ __device__
float d2Depth(int d, int dsize, float minDepth, float maxDepth, bool invDepth)
{
if (invDepth)
{
float invDepthMin = 1 / maxDepth;
float invDepthMax = 1 / minDepth;
float step = (invDepthMax - invDepthMin) / (dsize - 1);
return 1 / (step * d + invDepthMin);
}
else
{
float step = (maxDepth - minDepth) / (dsize - 1);
return step * d + minDepth;
}
}
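// Worked example (illustrative numbers): dsize = 64, minDepth = 0.5, maxDepth = 10.
// With invDepth = true the volume is sampled uniformly in inverse depth, so index
// d = 0 maps to 10.0 (maxDepth) and d = 63 to 0.5 (minDepth), giving finer depth
// resolution near the camera. With invDepth = false the sampling is uniform in
// depth: d = 0 maps to 0.5 and d = 63 to 10.0.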
__global__ void KernIdx2Depth(Image<float> dOut, const Image<float> dIn, int dsize, float minDepth, float maxDepth, bool invDepth)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (dOut.InBounds(x, y))
{
dOut(x, y) = d2Depth(dIn(x, y), dsize, minDepth, maxDepth, invDepth);
}
}
void Idx2Depth(Image<float> dOut, const Image<float> dIn, int dsize, float minDepth, float maxDepth, bool invDepth)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, dOut);
hipLaunchKernelGGL(( KernIdx2Depth), dim3(gridDim), dim3(blockDim), 0, 0, dOut, dIn, dsize, minDepth, maxDepth, invDepth);
}
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(Volume<CostVolElem> dvol,
const Image<TI> dimgv,
const Image<TI> dimgc,
Mat<float, 3, 4> KT_cv,
ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
const int u = blockIdx.x * blockDim.x + threadIdx.x;
const int v = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
float3 Pv;
Pv.z = d2Depth(d, dvol.d, minDepth, maxDepth, invDepth);
Pv = K.Unproject(u, v, Pv.z);
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
if (dimgc.InBounds(pc.x, pc.y, 5))
{
const float score = Score::Score(dimgv, u, v, dimgc, pc.x, pc.y) / (float)(Score::area);
//const float score = abs(dimgv(u,v) - dimgc.template GetBilinear<float>(pc));
CostVolElem elem = dvol(u, v, d);
elem.sum += score;
elem.n += 1;
dvol(u, v, d) = elem;
}
}
void CostVolumeAdd(Volume<CostVolElem> dvol, const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline, int levels
) {
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernAddToCostVolume<unsigned char, SANDPatchScore<float,DefaultRad,ImgAccessBilinearClamped<float> > >), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgv,dimgc, KT_cv, fu,fv,u0,v0, baseline);
}
void CostVolumeAdd(Volume<CostVolElem> dvol,
const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc,
Mat<float, 3, 4> KT_cv,
const ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernAddToCostVolume<unsigned char, SANDPatchScore<float, DefaultRad, ImgAccessBilinearClamped<float> > >), dim3(gridDim), dim3(blockDim), 0, 0, dvol, dimgv, dimgc, KT_cv, K, minDepth, maxDepth, invDepth);
}
// FIXME: should use templates
void CostVolumeAdd(Volume<CostVolElem> dvol,
const Image<float> dimgv,
const Image<float> dimgc,
Mat<float, 3, 4> KT_cv,
const ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
hipLaunchKernelGGL(( KernAddToCostVolume<float, SANDPatchScore<float, DefaultRad, ImgAccessBilinearClamped<float> > >), dim3(gridDim), dim3(blockDim), 0, 0, dvol, dimgv, dimgc, KT_cv, K, minDepth, maxDepth, invDepth);
}
//////////////////////////////////////////////////////
template<typename Tdisp>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<CostVolElem> vol)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
Tdisp bestd = 0;
float bestc = 1E30;
unsigned maxDisp = vol.d;
#pragma unroll
for(int d=0; d < maxDisp; ++d) {
const CostVolElem elem = vol(x,y,d);
const float c = (elem.sum / elem.n);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
disp(x,y) = bestd;
}
void CostVolMinimum(Image<float> disp, Volume<CostVolElem> vol)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
hipLaunchKernelGGL(( KernCostVolMinimum<float>), dim3(gridDim),dim3(blockDim), 0, 0, disp,vol);
}
//////////////////////////////////////////////////////
__global__ void KernCostVolumeCrossSection(
Image<float> dScore, Image<CostVolElem> dCostVolSlice
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int d = blockIdx.y*blockDim.y + threadIdx.y;
if( dCostVolSlice.InBounds(x,d) )
{
CostVolElem elem = dCostVolSlice(x,d);
const float score = (elem.sum / elem.n) / 255.0f;
dScore(x,d) = score;
}else{
dScore(x,d) = 0.0f / 0.0f;
}
}
void CostVolumeCrossSection(
Image<float> dScore, Volume<CostVolElem> dCostVol, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
hipLaunchKernelGGL(( KernCostVolumeCrossSection), dim3(gridDim),dim3(blockDim), 0, 0, dScore, dCostVol.ImageXZ(y));
}
//////////////////////////////////////////////////////
template<typename To, typename Ti>
__global__ void KernFilterDispGrad(Image<To> dOut, Image<Ti> dIn, float threshold )
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float dx = dOut.template GetCentralDiffDx<float>(x,y);
const float dy = dOut.template GetCentralDiffDy<float>(x,y);
const bool valid = dx*dx + dy*dy < threshold;
dOut(x,y) = valid ? dIn(x,y) : -1;
}
void FilterDispGrad(
Image<float> dOut, Image<float> dIn, float threshold
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dOut, 16, 16);
hipLaunchKernelGGL(( KernFilterDispGrad<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, dOut, dIn, threshold);
}
//////////////////////////////////////////////////////
// Cost volume with truncated grad and abs. diff. score
// Fast Cost-Volume Filtering for Visual Correspondence and Beyond
// Christoph Rhemann, Asmaa Hosni, Michael Bleyer, Carsten Rother, Margrit Gelautz
//////////////////////////////////////////////////////
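// The per-voxel cost computed below is
// C(u,v,d) = (1-alpha)*min(|I_r(r,v) - I_l(u,v)|, r1) + alpha*min(|dI_r/dx - dI_l/dx|, r2)
// with r = u + sd*d; hypotheses that fall outside the right image are assigned the
// fully truncated cost (1-alpha)*r1 + alpha*r2.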
template<typename Tout, typename Tin>
__global__ void KernCostVolumeFromStereoTruncatedAbsAndGrad(
Volume<Tout> dvol, Image<Tin> dimgl, Image<Tin> dimgr, float sd,
float alpha, float r1, float r2
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
const int r = u + sd*d;
if( 0 <= r && r < dimgr.w ) {
const float absI = abs( (float)dimgr(r,v) - (float)dimgl(u,v));
const float absGrad = abs( dimgr.template GetCentralDiffDx<float>(r,v) - dimgl.template GetCentralDiffDx<float>(u,v) );
const Tout cost = (1.0f-alpha)*min(absI,r1) + alpha*min(absGrad,r2);
dvol(u,v,d) = cost;
}else{
dvol(u,v,d) = (1.0f-alpha)*r1 + alpha*r2;
}
}
void CostVolumeFromStereoTruncatedAbsAndGrad(Volume<float> dvol, Image<float> dimgl, Image<float> dimgr, float sd, float alpha, float r1, float r2 )
{
dim3 blockDim(8,8,8);
dim3 gridDim( ceil(dvol.w / (float)blockDim.x), ceil(dvol.h / (float)blockDim.y), ceil(dvol.d / (float)blockDim.z) );
hipLaunchKernelGGL(( KernCostVolumeFromStereoTruncatedAbsAndGrad<float,float>), dim3(gridDim),dim3(blockDim), 0, 0, dvol,dimgl,dimgr,sd, alpha,r1,r2);
}
}
| 72bf24144b23ebc0535c2a2b5d35ee95a303bbc1.cu | #include "cu_dense_stereo.h"
#include "launch_utils.h"
#include "MatUtils.h"
#include "patch_score.h"
#include "disparity.h"
#include "InvalidValue.h"
#include "ImageApron.h"
namespace roo
{
const int MinDisparity = 0;
const int DefaultRad = 1;
//typedef SSNDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
typedef SANDPatchScore<float,DefaultRad,ImgAccessRaw> DefaultSafeScoreType;
//typedef SinglePixelSqPatchScore<float,ImgAccessRaw> DefaultSafeScoreType;
//////////////////////////////////////////////////////
// Cost Volume minimum
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
Tdisp bestd = 0;
Tvol bestc = vol(x,y,0);
const int maxDisp = min(maxDispVal, x+1);
for(int d=1; d < maxDisp; ++d) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
disp(x,y) = bestd;
}
template<typename Tdisp, typename Tvol>
void CostVolMinimum(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDisp)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
KernCostVolMinimum<Tdisp,Tvol><<<gridDim,blockDim>>>(disp,vol,maxDisp);
}
template void CostVolMinimum<>(Image<char>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned int>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned short>,unsigned);
template void CostVolMinimum<>(Image<char>,Volume<unsigned char>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<float>,unsigned);
template void CostVolMinimum<>(Image<float>,Volume<unsigned short>,unsigned);
//////////////////////////////////////////////////////
// Cost Volume minimum subpix refinement
//////////////////////////////////////////////////////
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSubpix(Image<Tdisp> disp, Volume<Tvol> vol, unsigned maxDispVal, float sd)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < disp.w && y < disp.h ) {
Tdisp bestd = 0;
Tvol bestc = 1E10;
for(int d=0; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const Tvol c = vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
if( 0 < bestxr && bestxr < vol.w-1) {
// Fit parabola to neighbours
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = vol(x,y,dl);
const float sr = vol(x,y,dr);
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// subpixdisp is the minimum of the fitted parabola.
// Check that the minimum is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
disp(x,y) = out;
}
}
void CostVolMinimumSubpix(Image<float> disp, Volume<float> vol, unsigned maxDisp, float sd)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
KernCostVolMinimumSubpix<float,float><<<gridDim,blockDim>>>(disp,vol,maxDisp,sd);
}
//////////////////////////////////////////////////////
// Cost Volume minimum square penalty subpix refinement
//////////////////////////////////////////////////////
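// For each pixel this kernel exhaustively minimises
// E(d) = (1/(2*theta)) * (imgd(x,y) - d)^2 + lambda * vol(x,y,d)
// over the disparity index and then refines the winner with a parabola fit. This is
// the point-wise data-term step of a DTAM-style alternation between a regularised
// field (imgd) and the cost volume (an interpretation, not stated in this file).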
template<typename Tdisp, typename Tvol>
__global__ void KernCostVolMinimumSquarePenaltySubpix(Image<Tdisp> imga, Volume<Tvol> vol, Image<float> imgd, unsigned maxDispVal, float sd, float lambda, float theta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imga.w && y < imga.h ) {
const float lastd = imgd(x,y);
const float inv2theta = 1.0f / (2.0f*theta);
Tdisp bestd = 0;
Tvol bestc = inv2theta*lastd*lastd + lambda * vol(x,y,0);
for(int d=1; d < maxDispVal; ++d) {
const int xr = x + sd*d;
if(0 <= xr && xr < vol.w) {
const float ddif = lastd - d;
const Tvol c = inv2theta*ddif*ddif + lambda * vol(x,y,d);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
}
Tdisp out = bestd;
const int bestxr = x + sd*bestd;
if( 0 < bestxr && bestxr < vol.w-1) {
// Newton Step
const float dl = bestd-1;
const float dr = bestd+1;
const float sl = inv2theta*(lastd-dl)*(lastd-dl) + lambda * vol(x,y,dl); //vol(x,y,d3);
const float sr = inv2theta*(lastd-dr)*(lastd-dr) + lambda * vol(x,y,dr); //vol(x,y,d1);
const float subpixdisp = bestd - (sr-sl) / (2*(sr-2*bestc+sl));
// Check that the minimum is sensible. Otherwise assume bad data.
if( dl < subpixdisp && subpixdisp < dr ) {
out = subpixdisp;
}
}
imga(x,y) = out;
}
}
void CostVolMinimumSquarePenaltySubpix(Image<float> imga, Volume<float> vol, Image<float> imgd, unsigned maxDisp, float sd, float lambda, float theta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imga);
KernCostVolMinimumSquarePenaltySubpix<float,float><<<gridDim,blockDim>>>(imga,vol,imgd,maxDisp,sd,lambda,theta);
}
//////////////////////////////////////////////////////
// Edge Weight
//////////////////////////////////////////////////////
__global__ void KernExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( x < imgi.w && y < imgi.h ) {
float2 grad = make_float2(0,0);
if(0<x && x<imgi.w-1) grad.x = imgi.GetCentralDiffDx<float>(x,y);
if(0<y && y<imgi.h-1) grad.y = imgi.GetCentralDiffDy<float>(x,y);
// if(0<x && x<imgi.w) grad.x = imgi.GetBackwardDiffDx<float>(x,y);
// if(0<y && y<imgi.h) grad.y = imgi.GetBackwardDiffDy<float>(x,y);
const float w = expf( -alpha * powf(sqrt(grad.x*grad.x + grad.y*grad.y),beta) );
imgw(x,y) = w;
}
}
void ExponentialEdgeWeight(Image<float> imgw, const Image<float> imgi, float alpha, float beta)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,imgw);
KernExponentialEdgeWeight<<<gridDim,blockDim>>>(imgw,imgi,alpha,beta);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernDenseStereo(
Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
TD bestDisp = InvalidValue<TD>::Value();
if( Score::width <= x && x < (dCamLeft.w - Score::width) &&
Score::height <= y && y < (dCamLeft.h - Score::height) )
{
// Search for best matching pixel
float bestScore = 1E+36;
TD sndBestDisp = InvalidValue<TD>::Value();
float sndBestScore = 1E+37;
TD minDisp = min(maxDispVal, (TD)0);
TD maxDisp = max((TD)0, maxDispVal);
minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
maxDisp = min((int)maxDisp, (int)(x + Score::width));
for(TD c = minDisp; c <= maxDisp; c += dispStep ) {
const float score = Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
if(score < bestScore) {
sndBestDisp = bestDisp;
sndBestScore = bestScore;
bestDisp = c;
bestScore = score;
}else if( score <= sndBestScore) {
sndBestDisp = c;
sndBestScore = score;
}
}
if(abs(bestDisp-sndBestDisp) > 1) {
const float cd = (sndBestScore - bestScore) / bestScore;
if( cd < acceptThresh ) {
bestDisp = InvalidValue<TD>::Value();
}
}
}
dDisp(x,y) = bestDisp;
}
const int MAXBW = 512;
//template<typename TD, typename TI, typename Score>
//__global__ void KernDenseStereo(
// Image<TD> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, TD maxDispVal, TD dispStep, float acceptThresh
//) {
// const int x = blockIdx.x*blockDim.x + threadIdx.x;
// const int y = blockIdx.y*blockDim.y + threadIdx.y;
// const int W = Score::width;
// const int RAD = W / 2;
//// TI patch[W*W];
// // only enough shared mem to cache right image
//// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_l;
// __shared__ ImageApronRows<TI,MAXBW,1,RAD> apron_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> col_avg_r;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_l;
//// __shared__ ImageApronRows<TI,MAXBW,1,0> avg_r;
// ///////////////////////////////////
//// // Cache line of right/left image +/- RAD
//// apron_l.CacheImage(dCamLeft);
// apron_r.CacheImage(dCamRight);
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of colums for norm
////// int colsuml = 0;
//// int colsumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// colsuml += apron_l.GetRelThread(0,i);
//// colsumr += apron_r.GetRelThread(0,i);
//// }
////// col_avg_l.GetRelThread(0,0) = colsuml / W;
//// col_avg_r.GetRelThread(0,0) = colsumr / W;
//// __syncthreads();
//// ///////////////////////////////////
//// // Cache sum of block for norm
////// int suml = 0;
//// int sumr = 0;
////#pragma unroll
//// for(int i=-RAD; i<=RAD; ++i) {
////// suml += col_avg_l.GetRelThreadClampX(i,0);
//// sumr += col_avg_r.GetRelThreadClampX(i,0);
//// }
////// avg_l.GetRelThread(0,0) = suml / W;
//// avg_r.GetRelThread(0,0) = sumr / W;
// ///////////////////////////////////
// // Cache left patch, compute mean
////// int sum_l = 0;
//// for(int r=-RAD; r<= RAD; ++r) {
////#pragma unroll
//// for(int c=-RAD; c<=RAD; ++c) {
//// const TI val = dCamLeft.GetWithClampedRange(x+c, y+r);
//// patch[(RAD+r)*W+(RAD+c)] = val;
////// sum_l += val;
//// }
//// }
//// const TI avg_l = sum_l / (W*W);
// __syncthreads();
// TD bestDisp = InvalidValue<TD>::Value();
// if( maxDispVal+Score::width <= x && x < (dCamLeft.w - Score::width) &&
// Score::height <= y && y < (dCamLeft.h - Score::height) )
// {
// // Search for best matching pixel
// float bestScore = 1E+36;
//// TD sndBestDisp = InvalidValue<TD>::Value();
//// float sndBestScore = 1E+37;
//// TD minDisp = min(maxDispVal, (TD)0);
//// TD maxDisp = max((TD)0, maxDispVal);
//// minDisp = max((int)minDisp, -(int)( ((int)dCamLeft.w - (int)Score::width) - (int)x));
//// maxDisp = min((int)maxDisp, (int)(x + Score::width));
// for(TD c = 0; c <= maxDispVal; c += 1 ) {
// float score = 0;
// for(int ky=-RAD; ky <= RAD; ++ky ) {
//#pragma unroll
// for(int kx=-RAD; kx <= RAD; ++kx ) {
//// const int pl = apron_l.GetRelThread(kx,ky);
// const int pl = 0;//patch[(RAD+ky)*W+(RAD+kx)];
// const int pr = apron_r.GetRelThread(kx-c,ky);
// score += abs(pl - pr);
// }
// }
////// Score::Score(dCamLeft, x,y, dCamRight, x-c, y);
// if(score < bestScore) {
//// sndBestDisp = bestDisp;
//// sndBestScore = bestScore;
// bestDisp = c;
// bestScore = score;
//// }else if( score <= sndBestScore) {
//// sndBestDisp = c;
//// sndBestScore = score;
// }
// }
//// if(abs(bestDisp-sndBestDisp) > 1) {
//// const float cd = (sndBestScore - bestScore) / bestScore;
//// if( cd < acceptThresh ) {
//// bestDisp = InvalidValue<TD>::Value();
//// }
//// }
// }
// dDisp(x,y) = bestDisp;
//}
template<typename TDisp, typename TImg>
void DenseStereo(
Image<TDisp> dDisp, const Image<TImg> dCamLeft, const Image<TImg> dCamRight,
TDisp maxDisp, float acceptThresh, int score_rad
) {
dim3 blockDim(dDisp.w, 1);
dim3 gridDim(1, dDisp.h);
// InitDimFromOutputImageOver(blockDim,gridDim,dDisp);
const TDisp dispStep = 1;
if( score_rad == 0 ) {
KernDenseStereo<TDisp, TImg, SinglePixelSqPatchScore<float,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 1 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,1,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 2 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,2,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 3 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,3,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if( score_rad == 4 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,4,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 5 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,5,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 6 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,6,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}else if(score_rad == 7 ) {
KernDenseStereo<TDisp, TImg, SANDPatchScore<float,7,ImgAccessRaw > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
}
}
template void DenseStereo<unsigned char, unsigned char>(Image<unsigned char>, const Image<unsigned char>, const Image<unsigned char>, unsigned char, float, int);
template void DenseStereo<char, unsigned char>(Image<char>, const Image<unsigned char>, const Image<unsigned char>, char, float, int);
void DenseStereoSubpix(
Image<float> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, float maxDisp, float dispStep, float acceptThresh, int score_rad, bool score_normed
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
// if(score_normed) {
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 6 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,6,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 7 ) {
// KernDenseStereo<float, unsigned char, SANDPatchScore<float,7,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }else{
// if( score_rad == 0 ) {
// KernDenseStereo<float, unsigned char, SinglePixelSqPatchScore<float,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 1 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,1,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 2 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,2,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 3 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,3,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if( score_rad == 4 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,4,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }else if(score_rad == 5 ) {
// KernDenseStereo<float, unsigned char, SADPatchScore<float,5,ImgAccessBilinear<float> > ><<<gridDim,blockDim>>>(dDisp, dCamLeft, dCamRight, maxDisp, dispStep, acceptThresh);
// }
// }
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
const int RAD = 3;
const int W = 2*RAD+1;
__global__ void KernDenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int x = threadIdx.x;
const int y = blockIdx.y;
__shared__ unsigned char cache_l[W][MAXBW];
__shared__ unsigned char cache_r[W][MAXBW+1];
#pragma unroll
for(int r=0; r<W; ++r ) {
cache_l[r][x] = dCamLeft.Get(x,y+r-RAD);
cache_r[r][x] = dCamRight.Get(x,y+r-RAD);
}
__syncthreads();
int bestScore = 0xFFFFF;
int bestDisp = 0;
const int maxClipDisp = min(x-RAD,maxDisp);
for(int d=0; d<maxClipDisp; ++d)
{
const int xd = x-d;
int score = 0;
#pragma unroll
for(int r=0; r<W; ++r) {
score += abs(cache_l[r][x] - cache_r[r][xd]);
// const int yr = y-RAD+r;
// score += abs(dCamLeft(x,yr) - dCamRight(xd,yr));
}
if(score < bestScore) {
bestScore = score;
bestDisp = d;
}
}
dDisp(x,y) = bestDisp;
}
void DenseStereoTest(
Image<float> dDisp, Image<unsigned char> dCamLeft, Image<unsigned char> dCamRight, int maxDisp
) {
const int w = dDisp.w;
const int h = dDisp.h - 2*RAD;
const int x = 0;
const int y = RAD;
dim3 blockDim(w, 1);
dim3 gridDim(1, h);
KernDenseStereoTest<<<gridDim,blockDim>>>(dDisp.SubImage(x,y,w,h), dCamLeft.SubImage(x,y,w,h), dCamRight.SubImage(x,y,w,h), maxDisp);
}
//////////////////////////////////////////////////////
// Check Left and Right disparity images match
//////////////////////////////////////////////////////
template<typename TD>
__global__ void KernLeftRightCheck(
Image<TD> dispL, Image<TD> dispR, float sd, float maxDiff
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if( dispL.InBounds(x,y) ) {
const TD dl = dispL(x,y);
const TD xr = x + sd*dl;
if( 0 <= xr && xr < dispR.w) {
const TD dr = dispR(xr, y);
if(!InvalidValue<TD>::IsValid(dr) || abs(dl - dr) > maxDiff) {
dispL(x,y) = InvalidValue<TD>::Value();
}
}else{
dispL(x,y) = InvalidValue<TD>::Value();
}
}
}
void LeftRightCheck(Image<char> dispL, Image<char> dispR, int sd, int maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
KernLeftRightCheck<char><<<gridDim,blockDim>>>(dispL, dispR, sd, maxDiff);
}
void LeftRightCheck(Image<float> dispL, Image<float> dispR, float sd, float maxDiff)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim, dispL);
KernLeftRightCheck<float><<<gridDim,blockDim>>>(dispL, dispR, sd, maxDiff);
}
//////////////////////////////////////////////////////
// Visualise cross section of disparity image
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernDisparityImageCrossSection(
Image<TD> dScore, Image<unsigned char> dDisp, Image<TI> dCamLeft, Image<TI> dCamRight, int y
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int c = blockIdx.y*blockDim.y + threadIdx.y;
const int rx = x-c;
const float score = ( 0<= rx && rx < dCamRight.w ) ? Score::Score(dCamLeft, x,y, dCamRight, rx, y) : 0;
const unsigned char mindisp = dDisp(x,y);
const float show = sqrt(score / Score::area) / 255.0f;
dScore(x,c) = show * make_float4( 1,1,1,1);
}
void DisparityImageCrossSection(
Image<float4> dScore, Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
KernDisparityImageCrossSection<float4, unsigned char, DefaultSafeScoreType><<<gridDim,blockDim>>>(dScore, dDisp, dCamLeft, dCamRight, y);
}
//////////////////////////////////////////////////////
// Scanline rectified dense stereo sub-pixel refinement
//////////////////////////////////////////////////////
template<typename TDo, typename TDi, typename TI, typename Score>
__global__ void KernDenseStereoSubpixelRefine(
Image<TDo> dDispOut, const Image<TDi> dDisp, const Image<TI> dCamLeft, const Image<TI> dCamRight
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const int bestDisp = dDisp(x,y);
// Ignore things at infinity
if(bestDisp < MinDisparity) {
dDispOut(x,y) = InvalidValue<TDo>::Value();
return;
}
// Fit parabola to neighbours
const float d1 = bestDisp+1;
const float d2 = bestDisp;
const float d3 = bestDisp-1;
const float s1 = Score::Score(dCamLeft, x,y, dCamRight, x-d1,y);
const float s2 = Score::Score(dCamLeft, x,y, dCamRight, x-d2,y);
const float s3 = Score::Score(dCamLeft, x,y, dCamRight, x-d3,y);
// Coefficients of the parabola through (d1,s1),(d2,s2),(d3,s3)
const float denom = (d1 - d2)*(d1 - d3)*(d2 - d3);
const float A = (d3 * (s2 - s1) + d2 * (s1 - s3) + d1 * (s3 - s2)) / denom;
const float B = (d3*d3 * (s1 - s2) + d2*d2 * (s3 - s1) + d1*d1 * (s2 - s3)) / denom;
// const float C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom;
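// For reference: with s(d) = A*d*d + B*d + C the minimum is at d = -B/(2*A); since
// d1,d2,d3 are unit-spaced this equals the three-point formula
// d2 - (s1 - s3) / (2*(s1 - 2*s2 + s3)).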
// Minimum of the parabola
const float newDisp = -B / (2*A);
// Check that the minimum is sensible. Otherwise assume bad data.
if( d3 < newDisp && newDisp < d1 ) {
dDispOut(x,y) = newDisp;
}else{
// dDisp(x,y) = bestDisp / maxDisp;
dDispOut(x,y) = InvalidValue<TDo>::Value();
}
}
void DenseStereoSubpixelRefine(
Image<float> dDispOut, const Image<unsigned char> dDisp, const Image<unsigned char> dCamLeft, const Image<unsigned char> dCamRight
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dDisp);
KernDenseStereoSubpixelRefine<float,unsigned char,unsigned char, DefaultSafeScoreType><<<gridDim,blockDim>>>(dDispOut, dDisp, dCamLeft, dCamRight);
}
//////////////////////////////////////////////////////
// Upgrade disparity image to vertex array
//////////////////////////////////////////////////////
__global__ void KernDisparityImageToVbo(
Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
dVbo(u,v) = DepthFromDisparity(u,v, dDisp(u,v), baseline, fu, fv, u0, v0, MinDisparity);
}
void DisparityImageToVbo(Image<float4> dVbo, const Image<float> dDisp, float baseline, float fu, float fv, float u0, float v0)
{
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dVbo);
KernDisparityImageToVbo<<<gridDim,blockDim>>>(dVbo, dDisp, baseline, fu, fv, u0, v0);
}
//////////////////////////////////////////////////////
// Cost Volume
//////////////////////////////////////////////////////
void CostVolumeZero(Volume<CostVolElem> costvol )
{
CostVolElem initial;
initial.sum = 0;
initial.n = 0;
costvol.Fill(initial);
}
//////////////////////////////////////////////////////
template<typename TD, typename TI, typename Score>
__global__ void KernCostVolumeFromStereo(
Volume<CostVolElem> dvol, Image<TI> dimgl, Image<TI> dimgr
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
if( u-d >= (int)Score::rad) {
CostVolElem elem;
elem.sum = Score::Score(dimgl, u,v, dimgr, u-d, v) / Score::area;
elem.n = 1;
dvol(u,v,d) = elem;
}
}
void CostVolumeFromStereo(Volume<CostVolElem> dvol, Image<unsigned char> dimgl, Image<unsigned char> dimgr )
{
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernCostVolumeFromStereo<unsigned char, unsigned char, DefaultSafeScoreType><<<gridDim,blockDim>>>(dvol,dimgl,dimgr);
}
//////////////////////////////////////////////////////
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(
Volume<CostVolElem> dvol, const Image<TI> dimgv,
const Image<TI> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline
){
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
float3 Pv;
Pv.z = fu * baseline / d;
Pv.x = Pv.z * (u-u0) / fu;
Pv.y = Pv.z * (v-v0) / fv;
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
if( KPc.z > 0 && dimgc.InBounds(pc.x, pc.y,5) ) {
// vol(u,v,d) = 1.0f;
const float score = Score::Score(dimgv, u,v, dimgc, pc.x, pc.y) / (float)(Score::area);
// const float score = (dimgv(u,v) - dimgc.template GetBilinear<float>(pc)) / 255.0f;
CostVolElem elem = dvol(u,v,d);
elem.sum += score;
elem.n += 1;
dvol(u,v,d) = elem;
}
}
__host__ __device__
float d2Depth(int d, int dsize, float minDepth, float maxDepth, bool invDepth)
{
if (invDepth)
{
float invDepthMin = 1 / maxDepth;
float invDepthMax = 1 / minDepth;
float step = (invDepthMax - invDepthMin) / (dsize - 1);
return 1 / (step * d + invDepthMin);
}
else
{
float step = (maxDepth - minDepth) / (dsize - 1);
return step * d + minDepth;
}
}
__global__ void KernIdx2Depth(Image<float> dOut, const Image<float> dIn, int dsize, float minDepth, float maxDepth, bool invDepth)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (dOut.InBounds(x, y))
{
dOut(x, y) = d2Depth(dIn(x, y), dsize, minDepth, maxDepth, invDepth);
}
}
void Idx2Depth(Image<float> dOut, const Image<float> dIn, int dsize, float minDepth, float maxDepth, bool invDepth)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim, gridDim, dOut);
KernIdx2Depth<<<gridDim, blockDim>>>(dOut, dIn, dsize, minDepth, maxDepth, invDepth);
}
template<typename TI, typename Score>
__global__ void KernAddToCostVolume(Volume<CostVolElem> dvol,
const Image<TI> dimgv,
const Image<TI> dimgc,
Mat<float, 3, 4> KT_cv,
ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
const int u = blockIdx.x * blockDim.x + threadIdx.x;
const int v = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
float3 Pv;
Pv.z = d2Depth(d, dvol.d, minDepth, maxDepth, invDepth);
Pv = K.Unproject(u, v, Pv.z);
const float3 KPc = KT_cv * Pv;
const float2 pc = dn(KPc);
if (dimgc.InBounds(pc.x, pc.y, 5))
{
const float score = Score::Score(dimgv, u, v, dimgc, pc.x, pc.y) / (float)(Score::area);
//const float score = abs(dimgv(u,v) - dimgc.template GetBilinear<float>(pc));
CostVolElem elem = dvol(u, v, d);
elem.sum += score;
elem.n += 1;
dvol(u, v, d) = elem;
}
}
void CostVolumeAdd(Volume<CostVolElem> dvol, const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc, Mat<float,3,4> KT_cv,
float fu, float fv, float u0, float v0,
float baseline, int levels
) {
dim3 blockDim(8,8,8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernAddToCostVolume<unsigned char, SANDPatchScore<float,DefaultRad,ImgAccessBilinearClamped<float> > ><<<gridDim,blockDim>>>(dvol,dimgv,dimgc, KT_cv, fu,fv,u0,v0, baseline);
}
void CostVolumeAdd(Volume<CostVolElem> dvol,
const Image<unsigned char> dimgv,
const Image<unsigned char> dimgc,
Mat<float, 3, 4> KT_cv,
const ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernAddToCostVolume<unsigned char, SANDPatchScore<float, DefaultRad, ImgAccessBilinearClamped<float> > ><<<gridDim, blockDim>>>(dvol, dimgv, dimgc, KT_cv, K, minDepth, maxDepth, invDepth);
}
// FIXME: should use templates
void CostVolumeAdd(Volume<CostVolElem> dvol,
const Image<float> dimgv,
const Image<float> dimgc,
Mat<float, 3, 4> KT_cv,
const ImageIntrinsics K,
float minDepth,
float maxDepth,
bool invDepth)
{
dim3 blockDim(8, 8, 8);
dim3 gridDim(dvol.w / blockDim.x, dvol.h / blockDim.y, dvol.d / blockDim.z);
KernAddToCostVolume<float, SANDPatchScore<float, DefaultRad, ImgAccessBilinearClamped<float> > ><<<gridDim, blockDim>>>(dvol, dimgv, dimgc, KT_cv, K, minDepth, maxDepth, invDepth);
}
//////////////////////////////////////////////////////
template<typename Tdisp>
__global__ void KernCostVolMinimum(Image<Tdisp> disp, Volume<CostVolElem> vol)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
Tdisp bestd = 0;
float bestc = 1E30;
unsigned maxDisp = vol.d;
#pragma unroll
for(int d=0; d < maxDisp; ++d) {
const CostVolElem elem = vol(x,y,d);
const float c = (elem.sum / elem.n);
if(c < bestc) {
bestc = c;
bestd = d;
}
}
disp(x,y) = bestd;
}
void CostVolMinimum(Image<float> disp, Volume<CostVolElem> vol)
{
dim3 blockDim, gridDim;
InitDimFromOutputImageOver(blockDim,gridDim,disp);
KernCostVolMinimum<float><<<gridDim,blockDim>>>(disp,vol);
}
//////////////////////////////////////////////////////
__global__ void KernCostVolumeCrossSection(
Image<float> dScore, Image<CostVolElem> dCostVolSlice
) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int d = blockIdx.y*blockDim.y + threadIdx.y;
if( dCostVolSlice.InBounds(x,d) )
{
CostVolElem elem = dCostVolSlice(x,d);
const float score = (elem.sum / elem.n) / 255.0f;
dScore(x,d) = score;
}else{
dScore(x,d) = 0.0f / 0.0f;
}
}
void CostVolumeCrossSection(
Image<float> dScore, Volume<CostVolElem> dCostVol, int y
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dScore);
KernCostVolumeCrossSection<<<gridDim,blockDim>>>(dScore, dCostVol.ImageXZ(y));
}
//////////////////////////////////////////////////////
template<typename To, typename Ti>
__global__ void KernFilterDispGrad(Image<To> dOut, Image<Ti> dIn, float threshold )
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
const float dx = dOut.template GetCentralDiffDx<float>(x,y);
const float dy = dOut.template GetCentralDiffDy<float>(x,y);
const bool valid = dx*dx + dy*dy < threshold;
dOut(x,y) = valid ? dIn(x,y) : -1;
}
void FilterDispGrad(
Image<float> dOut, Image<float> dIn, float threshold
) {
dim3 blockDim, gridDim;
InitDimFromOutputImage(blockDim,gridDim, dOut, 16, 16);
KernFilterDispGrad<float,float><<<gridDim,blockDim>>>(dOut, dIn, threshold);
}
//////////////////////////////////////////////////////
// Cost volume with truncated grad and abs. diff. score
// Fast Cost-Volume Filtering for Visual Correspondence and Beyond
// Christoph Rhemann, Asmaa Hosni, Michael Bleyer, Carsten Rother, Margrit Gelautz
//////////////////////////////////////////////////////
template<typename Tout, typename Tin>
__global__ void KernCostVolumeFromStereoTruncatedAbsAndGrad(
Volume<Tout> dvol, Image<Tin> dimgl, Image<Tin> dimgr, float sd,
float alpha, float r1, float r2
) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int d = blockIdx.z*blockDim.z + threadIdx.z;
const int r = u + sd*d;
if( 0 <= r && r < dimgr.w ) {
const float absI = abs( (float)dimgr(r,v) - (float)dimgl(u,v));
const float absGrad = abs( dimgr.template GetCentralDiffDx<float>(r,v) - dimgl.template GetCentralDiffDx<float>(u,v) );
const Tout cost = (1.0f-alpha)*min(absI,r1) + alpha*min(absGrad,r2);
dvol(u,v,d) = cost;
}else{
dvol(u,v,d) = (1.0f-alpha)*r1 + alpha*r2;
}
}
void CostVolumeFromStereoTruncatedAbsAndGrad(Volume<float> dvol, Image<float> dimgl, Image<float> dimgr, float sd, float alpha, float r1, float r2 )
{
dim3 blockDim(8,8,8);
dim3 gridDim( ceil(dvol.w / (float)blockDim.x), ceil(dvol.h / (float)blockDim.y), ceil(dvol.d / (float)blockDim.z) );
KernCostVolumeFromStereoTruncatedAbsAndGrad<float,float><<<gridDim,blockDim>>>(dvol,dimgl,dimgr,sd, alpha,r1,r2);
}
}
|
3f1eb6e15c69d6944ff5bc13a6c4632fd3b6f2d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlaswp_sym.cu, normal z -> s, Tue Aug 30 09:38:32 2016
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
float *dA;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswp_sym_params_t;
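// The pivot indices travel to the kernel by value inside this struct, so a single
// launch can apply at most MAX_PIVOTS interchanges; magmablas_slaswp_sym_q below
// therefore walks [k1,k2) in chunks of MAX_PIVOTS.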
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswp_sym_kernel( slaswp_sym_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < params.n ) {
for( int ii = params.j0; ii < params.npivots; ++ii ) {
int i1 = ii;
int i2 = params.ipiv[ii];
// swap: i1 <-> i2
// this thread is responsible for the tid-th element
float *A1 = NULL, *A2 = NULL;
if (tid < i1) {
// row swap: (i1,tid) <-> (i2,tid)
A1 = params.dA + tid*params.lda + i1;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i1) {
// diagonal swap: (i1,i1) <-> (i2,i2)
A1 = params.dA + i1*params.lda + i1;
A2 = params.dA + i2*params.lda + i2;
} else if (tid < i2) {
// row-col swap: (tid,i1) <-> (i2,tid)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i2) {
// diagonal swap: done by i1-th thread
} else if (tid > i2) {
// column swap: (tid,i1) <-> (tid,i2)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + i2*params.lda + tid;
}
if ( A1 != NULL && A2 != NULL) {
float temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
}
}
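// Worked example (added for clarity, values illustrative): with n = 5 and a
// single pivot i1 = 1 <-> i2 = 3 on the lower triangle (column-major, so
// element (row r, col c) lives at dA[c*lda + r]), the five threads do:
//   tid = 0        : row swap      A(1,0) <-> A(3,0)
//   tid = 1 (= i1) : diagonal swap A(1,1) <-> A(3,3)
//   tid = 2        : row-col swap  A(2,1) <-> A(3,2)
//   tid = 3 (= i2) : nothing (its diagonal swap is done by the i1 thread)
//   tid = 4        : column swap   A(4,1) <-> A(4,3)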
// Launch slaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswp_sym( slaswp_sym_params_t ¶ms, magma_queue_t queue )
{
int blocks = magma_ceildiv(params.n, NTHREADS);
hipLaunchKernelGGL(( slaswp_sym_kernel), dim3(blocks), dim3(NTHREADS), 0, queue->cuda_stream() , params );
}
/***************************************************************************//**
Purpose:
=============
SLASWP_SYM applies a series of symmetric pivoting operations to a symmetric matrix A.
Currently, it is only implemented for the lower-triangular part of the matrix.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA REAL array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
lda INTEGER
Stride between elements in same column.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_slaswp_sym_q(
magma_int_t n, float *dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n lda j0 npivots
slaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - 1;
}
slaswp_sym( params, queue );
}
}
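// Illustrative usage sketch (not from the original source; the allocation
// helper, queue handling and pivot values below are assumptions):
//
//   magma_int_t n = 1000, lda = 1000;
//   float *dA;                              // device matrix, lower triangle referenced
//   magma_smalloc( &dA, (size_t)lda * n );
//   std::vector<magma_int_t> ipiv( n );     // 1-based symmetric pivots, inci = 1
//   /* ... fill dA and ipiv ... */
//   magmablas_slaswp_sym_q( n, dA, lda,
//                           /*k1=*/1, /*k2=*/n,
//                           ipiv.data(), /*inci=*/1, queue );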
| 3f1eb6e15c69d6944ff5bc13a6c4632fd3b6f2d7.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@generated from magmablas/zlaswp_sym.cu, normal z -> s, Tue Aug 30 09:38:32 2016
@author Stan Tomov
@author Mathieu Faverge
@author Ichitaro Yamazaki
@author Mark Gates
*/
#include "magma_internal.h"
// MAX_PIVOTS is maximum number of pivots to apply in each kernel launch
// NTHREADS is number of threads in a block
// 64 and 256 are better on Kepler;
//#define MAX_PIVOTS 64
//#define NTHREADS 256
#define MAX_PIVOTS 32
#define NTHREADS 64
typedef struct {
float *dA;
int n, lda, j0, npivots;
int ipiv[MAX_PIVOTS];
} slaswp_sym_params_t;
// Matrix A is stored row or column-wise in dA.
// Divide matrix A into block-columns of NTHREADS columns each.
// Each GPU block processes one block-column of A.
// Each thread goes down a column of A,
// swapping rows according to pivots stored in params.
__global__ void slaswp_sym_kernel( slaswp_sym_params_t params )
{
unsigned int tid = threadIdx.x + blockDim.x*blockIdx.x;
if ( tid < params.n ) {
for( int ii = params.j0; ii < params.npivots; ++ii ) {
int i1 = ii;
int i2 = params.ipiv[ii];
// swap: i1 <-> i2
// this thread is responsible for the tid-th element
float *A1 = NULL, *A2 = NULL;
if (tid < i1) {
// row swap: (i1,tid) <-> (i2,tid)
A1 = params.dA + tid*params.lda + i1;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i1) {
// diagonal swap: (i1,i1) <-> (i2,i2)
A1 = params.dA + i1*params.lda + i1;
A2 = params.dA + i2*params.lda + i2;
} else if (tid < i2) {
// row-col swap: (tid,i1) <-> (i2,tid)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + tid*params.lda + i2;
} else if (tid == i2) {
// diagonal swap: done by i1-th thread
} else if (tid > i2) {
// column swap: (tid,i1) <-> (tid,i2)
A1 = params.dA + i1*params.lda + tid;
A2 = params.dA + i2*params.lda + tid;
}
if ( A1 != NULL && A2 != NULL) {
float temp = *A1;
*A1 = *A2;
*A2 = temp;
}
}
}
}
// Launch slaswp_sym kernel with ceil( n / NTHREADS ) blocks of NTHREADS threads each.
extern "C" void slaswp_sym( slaswp_sym_params_t ¶ms, magma_queue_t queue )
{
int blocks = magma_ceildiv(params.n, NTHREADS);
slaswp_sym_kernel<<< blocks, NTHREADS, 0, queue->cuda_stream() >>>( params );
}
/***************************************************************************//**
Purpose:
=============
SLASWP_SYM applies a series of symmetric pivoting operations to a symmetric matrix A.
Currently, it is only implemented for the lower-triangular part of the matrix.
Arguments:
==========
@param[in]
n INTEGER
The number of columns of the matrix A.
@param[in,out]
dA REAL array on GPU, dimension (*,*)
On entry, the matrix of column dimension N to which the row
interchanges will be applied.
On exit, the permuted matrix.
@param[in]
lda INTEGER
Stride between elements in same column.
@param[in]
k1 INTEGER
The first element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
k2 INTEGER
The last element of IPIV for which a row interchange will
be done. (One based index.)
@param[in]
ipiv INTEGER array, on CPU, dimension (K2*abs(INCI))
The vector of pivot indices. Only the elements in positions
K1 through K2 of IPIV are accessed.
IPIV(K) = L implies rows K and L are to be interchanged.
@param[in]
inci INTEGER
The increment between successive values of IPIV.
Currently, INCI > 0.
TODO: If INCI is negative, the pivots are applied in reverse order.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laswp_sym
*******************************************************************************/
extern "C" void
magmablas_slaswp_sym_q(
magma_int_t n, float *dA, magma_int_t lda,
magma_int_t k1, magma_int_t k2,
const magma_int_t *ipiv, magma_int_t inci,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( n < 0 )
info = -1;
else if ( k1 < 0 )
info = -4;
else if ( k2 < 0 || k2 < k1 )
info = -5;
else if ( inci <= 0 )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
for( int k = k1-1; k < k2; k += MAX_PIVOTS ) {
int npivots = min( MAX_PIVOTS, k2-k );
// fields are: dA n lda j0 npivots
slaswp_sym_params_t params = { dA, int(n), int(lda), int(k), int(k+npivots) };
for( int j = 0; j < npivots; ++j ) {
params.ipiv[j] = ipiv[(k+j)*inci] - 1;
}
slaswp_sym( params, queue );
}
}
|
0854b815a106f20d618b6b93f3488f3e4b1e4999.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
} | 0854b815a106f20d618b6b93f3488f3e4b1e4999.cu | #include "includes.h"
__global__ void kSoftMaxCrossEntropyRowMajor(float* mat, float* labels, float* target, unsigned int width, unsigned int height, float tiny) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < height; i += numThreads) {
target[i] = -__logf(mat[height * (int)labels[i] + i] + tiny);
}
} |
2d65a76698779bf622c46c7d5cf07440b8664c25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2021 PlenOctree Authors
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdint>
#include <vector>
#include "common_hip.cuh"
#include "data_spec_packed.cuh"
namespace {
// Automatically choose number of CUDA threads based on HW CUDA core count
int cuda_n_threads = -1;
__host__ void auto_cuda_threads() {
if (~cuda_n_threads) return;
hipDeviceProp_t dev_prop;
hipGetDeviceProperties(&dev_prop, 0);
const int n_cores = get_sp_cores(dev_prop);
// Optimize number of CUDA threads per block
if (n_cores < 2048) {
cuda_n_threads = 256;
} else if (n_cores < 8192) {
cuda_n_threads = 512;
} else {
cuda_n_threads = 1024;
}
}
namespace device {
// SH Coefficients from https://github.com/google/spherical-harmonics
__device__ __constant__ const float C0 = 0.28209479177387814;
__device__ __constant__ const float C1 = 0.4886025119029199;
__device__ __constant__ const float C2[] = {
1.0925484305920792,
-1.0925484305920792,
0.31539156525252005,
-1.0925484305920792,
0.5462742152960396
};
__device__ __constant__ const float C3[] = {
-0.5900435899266435,
2.890611442640554,
-0.4570457994644658,
0.3731763325901154,
-0.4570457994644658,
1.445305721320277,
-0.5900435899266435
};
__device__ __constant__ const float C4[] = {
2.5033429417967046,
-1.7701307697799304,
0.9461746957575601,
-0.6690465435572892,
0.10578554691520431,
-0.6690465435572892,
0.47308734787878004,
-1.7701307697799304,
0.6258357354491761,
};
#define _SOFTPLUS_M1(x) (logf(1 + expf((x) - 1)))
#define _SIGMOID(x) (1 / (1 + expf(-(x))))
template<typename scalar_t>
__host__ __device__ __inline__ static scalar_t _norm(
scalar_t* dir) {
return sqrtf(dir[0] * dir[0] + dir[1] * dir[1] + dir[2] * dir[2]);
}
template<typename scalar_t>
__host__ __device__ __inline__ static void _normalize(
scalar_t* dir) {
scalar_t norm = _norm(dir);
dir[0] /= norm; dir[1] /= norm; dir[2] /= norm;
}
template<typename scalar_t>
__host__ __device__ __inline__ static scalar_t _dot3(
const scalar_t* __restrict__ u,
const scalar_t* __restrict__ v) {
return u[0] * v[0] + u[1] * v[1] + u[2] * v[2];
}
// Calculate basis functions depending on format, for given view directions
template <typename scalar_t>
__device__ __inline__ void maybe_precalc_basis(
const int format,
const int basis_dim,
const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
extra,
const scalar_t* __restrict__ dir,
scalar_t* __restrict__ out) {
switch(format) {
case FORMAT_ASG:
{
// UNTESTED ASG
for (int i = 0; i < basis_dim; ++i) {
const auto& ptr = extra[i];
scalar_t S = _dot3(dir, &ptr[8]);
scalar_t dot_x = _dot3(dir, &ptr[2]);
scalar_t dot_y = _dot3(dir, &ptr[5]);
out[i] = S * expf(-ptr[0] * dot_x * dot_x
-ptr[1] * dot_y * dot_y) / basis_dim;
}
} // ASG
break;
case FORMAT_SG:
{
for (int i = 0; i < basis_dim; ++i) {
const auto& ptr = extra[i];
out[i] = expf(ptr[0] * (_dot3(dir, &ptr[1]) - 1.f)) / basis_dim;
}
} // SG
break;
case FORMAT_SH:
{
out[0] = C0;
const scalar_t x = dir[0], y = dir[1], z = dir[2];
const scalar_t xx = x * x, yy = y * y, zz = z * z;
const scalar_t xy = x * y, yz = y * z, xz = x * z;
switch (basis_dim) {
case 25:
out[16] = C4[0] * xy * (xx - yy);
out[17] = C4[1] * yz * (3 * xx - yy);
out[18] = C4[2] * xy * (7 * zz - 1.f);
out[19] = C4[3] * yz * (7 * zz - 3.f);
out[20] = C4[4] * (zz * (35 * zz - 30) + 3);
out[21] = C4[5] * xz * (7 * zz - 3);
out[22] = C4[6] * (xx - yy) * (7 * zz - 1.f);
out[23] = C4[7] * xz * (xx - 3 * yy);
out[24] = C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy));
[[fallthrough]];
case 16:
out[9] = C3[0] * y * (3 * xx - yy);
out[10] = C3[1] * xy * z;
out[11] = C3[2] * y * (4 * zz - xx - yy);
out[12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy);
out[13] = C3[4] * x * (4 * zz - xx - yy);
out[14] = C3[5] * z * (xx - yy);
out[15] = C3[6] * x * (xx - 3 * yy);
[[fallthrough]];
case 9:
out[4] = C2[0] * xy;
out[5] = C2[1] * yz;
out[6] = C2[2] * (2.0 * zz - xx - yy);
out[7] = C2[3] * xz;
out[8] = C2[4] * (xx - yy);
[[fallthrough]];
case 4:
out[1] = -C1 * y;
out[2] = C1 * z;
out[3] = -C1 * x;
}
} // SH
break;
default:
// Do nothing
break;
} // switch
}
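// Quick sanity check (added note, values illustrative): with format == FORMAT_SH,
// basis_dim == 4 and dir = (0, 0, 1), the SH branch above produces
//   out = { C0, -C1*y, C1*z, -C1*x } = { 0.2821, 0, 0.4886, 0 },
// i.e. only the constant band and the z-linear band are non-zero, as expected
// for real spherical harmonics evaluated along the +z axis.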
template <typename scalar_t>
__device__ __inline__ scalar_t _get_delta_scale(
const scalar_t* __restrict__ scaling,
scalar_t* __restrict__ dir) {
dir[0] *= scaling[0];
dir[1] *= scaling[1];
dir[2] *= scaling[2];
scalar_t delta_scale = 1.f / _norm(dir);
dir[0] *= delta_scale;
dir[1] *= delta_scale;
dir[2] *= delta_scale;
return delta_scale;
}
template <typename scalar_t>
__device__ __inline__ void _dda_unit(
const scalar_t* __restrict__ cen,
const scalar_t* __restrict__ invdir,
scalar_t* __restrict__ tmin,
scalar_t* __restrict__ tmax) {
// Intersect unit AABB
scalar_t t1, t2;
*tmin = 0.0f;
*tmax = 1e9f;
#pragma unroll
for (int i = 0; i < 3; ++i) {
t1 = - cen[i] * invdir[i];
t2 = t1 + invdir[i];
*tmin = max(*tmin, min(t1, t2));
*tmax = min(*tmax, max(t1, t2));
}
}
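// Worked example (added for clarity): a ray entering the unit cube through the
// x = 0 face, cen = (-1, 0.5, 0.5), dir = (1, 0, 0) => invdir ~ (1, 1e9, 1e9).
// Axis 0 gives t1 = 1, t2 = 2; axes 1 and 2 give huge symmetric intervals, so
// the loop above returns tmin = 1 (entry at x = 0) and tmax = 2 (exit at x = 1).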
template <typename scalar_t>
__device__ __inline__ void trace_ray(
PackedTreeSpec<scalar_t>& __restrict__ tree,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = out.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
for (int j = 0; j < out_data_dim; ++j) {
out[j] = opt.background_brightness;
}
return;
} else {
for (int j = 0; j < out_data_dim; ++j) {
out[j] = 0.f;
}
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim,
tree.extra_data, ray.vdir, basis_fn);
scalar_t light_intensity = 1.f;
scalar_t t = tmin;
scalar_t cube_sz;
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = ray.origin[j] + t * ray.dir[j];
}
int64_t node_id;
scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data, tree.child,
pos, &cube_sz, tree.weight_accum != nullptr ? &node_id : nullptr);
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > opt.sigma_thresh) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
out[t] += weight * (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding);
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
out[j] += weight * (_SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding);
}
}
light_intensity *= att;
if (tree.weight_accum != nullptr) {
if (tree.weight_accum_max) {
atomicMax(&tree.weight_accum[node_id], weight);
} else {
atomicAdd(&tree.weight_accum[node_id], weight);
}
}
if (light_intensity <= opt.stop_thresh) {
// Full opacity, stop
scalar_t scale = 1.0 / (1.0 - light_intensity);
for (int j = 0; j < out_data_dim; ++j) {
out[j] *= scale;
}
return;
}
}
t += delta_t;
}
for (int j = 0; j < out_data_dim; ++j) {
out[j] += light_intensity * opt.background_brightness;
}
}
}
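// Reference for the accumulation above (added note, same notation as the code):
// with per-sample opacity a_k = 1 - exp(-sigma_k * delta_t_k * delta_scale) and
// transmittance T_k = prod_{j<k} (1 - a_j) (tracked as light_intensity), each
// sample contributes weight_k = T_k * a_k, and the ray color is
//   C = sum_k weight_k * c_k + T_N * background_brightness,
// which is exactly what the loop composites into `out`.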
template <typename scalar_t>
__device__ __inline__ void trace_ray_backward(
PackedTreeSpec<scalar_t>& __restrict__ tree,
const torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t>
grad_output,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = grad_output.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
return;
} else {
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim, tree.extra_data,
ray.vdir, basis_fn);
scalar_t accum = 0.0;
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
// PASS 1
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(
tree.data, tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
scalar_t total_color = 0.f;
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
const scalar_t sigmoid = _SIGMOID(tmp);
const scalar_t tmp2 = weight * sigmoid * (1.0 - sigmoid) *
grad_output[t] * d_rgb_pad;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
const scalar_t toadd = basis_fn[i] * tmp2;
atomicAdd(&grad_tree_val[off + i],
toadd);
}
total_color += (sigmoid * d_rgb_pad - opt.rgb_padding)
* grad_output[t];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t sigmoid = _SIGMOID(tree_val[j]);
const scalar_t toadd = weight * sigmoid * (
1.f - sigmoid) * grad_output[j] * d_rgb_pad;
atomicAdd(&grad_tree_val[j], toadd);
total_color += (sigmoid * d_rgb_pad - opt.rgb_padding)
* grad_output[j];
}
}
light_intensity *= att;
accum += weight * total_color;
}
t += delta_t;
}
scalar_t total_grad = 0.f;
for (int j = 0; j < out_data_dim; ++j)
total_grad += grad_output[j];
accum += light_intensity * opt.background_brightness * total_grad;
}
// PASS 2
{
// scalar_t accum_lo = 0.0;
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data,
tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
const scalar_t raw_sigma = sigma;
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
scalar_t total_color = 0.f;
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
total_color += (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding)
* grad_output[t];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
total_color += (_SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding)
* grad_output[j];
}
}
light_intensity *= att;
accum -= weight * total_color;
atomicAdd(
&grad_tree_val[data_dim - 1],
delta_t * delta_scale * (
total_color * light_intensity - accum)
* (opt.density_softplus ?
_SIGMOID(raw_sigma - 1)
: 1)
);
}
t += delta_t;
}
}
}
} // trace_ray_backward
template <typename scalar_t>
__device__ __inline__ void trace_ray_se_grad_hess(
PackedTreeSpec<scalar_t>& __restrict__ tree,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> color_ref,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
hessdiag_out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = color_out.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] = opt.background_brightness;
}
return;
} else {
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim, tree.extra_data,
ray.vdir, basis_fn);
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
// PASS 1 - compute residual (trace_ray_se_grad_hess)
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = ray.origin[j] + t * ray.dir[j];
}
scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data, tree.child,
pos, &cube_sz, nullptr);
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0f) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
color_out[t] += weight * (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding);
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += weight * (_SIGMOID(tree_val[j]) *
d_rgb_pad - opt.rgb_padding);
}
}
light_intensity *= att;
}
t += delta_t;
}
// Add background intensity & color -> residual
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += light_intensity * opt.background_brightness - color_ref[j];
}
}
// PASS 2 - compute RGB gradient & suffix (trace_ray_se_grad_hess)
scalar_t color_accum[4] = {0, 0, 0, 0};
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(
tree.data, tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t* hessdiag_tree_val = hessdiag_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
const scalar_t sigmoid = _SIGMOID(tmp);
const scalar_t grad_ci = weight * sigmoid * (1.0 - sigmoid) *
d_rgb_pad;
// const scalar_t d2_term =
// (1.f - 2.f * sigmoid) * color_out[t];
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
const scalar_t grad_wi = basis_fn[i] * grad_ci;
atomicAdd(&grad_tree_val[off + i], grad_wi * color_out[t]);
atomicAdd(&hessdiag_tree_val[off + i],
// grad_wi * basis_fn[i] * (grad_ci +
// d2_term) // Newton
grad_wi * grad_wi // Gauss-Newton
);
}
const scalar_t color_j = sigmoid * d_rgb_pad - opt.rgb_padding;
color_accum[t] += weight * color_j;
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t sigmoid = _SIGMOID(tree_val[j]);
const scalar_t grad_ci = weight * sigmoid * (
1.f - sigmoid) * d_rgb_pad;
// const scalar_t d2_term = (1.f - 2.f * sigmoid) * color_out[j];
atomicAdd(&grad_tree_val[j], grad_ci * color_out[j]);
// Newton
// atomicAdd(&hessdiag_tree_val[j], grad_ci * (grad_ci + d2_term));
// Gauss-Newton
atomicAdd(&hessdiag_tree_val[j], grad_ci * grad_ci);
const scalar_t color_j = sigmoid * d_rgb_pad - opt.rgb_padding;
color_accum[j] += weight * color_j;
}
}
light_intensity *= att;
}
t += delta_t;
}
for (int j = 0; j < out_data_dim; ++j) {
color_accum[j] += light_intensity * opt.background_brightness;
}
}
// PASS 3 - finish computing sigma gradient (trace_ray_se_grad_hess)
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
scalar_t color_curr[4];
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data,
tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t* hessdiag_tree_val = hessdiag_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
const scalar_t raw_sigma = sigma;
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int u = 0; u < out_data_dim; ++ u) {
int off = u * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
color_curr[u] = _SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding;
color_accum[u] -= weight * color_curr[u];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
color_curr[j] = _SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding;
color_accum[j] -= weight * color_curr[j];
}
}
light_intensity *= att;
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t grad_sigma = delta_t * delta_scale * (
color_curr[j] * light_intensity - color_accum[j]);
// Newton
// const scalar_t grad2_sigma =
// grad_sigma * (grad_sigma - delta_t * delta_scale * color_out[j]);
// Gauss-Newton
const scalar_t grad2_sigma = grad_sigma * grad_sigma;
if (opt.density_softplus) {
const scalar_t sigmoid = _SIGMOID(raw_sigma - 1);
const scalar_t d_sigmoid = sigmoid * (1.f - sigmoid);
// FIXME not sure this works
atomicAdd(&grad_tree_val[data_dim - 1], grad_sigma *
color_out[j] * sigmoid);
atomicAdd(&hessdiag_tree_val[data_dim - 1],
grad2_sigma * sigmoid * sigmoid
+ grad_sigma * d_sigmoid);
} else {
atomicAdd(&grad_tree_val[data_dim - 1],
grad_sigma * color_out[j]);
atomicAdd(&hessdiag_tree_val[data_dim - 1], grad2_sigma);
}
}
}
t += delta_t;
}
}
// Residual -> color
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += color_ref[j];
}
}
}
template <typename scalar_t>
__global__ void render_ray_kernel(
PackedTreeSpec<scalar_t> tree,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
out) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
out[tid]);
}
template <typename scalar_t>
__global__ void render_ray_backward_kernel(
PackedTreeSpec<scalar_t> tree,
const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
grad_output,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out
) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray_backward<scalar_t>(
tree,
grad_output[tid],
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
grad_data_out);
}
template <typename scalar_t>
__device__ __inline__ void cam2world_ray(
int ix, int iy,
scalar_t* dir,
scalar_t* origin,
const PackedCameraSpec<scalar_t>& __restrict__ cam) {
scalar_t x = (ix - 0.5 * cam.width) / cam.fx;
scalar_t y = -(iy - 0.5 * cam.height) / cam.fy;
scalar_t z = sqrtf(x * x + y * y + 1.0);
x /= z; y /= z; z = -1.0f / z;
dir[0] = cam.c2w[0][0] * x + cam.c2w[0][1] * y + cam.c2w[0][2] * z;
dir[1] = cam.c2w[1][0] * x + cam.c2w[1][1] * y + cam.c2w[1][2] * z;
dir[2] = cam.c2w[2][0] * x + cam.c2w[2][1] * y + cam.c2w[2][2] * z;
origin[0] = cam.c2w[0][3]; origin[1] = cam.c2w[1][3]; origin[2] = cam.c2w[2][3];
}
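// Worked example (added for clarity): for the central pixel ix = width/2,
// iy = height/2 we get x = y = 0 and z = 1, so after normalization the
// camera-space direction is (0, 0, -1) and `dir` reduces to minus the third
// column of c2w, i.e. the camera's forward axis expressed in world coordinates.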
template <typename scalar_t>
__host__ __device__ __inline__ static void maybe_world2ndc(
RenderOptions& __restrict__ opt,
scalar_t* __restrict__ dir,
scalar_t* __restrict__ cen, scalar_t near = 1.f) {
if (opt.ndc_width < 0)
return;
scalar_t t = -(near + cen[2]) / dir[2];
for (int i = 0; i < 3; ++i) {
cen[i] = cen[i] + t * dir[i];
}
dir[0] = -((2 * opt.ndc_focal) / opt.ndc_width) * (dir[0] / dir[2] - cen[0] / cen[2]);
dir[1] = -((2 * opt.ndc_focal) / opt.ndc_height) * (dir[1] / dir[2] - cen[1] / cen[2]);
dir[2] = -2 * near / cen[2];
cen[0] = -((2 * opt.ndc_focal) / opt.ndc_width) * (cen[0] / cen[2]);
cen[1] = -((2 * opt.ndc_focal) / opt.ndc_height) * (cen[1] / cen[2]);
cen[2] = 1 + 2 * near / cen[2];
_normalize(dir);
}
template <typename scalar_t>
__global__ void render_image_kernel(
PackedTreeSpec<scalar_t> tree,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
out[iy][ix]);
}
template <typename scalar_t>
__global__ void render_image_backward_kernel(
PackedTreeSpec<scalar_t> tree,
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grad_output,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray_backward<scalar_t>(
tree,
grad_output[iy][ix],
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
grad_data_out);
}
template <typename scalar_t>
__global__ void se_grad_kernel(
PackedTreeSpec<scalar_t> tree,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> color_ref,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> grad_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> hessdiag_out) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray_se_grad_hess<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
color_ref[tid],
color_out[tid],
grad_out,
hessdiag_out);
}
template <typename scalar_t>
__global__ void se_grad_persp_kernel(
PackedTreeSpec<scalar_t> tree,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
color_ref,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> grad_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> hessdiag_out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray_se_grad_hess<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
color_ref[iy][ix],
color_out[iy][ix],
grad_out,
hessdiag_out);
}
template <typename scalar_t>
__device__ __inline__ void grid_trace_ray(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
data,
const scalar_t* __restrict__ origin,
const scalar_t* __restrict__ dir,
const scalar_t* __restrict__ vdir,
scalar_t step_size,
scalar_t delta_scale,
scalar_t sigma_thresh,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_weight,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_hit) {
scalar_t tmin, tmax;
scalar_t invdir[3];
const int reso = data.size(0);
scalar_t* grid_weight_val = grid_weight.data();
scalar_t* grid_hit_val = grid_hit.data();
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (dir[i] + 1e-9);
}
_dda_unit(origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
return;
} else {
scalar_t pos[3];
scalar_t light_intensity = 1.f;
scalar_t t = tmin;
scalar_t cube_sz = reso;
int32_t u, v, w, node_id;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = origin[j] + t * dir[j];
}
clamp_coord<scalar_t>(pos);
pos[0] *= reso;
pos[1] *= reso;
pos[2] *= reso;
u = floor(pos[0]);
v = floor(pos[1]);
w = floor(pos[2]);
pos[0] -= u;
pos[1] -= v;
pos[2] -= w;
node_id = u * reso * reso + v * reso + w;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + step_size;
scalar_t sigma = data[u][v][w];
if (sigma > sigma_thresh) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
light_intensity *= att;
atomicMax(&grid_weight_val[node_id], weight);
atomicAdd(&grid_hit_val[node_id], (scalar_t) 1.0);
}
t += delta_t;
}
}
}
template <typename scalar_t>
__global__ void grid_weight_render_kernel(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
data,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
const scalar_t* __restrict__ offset,
const scalar_t* __restrict__ scaling,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_weight,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_hit) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, offset, scaling);
const scalar_t delta_scale = _get_delta_scale(scaling, dir);
grid_trace_ray<scalar_t>(
data,
origin,
dir,
vdir,
opt.step_size,
delta_scale,
opt.sigma_thresh,
grid_weight,
grid_hit);
}
} // namespace device
// Compute RGB output dimension from input dimension & SH degree
__host__ int get_out_data_dim(int format, int basis_dim, int in_data_dim) {
if (format != FORMAT_RGBA) {
return (in_data_dim - 1) / basis_dim;
} else {
return in_data_dim - 1;
}
}
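// Example (added for clarity): with FORMAT_SH, basis_dim = 9 and
// in_data_dim = 28 this returns (28 - 1) / 9 = 3 color channels; with
// FORMAT_RGBA and in_data_dim = 4 it returns 4 - 1 = 3, since the last data
// channel always stores density.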
} // namespace
torch::Tensor volume_render(TreeSpec& tree, RaysSpec& rays, RenderOptions& opt) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
const auto Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros({Q, out_data_dim}, rays.origins.options());
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::render_ray_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree, rays, opt,
result.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_image(TreeSpec& tree, CameraSpec& cam, RenderOptions& opt) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros({cam.height, cam.width, out_data_dim},
tree.data.options());
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::render_image_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree, cam, opt,
result.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_backward(
TreeSpec& tree, RaysSpec& rays,
RenderOptions& opt,
torch::Tensor grad_output) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
const int Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::render_ray_backward_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree,
grad_output.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
rays,
opt,
result.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_image_backward(TreeSpec& tree, CameraSpec& cam,
RenderOptions& opt,
torch::Tensor grad_output) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::render_image_backward_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree,
grad_output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
cam,
opt,
result.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> se_grad(
TreeSpec& tree, RaysSpec& rays, torch::Tensor color, RenderOptions& opt) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
CHECK_INPUT(color);
const auto Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
if (out_data_dim > 4) {
throw std::runtime_error("Tree's output dim cannot be > 4 for se_grad");
}
torch::Tensor result = torch::zeros({Q, out_data_dim}, rays.origins.options());
torch::Tensor grad = torch::zeros_like(tree.data);
torch::Tensor hessdiag = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::se_grad_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree, rays, opt,
color.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
grad.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>(),
hessdiag.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return std::template tuple<torch::Tensor, torch::Tensor, torch::Tensor>(result, grad, hessdiag);
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> se_grad_persp(
TreeSpec& tree,
CameraSpec& cam,
RenderOptions& opt,
torch::Tensor color) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
CHECK_INPUT(color);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
if (out_data_dim > 4) {
throw std::runtime_error("Tree's output dim cannot be > 4 for se_grad");
}
torch::Tensor result = torch::zeros({cam.height, cam.width, out_data_dim},
tree.data.options());
torch::Tensor grad = torch::zeros_like(tree.data);
torch::Tensor hessdiag = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::se_grad_persp_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
tree, cam, opt,
color.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
grad.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>(),
hessdiag.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return std::template tuple<torch::Tensor, torch::Tensor, torch::Tensor>(result, grad, hessdiag);
}
std::vector<torch::Tensor> grid_weight_render(
torch::Tensor data, CameraSpec& cam, RenderOptions& opt,
torch::Tensor offset, torch::Tensor scaling) {
cam.check();
DEVICE_GUARD(data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
torch::Tensor grid_weight = torch::zeros_like(data);
torch::Tensor grid_hit = torch::zeros_like(data);
AT_DISPATCH_FLOATING_TYPES(data.type(), __FUNCTION__, [&] {
hipLaunchKernelGGL(( device::grid_weight_render_kernel<scalar_t>), dim3(blocks), dim3(cuda_n_threads), 0, 0,
data.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
cam,
opt,
offset.data<scalar_t>(),
scaling.data<scalar_t>(),
grid_weight.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
grid_hit.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return {grid_weight, grid_hit};
}
| 2d65a76698779bf622c46c7d5cf07440b8664c25.cu | /*
* Copyright 2021 PlenOctree Authors
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstdint>
#include <vector>
#include "common.cuh"
#include "data_spec_packed.cuh"
namespace {
// Automatically choose number of CUDA threads based on HW CUDA core count
int cuda_n_threads = -1;
__host__ void auto_cuda_threads() {
if (~cuda_n_threads) return;
cudaDeviceProp dev_prop;
cudaGetDeviceProperties(&dev_prop, 0);
const int n_cores = get_sp_cores(dev_prop);
// Optimize number of CUDA threads per block
if (n_cores < 2048) {
cuda_n_threads = 256;
} else if (n_cores < 8192) {
cuda_n_threads = 512;
} else {
cuda_n_threads = 1024;
}
}
namespace device {
// SH Coefficients from https://github.com/google/spherical-harmonics
__device__ __constant__ const float C0 = 0.28209479177387814;
__device__ __constant__ const float C1 = 0.4886025119029199;
__device__ __constant__ const float C2[] = {
1.0925484305920792,
-1.0925484305920792,
0.31539156525252005,
-1.0925484305920792,
0.5462742152960396
};
__device__ __constant__ const float C3[] = {
-0.5900435899266435,
2.890611442640554,
-0.4570457994644658,
0.3731763325901154,
-0.4570457994644658,
1.445305721320277,
-0.5900435899266435
};
__device__ __constant__ const float C4[] = {
2.5033429417967046,
-1.7701307697799304,
0.9461746957575601,
-0.6690465435572892,
0.10578554691520431,
-0.6690465435572892,
0.47308734787878004,
-1.7701307697799304,
0.6258357354491761,
};
#define _SOFTPLUS_M1(x) (logf(1 + expf((x) - 1)))
#define _SIGMOID(x) (1 / (1 + expf(-(x))))
template<typename scalar_t>
__host__ __device__ __inline__ static scalar_t _norm(
scalar_t* dir) {
return sqrtf(dir[0] * dir[0] + dir[1] * dir[1] + dir[2] * dir[2]);
}
template<typename scalar_t>
__host__ __device__ __inline__ static void _normalize(
scalar_t* dir) {
scalar_t norm = _norm(dir);
dir[0] /= norm; dir[1] /= norm; dir[2] /= norm;
}
template<typename scalar_t>
__host__ __device__ __inline__ static scalar_t _dot3(
const scalar_t* __restrict__ u,
const scalar_t* __restrict__ v) {
return u[0] * v[0] + u[1] * v[1] + u[2] * v[2];
}
// Calculate basis functions depending on format, for given view directions
template <typename scalar_t>
__device__ __inline__ void maybe_precalc_basis(
const int format,
const int basis_dim,
const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
extra,
const scalar_t* __restrict__ dir,
scalar_t* __restrict__ out) {
switch(format) {
case FORMAT_ASG:
{
// UNTESTED ASG
for (int i = 0; i < basis_dim; ++i) {
const auto& ptr = extra[i];
scalar_t S = _dot3(dir, &ptr[8]);
scalar_t dot_x = _dot3(dir, &ptr[2]);
scalar_t dot_y = _dot3(dir, &ptr[5]);
out[i] = S * expf(-ptr[0] * dot_x * dot_x
-ptr[1] * dot_y * dot_y) / basis_dim;
}
} // ASG
break;
case FORMAT_SG:
{
for (int i = 0; i < basis_dim; ++i) {
const auto& ptr = extra[i];
out[i] = expf(ptr[0] * (_dot3(dir, &ptr[1]) - 1.f)) / basis_dim;
}
} // SG
break;
case FORMAT_SH:
{
out[0] = C0;
const scalar_t x = dir[0], y = dir[1], z = dir[2];
const scalar_t xx = x * x, yy = y * y, zz = z * z;
const scalar_t xy = x * y, yz = y * z, xz = x * z;
switch (basis_dim) {
case 25:
out[16] = C4[0] * xy * (xx - yy);
out[17] = C4[1] * yz * (3 * xx - yy);
out[18] = C4[2] * xy * (7 * zz - 1.f);
out[19] = C4[3] * yz * (7 * zz - 3.f);
out[20] = C4[4] * (zz * (35 * zz - 30) + 3);
out[21] = C4[5] * xz * (7 * zz - 3);
out[22] = C4[6] * (xx - yy) * (7 * zz - 1.f);
out[23] = C4[7] * xz * (xx - 3 * yy);
out[24] = C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy));
[[fallthrough]];
case 16:
out[9] = C3[0] * y * (3 * xx - yy);
out[10] = C3[1] * xy * z;
out[11] = C3[2] * y * (4 * zz - xx - yy);
out[12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy);
out[13] = C3[4] * x * (4 * zz - xx - yy);
out[14] = C3[5] * z * (xx - yy);
out[15] = C3[6] * x * (xx - 3 * yy);
[[fallthrough]];
case 9:
out[4] = C2[0] * xy;
out[5] = C2[1] * yz;
out[6] = C2[2] * (2.0 * zz - xx - yy);
out[7] = C2[3] * xz;
out[8] = C2[4] * (xx - yy);
[[fallthrough]];
case 4:
out[1] = -C1 * y;
out[2] = C1 * z;
out[3] = -C1 * x;
}
} // SH
break;
default:
// Do nothing
break;
} // switch
}
template <typename scalar_t>
__device__ __inline__ scalar_t _get_delta_scale(
const scalar_t* __restrict__ scaling,
scalar_t* __restrict__ dir) {
dir[0] *= scaling[0];
dir[1] *= scaling[1];
dir[2] *= scaling[2];
scalar_t delta_scale = 1.f / _norm(dir);
dir[0] *= delta_scale;
dir[1] *= delta_scale;
dir[2] *= delta_scale;
return delta_scale;
}
template <typename scalar_t>
__device__ __inline__ void _dda_unit(
const scalar_t* __restrict__ cen,
const scalar_t* __restrict__ invdir,
scalar_t* __restrict__ tmin,
scalar_t* __restrict__ tmax) {
// Intersect unit AABB
scalar_t t1, t2;
*tmin = 0.0f;
*tmax = 1e9f;
#pragma unroll
for (int i = 0; i < 3; ++i) {
t1 = - cen[i] * invdir[i];
t2 = t1 + invdir[i];
*tmin = max(*tmin, min(t1, t2));
*tmax = min(*tmax, max(t1, t2));
}
}
template <typename scalar_t>
__device__ __inline__ void trace_ray(
PackedTreeSpec<scalar_t>& __restrict__ tree,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = out.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
for (int j = 0; j < out_data_dim; ++j) {
out[j] = opt.background_brightness;
}
return;
} else {
for (int j = 0; j < out_data_dim; ++j) {
out[j] = 0.f;
}
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim,
tree.extra_data, ray.vdir, basis_fn);
scalar_t light_intensity = 1.f;
scalar_t t = tmin;
scalar_t cube_sz;
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = ray.origin[j] + t * ray.dir[j];
}
int64_t node_id;
scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data, tree.child,
pos, &cube_sz, tree.weight_accum != nullptr ? &node_id : nullptr);
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > opt.sigma_thresh) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
out[t] += weight * (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding);
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
out[j] += weight * (_SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding);
}
}
light_intensity *= att;
if (tree.weight_accum != nullptr) {
if (tree.weight_accum_max) {
atomicMax(&tree.weight_accum[node_id], weight);
} else {
atomicAdd(&tree.weight_accum[node_id], weight);
}
}
if (light_intensity <= opt.stop_thresh) {
// Full opacity, stop
scalar_t scale = 1.0 / (1.0 - light_intensity);
for (int j = 0; j < out_data_dim; ++j) {
out[j] *= scale;
}
return;
}
}
t += delta_t;
}
for (int j = 0; j < out_data_dim; ++j) {
out[j] += light_intensity * opt.background_brightness;
}
}
}
template <typename scalar_t>
__device__ __inline__ void trace_ray_backward(
PackedTreeSpec<scalar_t>& __restrict__ tree,
const torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t>
grad_output,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = grad_output.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
return;
} else {
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim, tree.extra_data,
ray.vdir, basis_fn);
scalar_t accum = 0.0;
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
// PASS 1
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(
tree.data, tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
scalar_t total_color = 0.f;
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
const scalar_t sigmoid = _SIGMOID(tmp);
const scalar_t tmp2 = weight * sigmoid * (1.0 - sigmoid) *
grad_output[t] * d_rgb_pad;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
const scalar_t toadd = basis_fn[i] * tmp2;
atomicAdd(&grad_tree_val[off + i],
toadd);
}
total_color += (sigmoid * d_rgb_pad - opt.rgb_padding)
* grad_output[t];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t sigmoid = _SIGMOID(tree_val[j]);
const scalar_t toadd = weight * sigmoid * (
1.f - sigmoid) * grad_output[j] * d_rgb_pad;
atomicAdd(&grad_tree_val[j], toadd);
total_color += (sigmoid * d_rgb_pad - opt.rgb_padding)
* grad_output[j];
}
}
light_intensity *= att;
accum += weight * total_color;
}
t += delta_t;
}
scalar_t total_grad = 0.f;
for (int j = 0; j < out_data_dim; ++j)
total_grad += grad_output[j];
accum += light_intensity * opt.background_brightness * total_grad;
}
// PASS 2
{
// scalar_t accum_lo = 0.0;
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data,
tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
const scalar_t raw_sigma = sigma;
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
scalar_t total_color = 0.f;
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
total_color += (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding)
* grad_output[t];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
total_color += (_SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding)
* grad_output[j];
}
}
light_intensity *= att;
accum -= weight * total_color;
atomicAdd(
&grad_tree_val[data_dim - 1],
delta_t * delta_scale * (
total_color * light_intensity - accum)
* (opt.density_softplus ?
_SIGMOID(raw_sigma - 1)
: 1)
);
}
t += delta_t;
}
}
}
} // trace_ray_backward
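/*
Editorial note (not part of the original source): a sketch of why two passes
suffice for the density gradient above. With w_i = T_i * (1 - exp(-sigma_i * delta_i))
and g = grad_output, PASS 1 accumulates
accum = sum_i w_i * (c_i . g) + T_end * background * sum(g).
PASS 2 re-traces the ray and subtracts each sample's own term as it goes, so when
sample i is reached `accum` holds only the suffix (later samples plus background),
and the atomicAdd on grad_tree_val[data_dim - 1] applies
d out / d sigma_i = delta_i * delta_scale * ((c_i . g) * T_{i+1} - suffix_i),
times an extra sigmoid factor when density_softplus rescales the raw density.
*/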
template <typename scalar_t>
__device__ __inline__ void trace_ray_se_grad_hess(
PackedTreeSpec<scalar_t>& __restrict__ tree,
SingleRaySpec<scalar_t> ray,
RenderOptions& __restrict__ opt,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> color_ref,
torch::TensorAccessor<scalar_t, 1, torch::RestrictPtrTraits, int32_t> color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
hessdiag_out) {
const scalar_t delta_scale = _get_delta_scale(tree.scaling, ray.dir);
scalar_t tmin, tmax;
scalar_t invdir[3];
const int tree_N = tree.child.size(1);
const int data_dim = tree.data.size(4);
const int out_data_dim = color_out.size(0);
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (ray.dir[i] + 1e-9);
}
_dda_unit(ray.origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] = opt.background_brightness;
}
return;
} else {
scalar_t pos[3];
scalar_t basis_fn[25];
maybe_precalc_basis<scalar_t>(opt.format, opt.basis_dim, tree.extra_data,
ray.vdir, basis_fn);
const scalar_t d_rgb_pad = 1 + 2 * opt.rgb_padding;
// PASS 1 - compute residual (trace_ray_se_grad_hess)
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = ray.origin[j] + t * ray.dir[j];
}
scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data, tree.child,
pos, &cube_sz, nullptr);
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0f) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
color_out[t] += weight * (_SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding);
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += weight * (_SIGMOID(tree_val[j]) *
d_rgb_pad - opt.rgb_padding);
}
}
light_intensity *= att;
}
t += delta_t;
}
// Add background intensity & color -> residual
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += light_intensity * opt.background_brightness - color_ref[j];
}
}
// PASS 2 - compute RGB gradient & suffix (trace_ray_se_grad_hess)
scalar_t color_accum[4] = {0, 0, 0, 0};
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(
tree.data, tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t* hessdiag_tree_val = hessdiag_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int t = 0; t < out_data_dim; ++ t) {
int off = t * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
const scalar_t sigmoid = _SIGMOID(tmp);
const scalar_t grad_ci = weight * sigmoid * (1.0 - sigmoid) *
d_rgb_pad;
// const scalar_t d2_term =
// (1.f - 2.f * sigmoid) * color_out[t];
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
const scalar_t grad_wi = basis_fn[i] * grad_ci;
atomicAdd(&grad_tree_val[off + i], grad_wi * color_out[t]);
atomicAdd(&hessdiag_tree_val[off + i],
// grad_wi * basis_fn[i] * (grad_ci +
// d2_term) // Newton
grad_wi * grad_wi // Gauss-Newton
);
}
const scalar_t color_j = sigmoid * d_rgb_pad - opt.rgb_padding;
color_accum[t] += weight * color_j;
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t sigmoid = _SIGMOID(tree_val[j]);
const scalar_t grad_ci = weight * sigmoid * (
1.f - sigmoid) * d_rgb_pad;
// const scalar_t d2_term = (1.f - 2.f * sigmoid) * color_out[j];
atomicAdd(&grad_tree_val[j], grad_ci * color_out[j]);
// Newton
// atomicAdd(&hessdiag_tree_val[j], grad_ci * (grad_ci + d2_term));
// Gauss-Newton
atomicAdd(&hessdiag_tree_val[j], grad_ci * grad_ci);
const scalar_t color_j = sigmoid * d_rgb_pad - opt.rgb_padding;
color_accum[j] += weight * color_j;
}
}
light_intensity *= att;
}
t += delta_t;
}
for (int j = 0; j < out_data_dim; ++j) {
color_accum[j] += light_intensity * opt.background_brightness;
}
}
// PASS 3 - finish computing sigma gradient (trace_ray_se_grad_hess)
{
scalar_t light_intensity = 1.f, t = tmin, cube_sz;
scalar_t color_curr[4];
while (t < tmax) {
for (int j = 0; j < 3; ++j) pos[j] = ray.origin[j] + t * ray.dir[j];
const scalar_t* tree_val = query_single_from_root<scalar_t>(tree.data,
tree.child, pos, &cube_sz);
// Reuse offset on gradient
const int64_t curr_leaf_offset = tree_val - tree.data.data();
scalar_t* grad_tree_val = grad_data_out.data() + curr_leaf_offset;
scalar_t* hessdiag_tree_val = hessdiag_out.data() + curr_leaf_offset;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + opt.step_size;
scalar_t sigma = tree_val[data_dim - 1];
const scalar_t raw_sigma = sigma;
if (opt.density_softplus) sigma = _SOFTPLUS_M1(sigma);
if (sigma > 0.0) {
att = expf(-delta_t * sigma * delta_scale);
const scalar_t weight = light_intensity * (1.f - att);
if (opt.format != FORMAT_RGBA) {
for (int u = 0; u < out_data_dim; ++ u) {
int off = u * opt.basis_dim;
scalar_t tmp = 0.0;
for (int i = opt.min_comp; i <= opt.max_comp; ++i) {
tmp += basis_fn[i] * tree_val[off + i];
}
color_curr[u] = _SIGMOID(tmp) * d_rgb_pad - opt.rgb_padding;
color_accum[u] -= weight * color_curr[u];
}
} else {
for (int j = 0; j < out_data_dim; ++j) {
color_curr[j] = _SIGMOID(tree_val[j]) * d_rgb_pad - opt.rgb_padding;
color_accum[j] -= weight * color_curr[j];
}
}
light_intensity *= att;
for (int j = 0; j < out_data_dim; ++j) {
const scalar_t grad_sigma = delta_t * delta_scale * (
color_curr[j] * light_intensity - color_accum[j]);
// Newton
// const scalar_t grad2_sigma =
// grad_sigma * (grad_sigma - delta_t * delta_scale * color_out[j]);
// Gauss-Newton
const scalar_t grad2_sigma = grad_sigma * grad_sigma;
if (opt.density_softplus) {
const scalar_t sigmoid = _SIGMOID(raw_sigma - 1);
const scalar_t d_sigmoid = sigmoid * (1.f - sigmoid);
// FIXME not sure this works
atomicAdd(&grad_tree_val[data_dim - 1], grad_sigma *
color_out[j] * sigmoid);
atomicAdd(&hessdiag_tree_val[data_dim - 1],
grad2_sigma * sigmoid * sigmoid
+ grad_sigma * d_sigmoid);
} else {
atomicAdd(&grad_tree_val[data_dim - 1],
grad_sigma * color_out[j]);
atomicAdd(&hessdiag_tree_val[data_dim - 1], grad2_sigma);
}
}
}
t += delta_t;
}
}
// Residual -> color
for (int j = 0; j < out_data_dim; ++j) {
color_out[j] += color_ref[j];
}
}
}
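/*
Editorial note (not part of the original source): trace_ray_se_grad_hess targets
the squared-error objective 0.5 * sum_j r_j^2 with residual r_j = rendered_j -
color_ref[j], computed in PASS 1 and kept in color_out. PASS 2 adds J^T r to
grad_data_out and the Gauss-Newton diagonal of J^T J to hessdiag_out for the
color/SH coefficients; PASS 3 does the same for the density channel using the
suffix trick from trace_ray_backward. The commented-out lines retain the extra
second-order term that a full Newton diagonal would need.
*/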
template <typename scalar_t>
__global__ void render_ray_kernel(
PackedTreeSpec<scalar_t> tree,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
out) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
out[tid]);
}
template <typename scalar_t>
__global__ void render_ray_backward_kernel(
PackedTreeSpec<scalar_t> tree,
const torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits>
grad_output,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out
) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray_backward<scalar_t>(
tree,
grad_output[tid],
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
grad_data_out);
}
template <typename scalar_t>
__device__ __inline__ void cam2world_ray(
int ix, int iy,
scalar_t* dir,
scalar_t* origin,
const PackedCameraSpec<scalar_t>& __restrict__ cam) {
scalar_t x = (ix - 0.5 * cam.width) / cam.fx;
scalar_t y = -(iy - 0.5 * cam.height) / cam.fy;
scalar_t z = sqrtf(x * x + y * y + 1.0);
x /= z; y /= z; z = -1.0f / z;
dir[0] = cam.c2w[0][0] * x + cam.c2w[0][1] * y + cam.c2w[0][2] * z;
dir[1] = cam.c2w[1][0] * x + cam.c2w[1][1] * y + cam.c2w[1][2] * z;
dir[2] = cam.c2w[2][0] * x + cam.c2w[2][1] * y + cam.c2w[2][2] * z;
origin[0] = cam.c2w[0][3]; origin[1] = cam.c2w[1][3]; origin[2] = cam.c2w[2][3];
}
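// Editorial sanity check (illustration only): for the central pixel
// (ix, iy) = (0.5 * width, 0.5 * height) the camera-space direction reduces to
// (0, 0, -1), so dir is simply minus the third column of c2w and origin is its
// fourth column -- the usual look-down-negative-z camera convention.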
template <typename scalar_t>
__host__ __device__ __inline__ static void maybe_world2ndc(
RenderOptions& __restrict__ opt,
scalar_t* __restrict__ dir,
scalar_t* __restrict__ cen, scalar_t near = 1.f) {
if (opt.ndc_width < 0)
return;
scalar_t t = -(near + cen[2]) / dir[2];
for (int i = 0; i < 3; ++i) {
cen[i] = cen[i] + t * dir[i];
}
dir[0] = -((2 * opt.ndc_focal) / opt.ndc_width) * (dir[0] / dir[2] - cen[0] / cen[2]);
dir[1] = -((2 * opt.ndc_focal) / opt.ndc_height) * (dir[1] / dir[2] - cen[1] / cen[2]);
dir[2] = -2 * near / cen[2];
cen[0] = -((2 * opt.ndc_focal) / opt.ndc_width) * (cen[0] / cen[2]);
cen[1] = -((2 * opt.ndc_focal) / opt.ndc_height) * (cen[1] / cen[2]);
cen[2] = 1 + 2 * near / cen[2];
_normalize(dir);
}
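// Editorial note: maybe_world2ndc is a no-op when opt.ndc_width < 0, so the
// render kernels below call it unconditionally; when enabled it first advances
// the ray origin to the near plane (t = -(near + cen[2]) / dir[2]) and then
// applies the forward-facing NDC warp before renormalizing dir.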
template <typename scalar_t>
__global__ void render_image_kernel(
PackedTreeSpec<scalar_t> tree,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
out[iy][ix]);
}
template <typename scalar_t>
__global__ void render_image_backward_kernel(
PackedTreeSpec<scalar_t> tree,
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grad_output,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits>
grad_data_out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray_backward<scalar_t>(
tree,
grad_output[iy][ix],
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
grad_data_out);
}
template <typename scalar_t>
__global__ void se_grad_kernel(
PackedTreeSpec<scalar_t> tree,
PackedRaysSpec<scalar_t> rays,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> color_ref,
torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> grad_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> hessdiag_out) {
CUDA_GET_THREAD_ID(tid, rays.origins.size(0));
scalar_t origin[3] = {rays.origins[tid][0], rays.origins[tid][1], rays.origins[tid][2]};
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
scalar_t dir[3] = {rays.dirs[tid][0], rays.dirs[tid][1], rays.dirs[tid][2]};
trace_ray_se_grad_hess<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, &rays.vdirs[tid][0]},
opt,
color_ref[tid],
color_out[tid],
grad_out,
hessdiag_out);
}
template <typename scalar_t>
__global__ void se_grad_persp_kernel(
PackedTreeSpec<scalar_t> tree,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
color_ref,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
color_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> grad_out,
torch::PackedTensorAccessor64<scalar_t, 5, torch::RestrictPtrTraits> hessdiag_out) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, tree.offset, tree.scaling);
trace_ray_se_grad_hess<scalar_t>(
tree,
SingleRaySpec<scalar_t>{origin, dir, vdir},
opt,
color_ref[iy][ix],
color_out[iy][ix],
grad_out,
hessdiag_out);
}
template <typename scalar_t>
__device__ __inline__ void grid_trace_ray(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
data,
const scalar_t* __restrict__ origin,
const scalar_t* __restrict__ dir,
const scalar_t* __restrict__ vdir,
scalar_t step_size,
scalar_t delta_scale,
scalar_t sigma_thresh,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_weight,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_hit) {
scalar_t tmin, tmax;
scalar_t invdir[3];
const int reso = data.size(0);
scalar_t* grid_weight_val = grid_weight.data();
scalar_t* grid_hit_val = grid_hit.data();
#pragma unroll
for (int i = 0; i < 3; ++i) {
invdir[i] = 1.0 / (dir[i] + 1e-9);
}
_dda_unit(origin, invdir, &tmin, &tmax);
if (tmax < 0 || tmin > tmax) {
// Ray doesn't hit box
return;
} else {
scalar_t pos[3];
scalar_t light_intensity = 1.f;
scalar_t t = tmin;
scalar_t cube_sz = reso;
int32_t u, v, w, node_id;
while (t < tmax) {
for (int j = 0; j < 3; ++j) {
pos[j] = origin[j] + t * dir[j];
}
clamp_coord<scalar_t>(pos);
pos[0] *= reso;
pos[1] *= reso;
pos[2] *= reso;
u = floor(pos[0]);
v = floor(pos[1]);
w = floor(pos[2]);
pos[0] -= u;
pos[1] -= v;
pos[2] -= w;
node_id = u * reso * reso + v * reso + w;
scalar_t att;
scalar_t subcube_tmin, subcube_tmax;
_dda_unit(pos, invdir, &subcube_tmin, &subcube_tmax);
const scalar_t t_subcube = (subcube_tmax - subcube_tmin) / cube_sz;
const scalar_t delta_t = t_subcube + step_size;
scalar_t sigma = data[u][v][w];
if (sigma > sigma_thresh) {
att = expf(-delta_t * delta_scale * sigma);
const scalar_t weight = light_intensity * (1.f - att);
light_intensity *= att;
atomicMax(&grid_weight_val[node_id], weight);
atomicAdd(&grid_hit_val[node_id], (scalar_t) 1.0);
}
t += delta_t;
}
}
}
template <typename scalar_t>
__global__ void grid_weight_render_kernel(
const torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
data,
PackedCameraSpec<scalar_t> cam,
RenderOptions opt,
const scalar_t* __restrict__ offset,
const scalar_t* __restrict__ scaling,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_weight,
torch::PackedTensorAccessor32<scalar_t, 3, torch::RestrictPtrTraits>
grid_hit) {
CUDA_GET_THREAD_ID(tid, cam.width * cam.height);
int iy = tid / cam.width, ix = tid % cam.width;
scalar_t dir[3], origin[3];
cam2world_ray(ix, iy, dir, origin, cam);
scalar_t vdir[3] = {dir[0], dir[1], dir[2]};
maybe_world2ndc(opt, dir, origin);
transform_coord<scalar_t>(origin, offset, scaling);
const scalar_t delta_scale = _get_delta_scale(scaling, dir);
grid_trace_ray<scalar_t>(
data,
origin,
dir,
vdir,
opt.step_size,
delta_scale,
opt.sigma_thresh,
grid_weight,
grid_hit);
}
} // namespace device
// Compute RGB output dimension from input dimension & SH degree
__host__ int get_out_data_dim(int format, int basis_dim, int in_data_dim) {
if (format != FORMAT_RGBA) {
return (in_data_dim - 1) / basis_dim;
} else {
return in_data_dim - 1;
}
}
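// Worked example (editorial): a spherical-harmonics tree with basis_dim = 9
// (degree-2 SH) and in_data_dim = 28 yields (28 - 1) / 9 = 3 color channels
// plus the density channel, while FORMAT_RGBA with the same in_data_dim would
// yield 28 - 1 = 27 output channels.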
} // namespace
torch::Tensor volume_render(TreeSpec& tree, RaysSpec& rays, RenderOptions& opt) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
const auto Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros({Q, out_data_dim}, rays.origins.options());
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
device::render_ray_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree, rays, opt,
result.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_image(TreeSpec& tree, CameraSpec& cam, RenderOptions& opt) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros({cam.height, cam.width, out_data_dim},
tree.data.options());
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
device::render_image_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree, cam, opt,
result.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_backward(
TreeSpec& tree, RaysSpec& rays,
RenderOptions& opt,
torch::Tensor grad_output) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
const int Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
device::render_ray_backward_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree,
grad_output.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
rays,
opt,
result.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
torch::Tensor volume_render_image_backward(TreeSpec& tree, CameraSpec& cam,
RenderOptions& opt,
torch::Tensor grad_output) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
torch::Tensor result = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
device::render_image_backward_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree,
grad_output.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
cam,
opt,
result.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return result;
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> se_grad(
TreeSpec& tree, RaysSpec& rays, torch::Tensor color, RenderOptions& opt) {
tree.check();
rays.check();
DEVICE_GUARD(tree.data);
CHECK_INPUT(color);
const auto Q = rays.origins.size(0);
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
if (out_data_dim > 4) {
throw std::runtime_error("Tree's output dim cannot be > 4 for se_grad");
}
torch::Tensor result = torch::zeros({Q, out_data_dim}, rays.origins.options());
torch::Tensor grad = torch::zeros_like(tree.data);
torch::Tensor hessdiag = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(rays.origins.type(), __FUNCTION__, [&] {
device::se_grad_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree, rays, opt,
color.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(),
grad.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>(),
hessdiag.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return std::template tuple<torch::Tensor, torch::Tensor, torch::Tensor>(result, grad, hessdiag);
}
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> se_grad_persp(
TreeSpec& tree,
CameraSpec& cam,
RenderOptions& opt,
torch::Tensor color) {
tree.check();
cam.check();
DEVICE_GUARD(tree.data);
CHECK_INPUT(color);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
int out_data_dim = get_out_data_dim(opt.format, opt.basis_dim, tree.data.size(4));
if (out_data_dim > 4) {
throw std::runtime_error("Tree's output dim cannot be > 4 for se_grad");
}
torch::Tensor result = torch::zeros({cam.height, cam.width, out_data_dim},
tree.data.options());
torch::Tensor grad = torch::zeros_like(tree.data);
torch::Tensor hessdiag = torch::zeros_like(tree.data);
AT_DISPATCH_FLOATING_TYPES(tree.data.type(), __FUNCTION__, [&] {
device::se_grad_persp_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
tree, cam, opt,
color.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
result.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
grad.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>(),
hessdiag.packed_accessor64<scalar_t, 5, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return std::template tuple<torch::Tensor, torch::Tensor, torch::Tensor>(result, grad, hessdiag);
}
std::vector<torch::Tensor> grid_weight_render(
torch::Tensor data, CameraSpec& cam, RenderOptions& opt,
torch::Tensor offset, torch::Tensor scaling) {
cam.check();
DEVICE_GUARD(data);
const size_t Q = size_t(cam.width) * cam.height;
auto_cuda_threads();
const int blocks = CUDA_N_BLOCKS_NEEDED(Q, cuda_n_threads);
torch::Tensor grid_weight = torch::zeros_like(data);
torch::Tensor grid_hit = torch::zeros_like(data);
AT_DISPATCH_FLOATING_TYPES(data.type(), __FUNCTION__, [&] {
device::grid_weight_render_kernel<scalar_t><<<blocks, cuda_n_threads>>>(
data.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
cam,
opt,
offset.data<scalar_t>(),
scaling.data<scalar_t>(),
grid_weight.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>(),
grid_hit.packed_accessor32<scalar_t, 3, torch::RestrictPtrTraits>());
});
CUDA_CHECK_ERRORS;
return {grid_weight, grid_hit};
}
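// Illustrative host-side call (editorial; the variable names are assumptions,
// not from this file):
// auto wh = grid_weight_render(density_grid, cam, opt, offset, scaling);
// torch::Tensor max_weight = wh[0], hit_count = wh[1];
// Both outputs are shaped like the input grid: max_weight holds the largest ray
// weight that touched each voxel (via atomicMax), hit_count the number of ray
// samples that landed in it (via atomicAdd).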
|
56ffd66bd6bd5f03ae64eecf4ceae90834c91a08.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfgx-v2.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; //cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_C_MAKE( 1., 0.); //cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
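/*
Worked example (editorial addition, for the real-precision PRECISION_s/PRECISION_d
branch of the kernel above): with alpha = dx0[0] = 3 and dxnorm[0] = 5,
beta = -copysign(5, 3) = -5
tau = (beta - alpha) / beta = (-5 - 3) / (-5) = 1.6
scale = 1 / (alpha - beta) = 1 / 8
so the kernel writes beta to dA, tau to dtau, and rescales dx by scale, giving
the implicit reflector vector v = [1, dx / 8].
*/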
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_clarfgx_gpu_kernel), dim3(blocks), dim3(threads), 0, magma_stream , n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
hipLaunchKernelGGL(( magma_cgemv_kernel3), dim3(iter), dim3(BLOCK_SIZE), 0, magma_stream , n, V, ldv, dx0, dwork, dtau );
hipLaunchKernelGGL(( magma_ctrmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
| 56ffd66bd6bd5f03ae64eecf4ceae90834c91a08.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@generated from zlarfgx-v2.cu normal z -> c, Fri Jan 30 19:00:09 2015
*/
#include "common_magma.h"
#include "commonblas_c.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_c
//==============================================================================
__global__
void magma_clarfgx_gpu_kernel( int n, magmaFloatComplex* dx0, magmaFloatComplex* dx,
magmaFloatComplex *dtau, float *dxnorm,
magmaFloatComplex *dA, int it)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaFloatComplex scale;
__shared__ float xnorm;
magmaFloatComplex dxi;
if ( j < n-1 )
dxi = dx[j];
if ( i == 0 ) {
xnorm = *dxnorm;
#if (defined(PRECISION_s) || defined(PRECISION_d))
float alpha = *dx0;
float alphai = MAGMA_C_ZERO;
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 1 )
#else
magmaFloatComplex alpha = *dx0;
float alphar = MAGMA_C_REAL(alpha), alphai = MAGMA_C_IMAG(alpha);
if ( (xnorm == 0 && alphai == MAGMA_C_ZERO ) || n == 0 )
#endif
{
*dtau = MAGMA_C_ZERO;
*dA = *dx0;
}
else {
#if (defined(PRECISION_s) || defined(PRECISION_d))
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = (beta - alpha) / beta;
//*dx0 = 1.; //cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = beta;
}
scale = 1. / (alpha - beta);
#else
// no need to compute the norm as it is passed as input
float beta = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
if (j==0){
*dtau = MAGMA_C_MAKE((beta - alphar)/beta, -alphai/beta);
//*dx0 = MAGMA_C_MAKE( 1., 0.); //cannot be done here because of a race condition: all thread blocks need to read it for alpha
*dA = MAGMA_C_MAKE(beta, 0.);
}
alpha = MAGMA_C_MAKE( MAGMA_C_REAL(alpha) - beta, MAGMA_C_IMAG(alpha));
scale = MAGMA_C_DIV( MAGMA_C_ONE, alpha);
#endif
}
}
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_C_MUL(dxi, scale);
if (j<it){
*( dA-it+j) = *(dx0-it+j);
*(dx0-it+j) = MAGMA_C_MAKE(0., 0.);
}
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
magma_clarfgx_gpu_kernel<<< blocks, threads, 0, magma_stream >>>( n, dx0, dx, dtau, dxnorm, dA, iter);
}
//==============================================================================
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's clarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" void
magma_clarfgtx_gpu(
magma_int_t n,
magmaFloatComplex_ptr dx0,
magmaFloatComplex_ptr dx,
magmaFloatComplex_ptr dtau,
magmaFloat_ptr dxnorm,
magmaFloatComplex_ptr dA, magma_int_t iter,
magmaFloatComplex_ptr V, magma_int_t ldv,
magmaFloatComplex_ptr T, magma_int_t ldt,
magmaFloatComplex_ptr dwork)
{
/* Generate the elementary reflector H(iter) */
magma_clarfgx_gpu(n, dx0, dx, dtau, dxnorm, dA, iter);
if (iter==0) {
magmaFloatComplex tt = MAGMA_C_ONE;
magmablas_clacpy(MagmaUpperLower, 1, 1, dtau, 1, T+iter+iter*ldt, 1);
magma_csetmatrix(1,1, &tt,1, dx0,1);
}
else {
/* Compute the iter-th column of T */
magma_cgemv_kernel3<<< iter, BLOCK_SIZE, 0, magma_stream >>>( n, V, ldv, dx0, dwork, dtau );
magma_ctrmv_kernel2<<< iter, iter, 0, magma_stream >>>( T, ldt, dwork, T+iter*ldt, dtau );
}
}
//==============================================================================
|
b39fd5d0bd42a9368786d8ede44a573e61a5f702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarf.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_kernel( int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v' * C */
if (tx==0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ){
tmp += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CNJG(*dtau) * sum[0];
for( int j = m-tx-1; j>0 ; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if(tx==0) dc[0] += tmp;
}
}
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_smkernel( int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v' * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CNJG(*dtau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
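/*
Editorial sketch of the update both kernels above implement, per column c_j of C
(v[0] is treated as 1, matching dlarfg's storage convention):
w_j = v^T * c_j (parallel reduction in shared memory)
c_j = c_j - tau * w_j * v
i.e. C := (I - tau * v * v^T) * C, with one column per block in magma_dlarf_kernel
and columns distributed over threadIdx.y in the single-SM variant.
*/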
extern "C" void
magma_dlarf_sm(magma_int_t m, magma_int_t n, double *dv, double *dtau,
double *dc, magma_int_t lddc)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
hipLaunchKernelGGL(( magma_dlarf_smkernel), dim3(blocks), dim3(threads), 0, magma_stream , m, n, dv, dtau, dc, lddc );
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
const double *dv, const double *dtau,
double *dc, magma_int_t lddc)
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n>0 ){
hipLaunchKernelGGL(( magma_dlarf_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, dv, dtau, dc, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
//==============================================================================
| b39fd5d0bd42a9368786d8ede44a573e61a5f702.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zlarf.cu normal z -> d, Wed Sep 17 15:08:23 2014
@author Azzam Haidar
*/
#include "common_magma.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_kernel( int m, const double *dv, const double *dtau,
double *dc, int lddc )
{
if ( !MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int tx = threadIdx.x;
dc = dc + blockIdx.x * lddc;
__shared__ double sum[ BLOCK_SIZE ];
double tmp;
/* perform w := v' * C */
if (tx==0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_D_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ){
tmp += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
tmp = - MAGMA_D_CNJG(*dtau) * sum[0];
for( int j = m-tx-1; j>0 ; j -= BLOCK_SIZE )
dc[j] += tmp * dv[j];
if(tx==0) dc[0] += tmp;
}
}
//==============================================================================
//==============================================================================
__global__
void magma_dlarf_smkernel( int m, int n, double *dv, double *dtau,
double *dc, int lddc )
{
if ( ! MAGMA_D_EQUAL(*dtau, MAGMA_D_ZERO) ) {
const int i = threadIdx.x, col= threadIdx.y;
for( int k = col; k < n; k += BLOCK_SIZEy ) {
dc = dc + k * lddc;
__shared__ double sum[ BLOCK_SIZEx ][ BLOCK_SIZEy + 1];
double lsum;
/* w := v' * C */
lsum = MAGMA_D_ZERO;
for( int j = i; j < m; j += BLOCK_SIZEx ){
if (j==0)
lsum += MAGMA_D_MUL( MAGMA_D_ONE, dc[j] );
else
lsum += MAGMA_D_MUL( MAGMA_D_CNJG( dv[j] ), dc[j] );
}
sum[i][col] = lsum;
magma_sum_reduce_2d< BLOCK_SIZEx, BLOCK_SIZEy+1 >( i, col, sum );
/* C := C - v * w */
__syncthreads();
double z__1 = - MAGMA_D_CNJG(*dtau) * sum[0][col];
for( int j = m-i-1; j>=0 ; j -= BLOCK_SIZEx ) {
if (j==0)
dc[j] += z__1;
else
dc[j] += z__1 * dv[j];
}
}
}
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
This routine uses only one SM (block).
*/
extern "C" void
magma_dlarf_sm(magma_int_t m, magma_int_t n, double *dv, double *dtau,
double *dc, magma_int_t lddc)
{
dim3 blocks( 1 );
dim3 threads( BLOCK_SIZEx, BLOCK_SIZEy );
magma_dlarf_smkernel<<< blocks, threads, 0, magma_stream >>>( m, n, dv, dtau, dc, lddc );
}
//==============================================================================
/*
Apply a real elementary reflector H to a real M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v'
where tau is a real scalar and v is a real vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H' (the conjugate transpose of H), supply conjg(tau)
instead of tau.
*/
extern "C" magma_int_t
magma_dlarf_gpu(
magma_int_t m, magma_int_t n,
const double *dv, const double *dtau,
double *dc, magma_int_t lddc)
{
dim3 grid( n, 1, 1 );
dim3 threads( BLOCK_SIZE );
if ( n>0 ){
magma_dlarf_kernel<<< grid, threads, 0, magma_stream >>>( m, dv, dtau, dc, lddc);
}
// The computation can be done on 1 SM with the following routine.
// magma_dlarf_sm(m, n, dv, dtau, dc, lddc);
return MAGMA_SUCCESS;
}
//==============================================================================
|
cbfe5e58c5f0287d3f726feda23b6bfbfe1dd382.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../../Library/image_buffer.h"
#include "../../Library/image_function.h"
#include "../../Library/cuda/cuda_types.cuh"
#include "../../Library/cuda/image_function_cuda.cuh"
#include "unit_test_helper_cuda.cuh"
namespace
{
// Counts, via atomicAdd, how many bytes of the image differ from the given value
__global__ void isEqualCuda( const uint8_t * image, uint8_t value, uint32_t size, uint32_t * differenceCount )
{
uint32_t id = blockDim.x * blockIdx.x + threadIdx.x;
if( id < size )
{
if( image[id] != value )
atomicAdd( differenceCount, 1 );
}
};
__global__ void isAnyEqualCuda( const uint8_t * image, uint8_t * value, uint32_t valueCount, uint32_t size, uint32_t * differenceCount )
{
uint32_t id = blockDim.x * blockIdx.x + threadIdx.x;
if( id < size )
{
bool equal = false;
for( uint32_t i = 0; i < valueCount; ++i )
{
if( image[id] == value[i] )
{
equal = true;
break;
}
}
if( !equal )
atomicAdd( differenceCount, 1 );
}
};
};
namespace Unit_Test
{
namespace Cuda
{
Bitmap_Image_Cuda::Image uniformImage( uint8_t value )
{
Bitmap_Image_Cuda::Image image( randomValue<uint32_t>( 1, 2048 ), randomValue<uint32_t>( 1, 2048 ) );
image.fill( value );
return image;
}
Bitmap_Image_Cuda::Image uniformImage()
{
return uniformImage( randomValue<uint8_t>( 256 ) );
}
Bitmap_Image_Cuda::Image uniformColorImage()
{
return uniformColorImage( randomValue<uint8_t>( 256 ) );
}
Bitmap_Image_Cuda::Image uniformColorImage( uint8_t value )
{
Bitmap_Image_Cuda::Image image( randomValue<uint32_t>( 1, 2048 ), randomValue<uint32_t>( 1, 2048 ), Bitmap_Image_Cuda::RGB );
image.fill( value );
return image;
}
Bitmap_Image_Cuda::Image blackImage()
{
return uniformImage( 0u );
}
Bitmap_Image_Cuda::Image whiteImage()
{
return uniformImage( 255u );
}
std::vector < Bitmap_Image_Cuda::Image > uniformImages( uint32_t images )
{
if( images == 0 )
throw imageException( "Invalid parameter" );
std::vector < Bitmap_Image_Cuda::Image > image;
image.push_back( uniformImage() );
image.resize( images );
for( size_t i = 1; i < image.size(); ++i ) {
image[i].resize( image[0].width(), image[0].height() );
image[i].fill( randomValue<uint8_t>( 256 ) );
}
return image;
}
std::vector < Bitmap_Image_Cuda::Image > uniformImages( std::vector < uint8_t > intensityValue )
{
if( intensityValue.size() == 0 )
throw imageException( "Invalid parameter" );
std::vector < Bitmap_Image_Cuda::Image > image;
image.push_back( uniformImage( intensityValue[0] ) );
image.resize( intensityValue.size() );
for( size_t i = 1; i < image.size(); ++i ) {
image[i].resize( image[0].width(), image[0].height() );
image[i].fill( intensityValue[i] );
}
return image;
}
bool verifyImage( const Bitmap_Image_Cuda::Image & image, uint8_t value )
{
Cuda_Types::_cuint32_t differenceCount( 0 );
const uint32_t size = image.rowSize() * image.height();
hipLaunchKernelGGL(( isEqualCuda), dim3((size + 255) / 256), dim3(256) , 0, 0, image.data(), value, size, &differenceCount);
hipError_t error = hipGetLastError();
if( error != hipSuccess )
throw imageException( "Failed to launch CUDA kernel" );
return differenceCount.get() == 0;
}
bool verifyImage( const Bitmap_Image_Cuda::Image & image, const std::vector < uint8_t > & value )
{
Cuda_Types::_cuint32_t differenceCount( 0 );
Cuda_Types::Array<uint8_t> valueCuda( value );
const uint32_t size = image.rowSize() * image.height();
hipLaunchKernelGGL(( isAnyEqualCuda), dim3((size + 255) / 256), dim3(256) , 0, 0, image.data(), &valueCuda, valueCuda.size(), size, &differenceCount);
hipError_t error = hipGetLastError();
if( error != hipSuccess )
throw imageException( "Failed to launch CUDA kernel" );
return differenceCount.get() == 0;
}
};
};
| cbfe5e58c5f0287d3f726feda23b6bfbfe1dd382.cu | #include <cuda_runtime.h>
#include "../../Library/image_buffer.h"
#include "../../Library/image_function.h"
#include "../../Library/cuda/cuda_types.cuh"
#include "../../Library/cuda/image_function_cuda.cuh"
#include "unit_test_helper_cuda.cuh"
namespace
{
// Counts, via atomicAdd, how many bytes of the image differ from the given value
__global__ void isEqualCuda( const uint8_t * image, uint8_t value, uint32_t size, uint32_t * differenceCount )
{
uint32_t id = blockDim.x * blockIdx.x + threadIdx.x;
if( id < size )
{
if( image[id] != value )
atomicAdd( differenceCount, 1 );
}
};
__global__ void isAnyEqualCuda( const uint8_t * image, uint8_t * value, uint32_t valueCount, uint32_t size, uint32_t * differenceCount )
{
uint32_t id = blockDim.x * blockIdx.x + threadIdx.x;
if( id < size )
{
bool equal = false;
for( uint32_t i = 0; i < valueCount; ++i )
{
if( image[id] == value[i] )
{
equal = true;
break;
}
}
if( !equal )
atomicAdd( differenceCount, 1 );
}
};
};
namespace Unit_Test
{
namespace Cuda
{
Bitmap_Image_Cuda::Image uniformImage( uint8_t value )
{
Bitmap_Image_Cuda::Image image( randomValue<uint32_t>( 1, 2048 ), randomValue<uint32_t>( 1, 2048 ) );
image.fill( value );
return image;
}
Bitmap_Image_Cuda::Image uniformImage()
{
return uniformImage( randomValue<uint8_t>( 256 ) );
}
Bitmap_Image_Cuda::Image uniformColorImage()
{
return uniformColorImage( randomValue<uint8_t>( 256 ) );
}
Bitmap_Image_Cuda::Image uniformColorImage( uint8_t value )
{
Bitmap_Image_Cuda::Image image( randomValue<uint32_t>( 1, 2048 ), randomValue<uint32_t>( 1, 2048 ), Bitmap_Image_Cuda::RGB );
image.fill( value );
return image;
}
Bitmap_Image_Cuda::Image blackImage()
{
return uniformImage( 0u );
}
Bitmap_Image_Cuda::Image whiteImage()
{
return uniformImage( 255u );
}
std::vector < Bitmap_Image_Cuda::Image > uniformImages( uint32_t images )
{
if( images == 0 )
throw imageException( "Invalid parameter" );
std::vector < Bitmap_Image_Cuda::Image > image;
image.push_back( uniformImage() );
image.resize( images );
for( size_t i = 1; i < image.size(); ++i ) {
image[i].resize( image[0].width(), image[0].height() );
image[i].fill( randomValue<uint8_t>( 256 ) );
}
return image;
}
std::vector < Bitmap_Image_Cuda::Image > uniformImages( std::vector < uint8_t > intensityValue )
{
if( intensityValue.size() == 0 )
throw imageException( "Invalid parameter" );
std::vector < Bitmap_Image_Cuda::Image > image;
image.push_back( uniformImage( intensityValue[0] ) );
image.resize( intensityValue.size() );
for( size_t i = 1; i < image.size(); ++i ) {
image[i].resize( image[0].width(), image[0].height() );
image[i].fill( intensityValue[i] );
}
return image;
}
bool verifyImage( const Bitmap_Image_Cuda::Image & image, uint8_t value )
{
Cuda_Types::_cuint32_t differenceCount( 0 );
const uint32_t size = image.rowSize() * image.height();
isEqualCuda<<< (size + 255) / 256, 256 >>>(image.data(), value, size, &differenceCount);
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess )
throw imageException( "Failed to launch CUDA kernel" );
return differenceCount.get() == 0;
}
bool verifyImage( const Bitmap_Image_Cuda::Image & image, const std::vector < uint8_t > & value )
{
Cuda_Types::_cuint32_t differenceCount( 0 );
Cuda_Types::Array<uint8_t> valueCuda( value );
const uint32_t size = image.rowSize() * image.height();
isAnyEqualCuda<<< (size + 255) / 256, 256 >>>(image.data(), &valueCuda, valueCuda.size(), size, &differenceCount);
cudaError_t error = cudaGetLastError();
if( error != cudaSuccess )
throw imageException( "Failed to launch CUDA kernel" );
return differenceCount.get() == 0;
}
};
};
|
6e80aac370f09c28715fe9ff3b0f7fae58fe7c15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* objective
* C = A*B // A[m][k], B[k][n], C[m][n]
* compile: nvcc --gpu-architecture=compute_60 --gpu-code=sm_60 -O3 matmul_double.cu -o matmul_double
*/
#include <iostream>
#include <cstdlib>
#include <math.h>
# define BLK_SIZE 4
#define EC(ans) { chkerr((ans), __FILE__, __LINE__); }
inline void chkerr(hipError_t code, const char *file, int line)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) << " File: " << file << " Line: " << line << '\n';
exit(-1);
}
}
void init (double *A, double *B, int M , int N, int K)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < K; ++j)
{
A[i * K + j] = 1; //i * K + j;
}
}
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < K; ++j)
{
B[i * K + j] = 1; //i * N + j + 1;
}
}
}
void matmul_double_host(double* A, double* B, double* C, int M, int N, int K)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
double tmp = 0;
for (int k = 0; k < K; ++k)
{
tmp += A[i * K + k] * B[j * K + k];
}
C[i * N + j] = tmp;
}
}
}
void validate (double *host, double *gpu, int M, int N)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
if(std::abs(host[i * N + j] - gpu[i * N + j]) > 1e-3)
{
std::cerr << "possible error at position " << i << ',' << j << " host: " << host[i * N + j] << " device " << gpu[i * N + j] << '\n';
}
}
}
}
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
//if(row >= M || col >= N)
// return;
__shared__ float SA[BLK_SIZE][BLK_SIZE];
__shared__ float SB[BLK_SIZE][BLK_SIZE];
double temp = 0;
int klimit = K + BLK_SIZE - 1;
for(int tilek=0;tilek<klimit;tilek+=BLK_SIZE){
if(tilek + tx < K && row < M)
SA[ty][tx] = A[row*K + (tilek + tx)];
else
SA[ty][tx] = 0.0;
if((bx*BLK_SIZE+ty)<N && tx+tilek < K)
SB[ty][tx] = B[(bx*BLK_SIZE+ty)*K+(tx+tilek)];
else
SB[ty][tx] = 0.0;
__syncthreads();
for(int i=0;i<BLK_SIZE;i++){
temp+= SA[ty][i] * SB[tx][i];
}
__syncthreads();
}
if(row < M && col <N){
int id = row * N + col;
C[id] = temp;
}
}
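// Editorial note on the tiling above: the k-loop walks the K dimension in
// BLK_SIZE-wide tiles, staging one BLK_SIZE x BLK_SIZE tile of A and one of B
// (stored row-major as an N x K transpose, as init() fills it) into shared
// memory each iteration and zero-padding out-of-range elements, so the partial
// dot products stay correct when M, N, K are not multiples of BLK_SIZE.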
int main(int argc, char *argv[])
{
if(argc < 4)
{
std::cerr << "Usage: ./matmul_double M N K\n";
exit(-1);
}
int M = std::atoi(argv[1]);
int N = std::atoi(argv[2]);
int K = std::atoi(argv[3]);
/* Host alloc */
double *hA = (double*) malloc (M * K * sizeof(double));
double *hB = (double*) malloc (K * N * sizeof(double));
double *hC = (double*) malloc (M * N * sizeof(double));
double *dtohC = (double*) malloc (M * N * sizeof(double));
/* Device alloc */
double *dA, *dB, *dC;
hipMalloc((void**) &dA, M*K*sizeof(double));
hipMalloc((void**) &dB, K*N*sizeof(double));
hipMalloc((void**) &dC, M*N*sizeof(double));
/* Initialize host memory*/
init(hA, hB, M, N, K);
/* host compute */
matmul_double_host(hA, hB, hC, M, N, K);
/* Copy from host to device */
hipMemcpy(dA, hA, M*K*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dB, hB, K*N*sizeof(double), hipMemcpyHostToDevice);
/* call gpu kernel */
dim3 threads(BLK_SIZE, BLK_SIZE);
dim3 grid(ceil(N/float(BLK_SIZE)),ceil(M/float(BLK_SIZE)));
printf("Number of threads in a block %dx%d\n",(int)BLK_SIZE, (int)BLK_SIZE);
printf("Number of blocks in a grid %dx%d\n",(int)ceil(N/float(BLK_SIZE)),(int)ceil(M/float(BLK_SIZE)));
hipLaunchKernelGGL(( matmul_double), dim3(grid), dim3(threads), 0, 0, dA, dB, dC, M, N, K);
std::cerr << hipGetErrorString(hipGetLastError()) << std::endl;
/* Copy from device to host (dC -> dtohC) */
hipMemcpy(dtohC, dC, M*N*sizeof(double), hipMemcpyDeviceToHost);
/* host vs device validation */
validate(hC, dtohC, M, N);
/* be clean */
free(hA);
free(hB);
free(hC);
free(dtohC);
/// add code to free gpu memory
hipFree(dA);
hipFree(dB);
hipFree(dC);
return 0;
}
| 6e80aac370f09c28715fe9ff3b0f7fae58fe7c15.cu | /* objective
* C = A*B // A[m][k], B[k][n], C[m][n]
* compile: nvcc --gpu-architecture=compute_60 --gpu-code=sm_60 -O3 matmul_double.cu -o matmul_double
*/
#include <iostream>
#include <cstdlib>
#include <math.h>
# define BLK_SIZE 4
#define EC(ans) { chkerr((ans), __FILE__, __LINE__); }
inline void chkerr(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) << " File: " << file << " Line: " << line << '\n';
exit(-1);
}
}
void init (double *A, double *B, int M , int N, int K)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < K; ++j)
{
A[i * K + j] = 1; //i * K + j;
}
}
for (int i = 0; i < N; ++i)
{
for (int j = 0; j < K; ++j)
{
B[i * K + j] = 1; //i * N + j + 1;
}
}
}
void matmul_double_host(double* A, double* B, double* C, int M, int N, int K)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
double tmp = 0;
for (int k = 0; k < K; ++k)
{
tmp += A[i * K + k] * B[j * K + k];
}
C[i * N + j] = tmp;
}
}
}
void validate (double *host, double *gpu, int M, int N)
{
for (int i = 0; i < M; ++i)
{
for (int j = 0; j < N; ++j)
{
if(std::abs(host[i * N + j] - gpu[i * N + j]) > 1e-3)
{
std::cerr << "possible error at position " << i << ',' << j << " host: " << host[i * N + j] << " device " << gpu[i * N + j] << '\n';
}
}
}
}
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * blockDim.y + ty;
int col = bx * blockDim.x + tx;
//if(row >= M || col >= N)
// return;
__shared__ float SA[BLK_SIZE][BLK_SIZE];
__shared__ float SB[BLK_SIZE][BLK_SIZE];
double temp = 0;
int klimit = K + BLK_SIZE - 1;
for(int tilek=0;tilek<klimit;tilek+=BLK_SIZE){
if(tilek + tx < K && row < M)
SA[ty][tx] = A[row*K + (tilek + tx)];
else
SA[ty][tx] = 0.0;
if((bx*BLK_SIZE+ty)<N && tx+tilek < K)
SB[ty][tx] = B[(bx*BLK_SIZE+ty)*K+(tx+tilek)];
else
SB[ty][tx] = 0.0;
__syncthreads();
for(int i=0;i<BLK_SIZE;i++){
temp+= SA[ty][i] * SB[tx][i];
}
__syncthreads();
}
if(row < M && col <N){
int id = row * N + col;
C[id] = temp;
}
}
int main(int argc, char *argv[])
{
if(argc < 4)
{
std::cerr << "Usage: ./matmul_double M N K\n";
exit(-1);
}
int M = std::atoi(argv[1]);
int N = std::atoi(argv[2]);
int K = std::atoi(argv[3]);
/* Host alloc */
double *hA = (double*) malloc (M * K * sizeof(double));
double *hB = (double*) malloc (K * N * sizeof(double));
double *hC = (double*) malloc (M * N * sizeof(double));
double *dtohC = (double*) malloc (M * N * sizeof(double));
/* Device alloc */
double *dA, *dB, *dC;
cudaMalloc((void**) &dA, M*K*sizeof(double));
cudaMalloc((void**) &dB, K*N*sizeof(double));
cudaMalloc((void**) &dC, M*N*sizeof(double));
/* Initialize host memory*/
init(hA, hB, M, N, K);
/* host compute */
matmul_double_host(hA, hB, hC, M, N, K);
/* Copy from host to device */
cudaMemcpy(dA, hA, M*K*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, K*N*sizeof(double), cudaMemcpyHostToDevice);
/* call gpu kernel */
dim3 threads(BLK_SIZE, BLK_SIZE);
dim3 grid(ceil(N/float(BLK_SIZE)),ceil(M/float(BLK_SIZE)));
printf("Number of threads in a block %dx%d\n",(int)BLK_SIZE, (int)BLK_SIZE);
printf("Number of blocks in a grid %dx%d\n",(int)ceil(N/float(BLK_SIZE)),(int)ceil(M/float(BLK_SIZE)));
matmul_double<<<grid, threads>>>(dA, dB, dC, M, N, K);
std::cerr << cudaGetErrorString(cudaGetLastError()) << std::endl;
/* Copy from device to host (dC -> dtohC) */
cudaMemcpy(dtohC, dC, M*N*sizeof(double), cudaMemcpyDeviceToHost);
/* host vs device validation */
validate(hC, dtohC, M, N);
/* be clean */
free(hA);
free(hB);
free(hC);
free(dtohC);
/// add code to free gpu memory
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
return 0;
}
|
1e41a8b6a4d54755bcad0a4b5b4b514e1feadcf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 30-May-2011 22:03:11
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
float *ind_arg0, int *ind_arg0_maps,
float *ind_arg1, int *ind_arg1_maps,
float *ind_arg2, int *ind_arg2_maps,
float *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
float arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_float;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map = arg4_maps[n+offset_b];
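    // thread colours serialise conflicting updates: on each pass only the
    // threads whose element carries the current colour add their increment,
    // and the barrier between passes keeps writes to the same shared slot apart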
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
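  // the plan partitions the set into mini-blocks and colours both the blocks
  // and the elements within each block, so that updates made through the
  // indirect maps never run concurrently on the same data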
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
hipLaunchKernelGGL(( op_cuda_bres_calc), dim3(nblocks),dim3(nthread),nshared, 0,
(float *)arg0.data_d, Plan->ind_maps[0],
(float *)arg2.data_d, Plan->ind_maps[1],
(float *)arg3.data_d, Plan->ind_maps[2],
(float *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(hipDeviceSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
| 1e41a8b6a4d54755bcad0a4b5b4b514e1feadcf8.cu | //
// auto-generated by op2.m on 30-May-2011 22:03:11
//
// user function
__device__
#include "bres_calc.h"
// CUDA kernel function
__global__ void op_cuda_bres_calc(
float *ind_arg0, int *ind_arg0_maps,
float *ind_arg1, int *ind_arg1_maps,
float *ind_arg2, int *ind_arg2_maps,
float *ind_arg3, int *ind_arg3_maps,
short *arg0_maps,
short *arg1_maps,
short *arg2_maps,
short *arg3_maps,
short *arg4_maps,
int *arg5,
int *ind_arg_sizes,
int *ind_arg_offs,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors) {
float arg4_l[4];
__shared__ int *ind_arg0_map, ind_arg0_size;
__shared__ int *ind_arg1_map, ind_arg1_size;
__shared__ int *ind_arg2_map, ind_arg2_size;
__shared__ int *ind_arg3_map, ind_arg3_size;
__shared__ float *ind_arg0_s;
__shared__ float *ind_arg1_s;
__shared__ float *ind_arg2_s;
__shared__ float *ind_arg3_s;
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (threadIdx.x==0) {
// get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
ind_arg0_size = ind_arg_sizes[0+blockId*4];
ind_arg1_size = ind_arg_sizes[1+blockId*4];
ind_arg2_size = ind_arg_sizes[2+blockId*4];
ind_arg3_size = ind_arg_sizes[3+blockId*4];
ind_arg0_map = ind_arg0_maps + ind_arg_offs[0+blockId*4];
ind_arg1_map = ind_arg1_maps + ind_arg_offs[1+blockId*4];
ind_arg2_map = ind_arg2_maps + ind_arg_offs[2+blockId*4];
ind_arg3_map = ind_arg3_maps + ind_arg_offs[3+blockId*4];
// set shared memory pointers
int nbytes = 0;
ind_arg0_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg0_size*sizeof(float)*2);
ind_arg1_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg1_size*sizeof(float)*4);
ind_arg2_s = (float *) &shared[nbytes];
nbytes += ROUND_UP(ind_arg2_size*sizeof(float)*1);
ind_arg3_s = (float *) &shared[nbytes];
}
__syncthreads(); // make sure all of above completed
// copy indirect datasets into shared memory or zero increment
for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x)
ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2];
for (int n=threadIdx.x; n<ind_arg1_size*4; n+=blockDim.x)
ind_arg1_s[n] = ind_arg1[n%4+ind_arg1_map[n/4]*4];
for (int n=threadIdx.x; n<ind_arg2_size*1; n+=blockDim.x)
ind_arg2_s[n] = ind_arg2[n%1+ind_arg2_map[n/1]*1];
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3_s[n] = ZERO_float;
__syncthreads();
// process set elements
for (int n=threadIdx.x; n<nelems2; n+=blockDim.x) {
int col2 = -1;
if (n<nelem) {
// initialise local variables
for (int d=0; d<4; d++)
arg4_l[d] = ZERO_float;
// user-supplied kernel call
bres_calc( ind_arg0_s+arg0_maps[n+offset_b]*2,
ind_arg0_s+arg1_maps[n+offset_b]*2,
ind_arg1_s+arg2_maps[n+offset_b]*4,
ind_arg2_s+arg3_maps[n+offset_b]*1,
arg4_l,
arg5+(n+offset_b)*1 );
col2 = colors[n+offset_b];
}
// store local variables
int arg4_map = arg4_maps[n+offset_b];
for (int col=0; col<ncolor; col++) {
if (col2==col) {
for (int d=0; d<4; d++)
ind_arg3_s[d+arg4_map*4] += arg4_l[d];
}
__syncthreads();
}
}
// apply pointered write/increment
for (int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x)
ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n];
}
// host stub function
void op_par_loop_bres_calc(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5 ){
int nargs = 6;
op_arg args[6] = {arg0,arg1,arg2,arg3,arg4,arg5};
int ninds = 4;
int inds[6] = {0,0,1,2,3,-1};
if (OP_diags>2) {
printf(" kernel routine with indirection: bres_calc \n");
}
// get plan
#ifdef OP_PART_SIZE_3
int part_size = OP_PART_SIZE_3;
#else
int part_size = OP_part_size;
#endif
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timers(&cpu_t1, &wall_t1);
// execute plan
int block_offset = 0;
for (int col=0; col < Plan->ncolors; col++) {
#ifdef OP_BLOCK_SIZE_3
int nthread = OP_BLOCK_SIZE_3;
#else
int nthread = OP_block_size;
#endif
int nblocks = Plan->ncolblk[col];
int nshared = Plan->nshared;
op_cuda_bres_calc<<<nblocks,nthread,nshared>>>(
(float *)arg0.data_d, Plan->ind_maps[0],
(float *)arg2.data_d, Plan->ind_maps[1],
(float *)arg3.data_d, Plan->ind_maps[2],
(float *)arg4.data_d, Plan->ind_maps[3],
Plan->loc_maps[0],
Plan->loc_maps[1],
Plan->loc_maps[2],
Plan->loc_maps[3],
Plan->loc_maps[4],
(int *)arg5.data_d,
Plan->ind_sizes,
Plan->ind_offs,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol);
cutilSafeCall(cudaThreadSynchronize());
cutilCheckMsg("op_cuda_bres_calc execution failed\n");
block_offset += nblocks;
}
// update kernel record
op_timers(&cpu_t2, &wall_t2);
op_timing_realloc(3);
OP_kernels[3].name = name;
OP_kernels[3].count += 1;
OP_kernels[3].time += wall_t2 - wall_t1;
OP_kernels[3].transfer += Plan->transfer;
OP_kernels[3].transfer2 += Plan->transfer2;
}
|
d87193e13ea547165cebf1a5efda3a885ad15681.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void ContrastiveAccuracyLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_accuracy_param().margin();
bool legacy_version =
this->layer_param_.contrastive_accuracy_param().legacy_version();
// Dtype loss(0.0);
int pos_cnt = 0;
int neg_cnt = 0;
int pos_right = 0;
int neg_right = 0;
float eps = 0.0001;
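  // A similar pair is counted as correct when its squared distance is below
  // the margin; a dissimilar pair is counted as correct when the hinge term
  // max(margin - d, 0) is zero, with d the squared distance in the legacy
  // formulation and the plain Euclidean distance otherwise.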
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
// loss += dist_sq_.cpu_data()[i];
      // handle the positive pair
pos_cnt += 1;
if (dist_sq_.cpu_data()[i] < margin) pos_right += 1;
} else { // dissimilar pairs
if (legacy_version) {
// loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
neg_cnt += 1;
if( ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)) == 0)
{
neg_right += 1;
}
} else {
Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
// loss += dist*dist;
neg_cnt += 1;
if (dist == 0)
{
neg_right += 1;
}
}
}
}
// loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
float pos_accuracy = pos_right/(pos_cnt + eps);
float neg_accuracy = neg_right/(neg_cnt + eps);
float accuracy = 0.5*(pos_accuracy + neg_accuracy);
top[0]->mutable_cpu_data()[0] = accuracy;
top[0]->mutable_cpu_data()[1] = pos_accuracy;
top[0]->mutable_cpu_data()[2] = neg_accuracy;
}
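// Gradient kernel apparently carried over from the contrastive loss layer; it
// is never launched here, since Backward_gpu below is not implemented for the
// accuracy layer.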
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveAccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
for (int i = 0; i < propagate_down.size(); ++i) {
if (propagate_down[i]) { NOT_IMPLEMENTED; }
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveAccuracyLayer);
} // namespace caffe
| d87193e13ea547165cebf1a5efda3a885ad15681.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_accuracy_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void ContrastiveAccuracyLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data()); // \Sum (a_i-b_i)^2
Dtype margin = this->layer_param_.contrastive_accuracy_param().margin();
bool legacy_version =
this->layer_param_.contrastive_accuracy_param().legacy_version();
// Dtype loss(0.0);
int pos_cnt = 0;
int neg_cnt = 0;
int pos_right = 0;
int neg_right = 0;
float eps = 0.0001;
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
// loss += dist_sq_.cpu_data()[i];
      // handle the positive pair
pos_cnt += 1;
if (dist_sq_.cpu_data()[i] < margin) pos_right += 1;
} else { // dissimilar pairs
if (legacy_version) {
// loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
neg_cnt += 1;
if( std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0)) == 0)
{
neg_right += 1;
}
} else {
Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
// loss += dist*dist;
neg_cnt += 1;
if (dist == 0)
{
neg_right += 1;
}
}
}
}
// loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
float pos_accuracy = pos_right/(pos_cnt + eps);
float neg_accuracy = neg_right/(neg_cnt + eps);
float accuracy = 0.5*(pos_accuracy + neg_accuracy);
top[0]->mutable_cpu_data()[0] = accuracy;
top[0]->mutable_cpu_data()[1] = pos_accuracy;
top[0]->mutable_cpu_data()[2] = neg_accuracy;
}
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveAccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
for (int i = 0; i < propagate_down.size(); ++i) {
if (propagate_down[i]) { NOT_IMPLEMENTED; }
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveAccuracyLayer);
} // namespace caffe
|
c1ef96b2cf2a2f093a8c3c1dd6dea31b6b3089d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file test_count_neighbours.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
 * , which is conducted under the supervision of prof. dr hab. inż. Marek
 * Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/block/cta_count_neighbour_points.cuh"
#include "rd/cpu/brute_force/rd_inner.hpp"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "rd/utils/rd_params.hpp"
#include <helper_cuda.h>
static const int TEST_DIM = 2;
template <typename T>
void testCountNeighboursKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
dSParams.sigma = 1;
fSParams.sigma = 1.f;
dParams.np = 5000;
dParams.r1 = 20;
dParams.r2 = 20;
fParams.np = 5000;
fParams.r1 = 20;
fParams.r2 = 20;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testCountNeighboursKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE: " << std::endl;
testCountNeighboursKernel<double>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
bool countNeighboursGold(
rd::RDParams<T> &rdp,
T const *S,
T const *origin,
int treshold)
{
return rd::countNeighbouringPoints(S, rdp.np, origin,
TEST_DIM, rdp.r1 * rdp.r1, treshold);
}
template <
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_row_major(
T const * points,
int np,
T const * srcP,
int dim,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints<T, BLOCK_SIZE>(points, np, srcP, dim, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
*result = res;
}
template <typename T>
void testCountNeighboursRowMajorOrder(
rd::RDParams<T> & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursRowMajorOrder:" << std::endl;
checkCudaErrors(hipDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(hipMalloc((void**)&d_result, sizeof(int)));
hipLaunchKernelGGL(( __dispatch_count_neighbours_row_major<256>), dim3(1), dim3(256), 0, 0, d_S, rdp.np, d_origin, TEST_DIM,
rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&h_result, d_result, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(hipFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_row_major_v2(
T const * points,
int np,
T const * srcP,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, srcP, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursRowMajorOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursRowMajorOrder_v2:" << std::endl;
checkCudaErrors(hipDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(hipMalloc((void**)&d_result, sizeof(int)));
hipLaunchKernelGGL(( __dispatch_count_neighbours_row_major_v2<TEST_DIM, 256>), dim3(1), dim3(256), 0, 0, d_S, rdp.np, d_origin,
rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&h_result, d_result, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(hipFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_mixed_order_v2(
T const * points,
int np,
T const * srcP,
int stride,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, srcP, stride, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursMixedOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursMixedOrder_v2:" << std::endl;
checkCudaErrors(hipDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(hipMalloc((void**)&d_result, sizeof(int)));
hipLaunchKernelGGL(( __dispatch_count_neighbours_mixed_order_v2<TEST_DIM, 256>), dim3(1), dim3(256), 0, 0, d_S, rdp.np, d_origin,
1, rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&h_result, d_result, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(hipFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_col_major_order_v2(
T const * points,
int np,
int pStride,
T const * srcP,
int sStride,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, pStride, srcP,
sStride, r2, threshold, rd::gpu::colMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursColMajorOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_SInitial,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursColMajorOrder_v2:" << std::endl;
checkCudaErrors(hipDeviceSynchronize());
int *d_result, h_result;
T *d_S, *aux;
aux = new T[rdp.np * TEST_DIM];
checkCudaErrors(hipMalloc((void**)&d_result, sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
rd::gpu::rdMemcpy<TEST_DIM, rd::COL_MAJOR, rd::ROW_MAJOR, hipMemcpyDeviceToHost>(
aux, d_SInitial, rdp.np, rdp.np, TEST_DIM);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(d_S, aux, rdp.np * TEST_DIM * sizeof(T), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( __dispatch_count_neighbours_col_major_order_v2<TEST_DIM, 256>), dim3(1), dim3(256), 0, 0, d_S, rdp.np, rdp.np, d_origin,
1, rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&h_result, d_result, sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
delete[] aux;
checkCudaErrors(hipFree(d_S));
checkCudaErrors(hipFree(d_result));
}
template <typename T>
void testCountNeighboursKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
const int NEIGHBOURS_THRESHOLD = 100;
const int oldCount = rdp.np;
const int closerRadius = rdp.r1;
rdp.np += NEIGHBOURS_THRESHOLD;
rdp.r1 += 10;
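    // Test construction: the original rdp.np points are scattered (with noise
    // sigma) around a circle of radius r1 + 10, and exactly NEIGHBOURS_THRESHOLD
    // extra points around a circle of the original radius; the search radius is
    // later cut back to r1 + 5, so only the inner circle should contribute the
    // neighbours counted against the threshold.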
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
T *d_S, *d_origin;
T *h_S, *h_origin;
checkCudaErrors(hipMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMalloc((void**)&d_origin, TEST_DIM * sizeof(T)));
checkCudaErrors(hipMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(hipMemset(d_origin, 0, TEST_DIM * sizeof(T)));
h_S = new T[rdp.np * TEST_DIM];
h_origin = new T[TEST_DIM];
for (int d = 0; d < TEST_DIM; ++d)
h_origin[d] = 0;
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template circle<rd::ROW_MAJOR>(
oldCount, T(0), T(0), rdp.r1, sp.sigma, d_S);
rd::gpu::SamplesGenerator<T>::template circle<rd::ROW_MAJOR>(
NEIGHBOURS_THRESHOLD, T(0), T(0), closerRadius, sp.sigma, d_S + oldCount * TEST_DIM);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template sphere<rd::ROW_MAJOR>(
oldCount, T(0), T(0), T(0), rdp.r1, sp.sigma, d_S);
rd::gpu::SamplesGenerator<T>::template sphere<rd::ROW_MAJOR>(
NEIGHBOURS_THRESHOLD, T(0), T(0), T(0), closerRadius, sp.sigma, d_S + oldCount * TEST_DIM);
break;
default:
throw std::logic_error("Not supported dimension!");
}
    // decrease the search radius so that the larger circle is not taken into account
rdp.r1 -= 5;
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_S, d_S, rdp.np * TEST_DIM * sizeof(T),
hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
std::ostringstream os;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_S, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE COUNT_NEIGHBOURS
//---------------------------------------------------
bool hasNeighbours = countNeighboursGold(rdp, h_S, h_origin, NEIGHBOURS_THRESHOLD);
//---------------------------------------------------
// GPU COUNT_NEIGHBOURS
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
// int smVersion;
// checkCudaErrors(cub::SmVersion(smVersion, rdp.devId));
// testCountNeighboursRowMajorOrder(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursRowMajorOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursColMajorOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursMixedOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
// clean-up
delete[] h_S;
delete[] h_origin;
checkCudaErrors(hipFree(d_S));
checkCudaErrors(hipFree(d_origin));
}
| c1ef96b2cf2a2f093a8c3c1dd6dea31b6b3089d7.cu | /**
* @file test_count_neighbours.cu
* @author Adam Rogowiec
*
* This file is an integral part of the master thesis entitled:
* "Elaboration and implementation in CUDA technology parallel version of
* estimation of multidimensional random variable density function ridge
* detection algorithm."
* , which is conducted under the supervision of prof. dr hab. inż. Marek
* Nałęcz.
*
* Institute of Control and Computation Engineering Faculty of Electronics and
* Information Technology Warsaw University of Technology 2016
*/
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <stdexcept>
#include <string>
#include "rd/gpu/device/brute_force/rd_globals.cuh"
#include "rd/gpu/device/samples_generator.cuh"
#include "rd/gpu/block/cta_count_neighbour_points.cuh"
#include "rd/cpu/brute_force/rd_inner.hpp"
#include "rd/gpu/util/data_order_traits.hpp"
#include "rd/gpu/util/dev_memcpy.cuh"
#include "rd/utils/graph_drawer.hpp"
#include "rd/utils/cmd_line_parser.hpp"
#include "rd/utils/utilities.hpp"
#include "rd/utils/rd_samples.cuh"
#include "cub/test_util.h"
#include "cub/util_device.cuh"
#include "rd/utils/rd_params.hpp"
#include <helper_cuda.h>
static const int TEST_DIM = 2;
template <typename T>
void testCountNeighboursKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &rds);
int main(int argc, char const **argv)
{
rd::RDParams<double> dParams;
rd::RDSpiralParams<double> dSParams;
rd::RDParams<float> fParams;
rd::RDSpiralParams<float> fSParams;
dSParams.sigma = 1;
fSParams.sigma = 1.f;
dParams.np = 5000;
dParams.r1 = 20;
dParams.r2 = 20;
fParams.np = 5000;
fParams.r1 = 20;
fParams.r2 = 20;
//-----------------------------------------------------------------
// Initialize command line
rd::CommandLineArgs args(argc, argv);
if (args.CheckCmdLineFlag("help"))
{
printf("%s \n"
"\t\t[--np=<P size>]\n"
"\t\t[--r1=<r1 param>]\n"
"\t\t[--d=<device id>]\n"
"\t\t[--v <verbose>]\n"
"\n", argv[0]);
exit(0);
}
args.GetCmdLineArgument("r1", dParams.r1);
args.GetCmdLineArgument("r1", fParams.r1);
args.GetCmdLineArgument("np", dParams.np);
args.GetCmdLineArgument("np", fParams.np);
if (args.CheckCmdLineFlag("d"))
{
args.GetCmdLineArgument("d", fParams.devId);
args.GetCmdLineArgument("d", dParams.devId);
}
if (args.CheckCmdLineFlag("v"))
{
fParams.verbose = true;
dParams.verbose = true;
}
deviceInit(fParams.devId);
std::cout << rd::HLINE << std::endl;
std::cout << "FLOAT: " << std::endl;
testCountNeighboursKernel<float>(fParams, fSParams);
std::cout << rd::HLINE << std::endl;
std::cout << "DOUBLE: " << std::endl;
testCountNeighboursKernel<double>(dParams, dSParams);
std::cout << rd::HLINE << std::endl;
deviceReset();
std::cout << "END!" << std::endl;
return 0;
}
template <typename T>
bool countNeighboursGold(
rd::RDParams<T> &rdp,
T const *S,
T const *origin,
int treshold)
{
return rd::countNeighbouringPoints(S, rdp.np, origin,
TEST_DIM, rdp.r1 * rdp.r1, treshold);
}
template <
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_row_major(
T const * points,
int np,
T const * srcP,
int dim,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints<T, BLOCK_SIZE>(points, np, srcP, dim, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
*result = res;
}
template <typename T>
void testCountNeighboursRowMajorOrder(
rd::RDParams<T> & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursRowMajorOrder:" << std::endl;
checkCudaErrors(cudaDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(cudaMalloc((void**)&d_result, sizeof(int)));
__dispatch_count_neighbours_row_major<256><<<1, 256>>>(d_S, rdp.np, d_origin, TEST_DIM,
rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(cudaFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_row_major_v2(
T const * points,
int np,
T const * srcP,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, srcP, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursRowMajorOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursRowMajorOrder_v2:" << std::endl;
checkCudaErrors(cudaDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(cudaMalloc((void**)&d_result, sizeof(int)));
__dispatch_count_neighbours_row_major_v2<TEST_DIM, 256><<<1, 256>>>(d_S, rdp.np, d_origin,
rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(cudaFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_mixed_order_v2(
T const * points,
int np,
T const * srcP,
int stride,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, srcP, stride, r2,
threshold, rd::gpu::rowMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursMixedOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_S,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursMixedOrder_v2:" << std::endl;
checkCudaErrors(cudaDeviceSynchronize());
int *d_result, h_result;
checkCudaErrors(cudaMalloc((void**)&d_result, sizeof(int)));
__dispatch_count_neighbours_mixed_order_v2<TEST_DIM, 256><<<1, 256>>>(d_S, rdp.np, d_origin,
1, rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
checkCudaErrors(cudaFree(d_result));
}
template <
int DIM,
int BLOCK_SIZE,
typename T>
__global__ void __dispatch_count_neighbours_col_major_order_v2(
T const * points,
int np,
int pStride,
T const * srcP,
int sStride,
T r2,
int threshold,
int * result)
{
int res = rd::gpu::ctaCountNeighbouringPoints_v2<DIM, BLOCK_SIZE>(points, np, pStride, srcP,
sStride, r2, threshold, rd::gpu::colMajorOrderTag());
if (threadIdx.x == 0)
{
*result = (res >= threshold) ? 1 : 0;
}
}
template <typename T>
void testCountNeighboursColMajorOrder_v2(
rd::RDParams<T> const & rdp,
T const * d_SInitial,
T const * d_origin,
int NEIGHBOURS_THRESHOLD,
bool hasNeighbours)
{
std::cout << rd::HLINE << std::endl;
std::cout << "testCountNeighboursColMajorOrder_v2:" << std::endl;
checkCudaErrors(cudaDeviceSynchronize());
int *d_result, h_result;
T *d_S, *aux;
aux = new T[rdp.np * TEST_DIM];
checkCudaErrors(cudaMalloc((void**)&d_result, sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
rd::gpu::rdMemcpy<TEST_DIM, rd::COL_MAJOR, rd::ROW_MAJOR, cudaMemcpyDeviceToHost>(
aux, d_SInitial, rdp.np, rdp.np, TEST_DIM);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(d_S, aux, rdp.np * TEST_DIM * sizeof(T), cudaMemcpyHostToDevice));
__dispatch_count_neighbours_col_major_order_v2<TEST_DIM, 256><<<1, 256>>>(d_S, rdp.np, rdp.np, d_origin,
1, rdp.r1 * rdp.r1, NEIGHBOURS_THRESHOLD, d_result);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&h_result, d_result, sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
if (static_cast<bool>(h_result) != hasNeighbours)
{
std::cout << "[ERROR!]";
}
else
{
std::cout << "[SUCCESS!]";
}
std::cout << std::boolalpha << " is: <" << static_cast<bool>(h_result) <<
">, and should be: <" << hasNeighbours << ">" << std::endl;
delete[] aux;
checkCudaErrors(cudaFree(d_S));
checkCudaErrors(cudaFree(d_result));
}
template <typename T>
void testCountNeighboursKernel(rd::RDParams<T> &rdp,
rd::RDSpiralParams<T> const &sp)
{
const int NEIGHBOURS_THRESHOLD = 100;
const int oldCount = rdp.np;
const int closerRadius = rdp.r1;
rdp.np += NEIGHBOURS_THRESHOLD;
rdp.r1 += 10;
std::cout << "Samples: " << std::endl;
std::cout << "\t dimension: " << TEST_DIM << std::endl;
std::cout << "\t n_samples: " << rdp.np << std::endl;
std::cout << "\t r1: " << rdp.r1 << std::endl;
std::cout << "\t sigma: " << sp.sigma << std::endl;
rd::GraphDrawer<T> gDrawer;
T *d_S, *d_origin;
T *h_S, *h_origin;
checkCudaErrors(cudaMalloc((void**)&d_S, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMalloc((void**)&d_origin, TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMemset(d_S, 0, rdp.np * TEST_DIM * sizeof(T)));
checkCudaErrors(cudaMemset(d_origin, 0, TEST_DIM * sizeof(T)));
h_S = new T[rdp.np * TEST_DIM];
h_origin = new T[TEST_DIM];
for (int d = 0; d < TEST_DIM; ++d)
h_origin[d] = 0;
switch(TEST_DIM)
{
case 2:
rd::gpu::SamplesGenerator<T>::template circle<rd::ROW_MAJOR>(
oldCount, T(0), T(0), rdp.r1, sp.sigma, d_S);
rd::gpu::SamplesGenerator<T>::template circle<rd::ROW_MAJOR>(
NEIGHBOURS_THRESHOLD, T(0), T(0), closerRadius, sp.sigma, d_S + oldCount * TEST_DIM);
break;
case 3:
rd::gpu::SamplesGenerator<T>::template sphere<rd::ROW_MAJOR>(
oldCount, T(0), T(0), T(0), rdp.r1, sp.sigma, d_S);
rd::gpu::SamplesGenerator<T>::template sphere<rd::ROW_MAJOR>(
NEIGHBOURS_THRESHOLD, T(0), T(0), T(0), closerRadius, sp.sigma, d_S + oldCount * TEST_DIM);
break;
default:
throw std::logic_error("Not supported dimension!");
}
    // decrease the search radius so that the larger circle is not taken into account
rdp.r1 -= 5;
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_S, d_S, rdp.np * TEST_DIM * sizeof(T),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
std::ostringstream os;
if (rdp.verbose)
{
os << typeid(T).name() << "_" << TEST_DIM;
os << "D_initial_samples_set_";
gDrawer.showPoints(os.str(), h_S, rdp.np, TEST_DIM);
os.clear();
os.str(std::string());
}
//---------------------------------------------------
// REFERENCE COUNT_NEIGHBOURS
//---------------------------------------------------
bool hasNeighbours = countNeighboursGold(rdp, h_S, h_origin, NEIGHBOURS_THRESHOLD);
//---------------------------------------------------
// GPU COUNT_NEIGHBOURS
//---------------------------------------------------
rdp.devId = (rdp.devId != -1) ? rdp.devId : 0;
// int smVersion;
// checkCudaErrors(cub::SmVersion(smVersion, rdp.devId));
// testCountNeighboursRowMajorOrder(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursRowMajorOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursColMajorOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
testCountNeighboursMixedOrder_v2(rdp, d_S, d_origin, NEIGHBOURS_THRESHOLD, hasNeighbours);
// clean-up
delete[] h_S;
delete[] h_origin;
checkCudaErrors(cudaFree(d_S));
checkCudaErrors(cudaFree(d_origin));
}
|
ae1a54ec18744a1ea6924dcdbc36188bcbfbaf65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void mat_sum_by_row(
float* dst, float*src,int stride){
int tx = threadIdx.x;
float sum = 0;
for (int i = 0; i < stride; i++)
sum += src[tx + i * stride];
dst[tx] = sum;
}
__global__ void mat_sum_by_column(
float* dst, float*src, int stride){
int tx = threadIdx.x;
float sum = 0;
for (int i = 0; i < stride; i++)
sum += src[tx * stride + i];
dst[tx] = sum;
}
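// Note on naming: mat_sum_by_row accumulates over the row index and yields one
// sum per column, while mat_sum_by_column accumulates over the column index and
// yields one sum per row. matrix_sum_by_row() below launches mat_sum_by_column,
// so it prints the row sums of the 4 x 4 matrix.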
void matrix_sum_by_row(){
int h = 4, w = 4;
float *src_h, *dst_h;
float *src_d, *dst_d;
src_h = (float*)malloc(h*w*sizeof(float));
dst_h = (float*)malloc(h*w*sizeof(float));
hipMalloc(&src_d, h*w*sizeof(float));
hipMalloc(&dst_d, h*w*sizeof(float));
for (int i = 0; i < h*w; i++) src_h[i] = i;
hipMemcpy(src_d, src_h, h*w*sizeof(float), hipMemcpyHostToDevice);
mat_sum_by_column << <1, w >> > (dst_d, src_d, w);
hipMemcpy(dst_h, dst_d, w*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < w; i++)
printf("%d %f\n", i, dst_h[i]);
}
int main()
{
matrix_sum_by_row();
return 0;
}
| ae1a54ec18744a1ea6924dcdbc36188bcbfbaf65.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void mat_sum_by_row(
float* dst, float*src,int stride){
int tx = threadIdx.x;
float sum = 0;
for (int i = 0; i < stride; i++)
sum += src[tx + i * stride];
dst[tx] = sum;
}
__global__ void mat_sum_by_column(
float* dst, float*src, int stride){
int tx = threadIdx.x;
float sum = 0;
for (int i = 0; i < stride; i++)
sum += src[tx * stride + i];
dst[tx] = sum;
}
void matrix_sum_by_row(){
int h = 4, w = 4;
float *src_h, *dst_h;
float *src_d, *dst_d;
src_h = (float*)malloc(h*w*sizeof(float));
dst_h = (float*)malloc(h*w*sizeof(float));
cudaMalloc(&src_d, h*w*sizeof(float));
cudaMalloc(&dst_d, h*w*sizeof(float));
for (int i = 0; i < h*w; i++) src_h[i] = i;
cudaMemcpy(src_d, src_h, h*w*sizeof(float), cudaMemcpyHostToDevice);
mat_sum_by_column << <1, w >> > (dst_d, src_d, w);
cudaMemcpy(dst_h, dst_d, w*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < w; i++)
printf("%d %f\n", i, dst_h[i]);
}
int main()
{
matrix_sum_by_row();
return 0;
}
|
e68aff98ed061f86ad5a7fb650822a8087829f21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
extern "C" void CUDA_Gaussian_Filter(uchar *pcuSrc, uchar *pcuDst,
int w, int h, float *cuGkernel, int kernel_size);
__global__
void cuda_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight)
{
    // compute the current pixel index from the block and thread indices
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // accumulate in float
temp += (float)pSrcImage[index + i + j * SrcWidth]
* pKernel[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
__global__
void cuda_shared_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight)
{
    // shared memory that will hold the filter kernel
extern __shared__ float shared[];
    // compute the current pixel index from the block and thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
if (tx < KWidth && ty < KHeight)
{
        // copy the kernel coefficients into shared memory
shared[ty * KWidth + tx] = pKernel[ty * KWidth + tx];
}
__syncthreads();
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // accumulate in float
temp += (float)pSrcImage[index + i + j * SrcWidth]
* shared[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
// filter kernel pre-declared in constant memory
__constant__ float constKernel[5 * 5];
__global__
void cuda_constant_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, int KWidth, int KHeight)
{
    // compute the current pixel index from the block and thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // multiply by the constant-memory kernel
temp += (float)pSrcImage[index + i + j * SrcWidth]
* constKernel[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
void CUDA_Gaussian_Filter(uchar *pcuSrc, uchar *pcuDst,
int w, int h, float *cuGkernel, int kernel_size) {
    // 16 x 16 thread blocks and the matching grid size
dim3 grid = dim3(w / 16, h / 16);
dim3 block = dim3(16, 16);
    // run the CUDA Gaussian filter over every pixel
int c = 1; // 0 : global / 1 : shared / 2 : constant
if (c == 0)
cuda_Filter_2D << < grid, block >> > (pcuSrc, pcuDst, w, h, cuGkernel, kernel_size, kernel_size);
else if (c == 1)
        // dynamically allocate shared memory the size of the kernel
cuda_shared_Filter_2D << < grid, block, sizeof(float) * 5 * 5 >> > (pcuSrc, pcuDst, w, h, cuGkernel, kernel_size, kernel_size);
else if (c == 2) {
hipMemcpyToSymbol(constKernel, cuGkernel, sizeof(float)*kernel_size*kernel_size);
cuda_constant_Filter_2D << < grid, block >> > (pcuSrc, pcuDst, w, h, kernel_size, kernel_size);
}
    // synchronize with the device before returning
hipDeviceSynchronize();
} | e68aff98ed061f86ad5a7fb650822a8087829f21.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/opencv.hpp>
extern "C" void CUDA_Gaussian_Filter(uchar *pcuSrc, uchar *pcuDst,
int w, int h, float *cuGkernel, int kernel_size);
__global__
void cuda_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight)
{
    // compute the current pixel index from the block and thread indices
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // accumulate in float
temp += (float)pSrcImage[index + i + j * SrcWidth]
* pKernel[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
__global__
void cuda_shared_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight)
{
    // shared memory that will hold the filter kernel
extern __shared__ float shared[];
    // compute the current pixel index from the block and thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
if (tx < KWidth && ty < KHeight)
{
        // copy the kernel coefficients into shared memory
shared[ty * KWidth + tx] = pKernel[ty * KWidth + tx];
}
__syncthreads();
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // accumulate in float
temp += (float)pSrcImage[index + i + j * SrcWidth]
* shared[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
// filter kernel pre-declared in constant memory
__constant__ float constKernel[5 * 5];
__global__
void cuda_constant_Filter_2D(uchar * pSrcImage, uchar *pDstImage,
int SrcWidth, int SrcHeight, int KWidth, int KHeight)
{
    // compute the current pixel index from the block and thread indices
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = y * SrcWidth + x;
int mSize = KWidth / 2;
float temp = 0.f;
    // as in the serial version, skip the border and filter each interior pixel
if (x >= KWidth / 2 && y >= KHeight / 2
&& x < SrcWidth - KWidth / 2 && y < SrcHeight - KHeight / 2)
{
for (int j = -mSize; j <= mSize; j++) {
for (int i = -mSize; i <= mSize; i++) {
                // multiply by the constant-memory kernel
temp += (float)pSrcImage[index + i + j * SrcWidth]
* constKernel[i + mSize + (j + mSize) * KHeight];
}
}
        // store the result in the dst image as uchar
pDstImage[index] = (uchar)temp;
}
else {
        pDstImage[index] = 0; // zero the border pixels that the kernel window cannot cover
}
}
void CUDA_Gaussian_Filter(uchar *pcuSrc, uchar *pcuDst,
int w, int h, float *cuGkernel, int kernel_size) {
    // 16 x 16 thread blocks and the matching grid size
dim3 grid = dim3(w / 16, h / 16);
dim3 block = dim3(16, 16);
    // run the CUDA Gaussian filter over every pixel
int c = 1; // 0 : global / 1 : shared / 2 : constant
if (c == 0)
cuda_Filter_2D << < grid, block >> > (pcuSrc, pcuDst, w, h, cuGkernel, kernel_size, kernel_size);
else if (c == 1)
        // dynamically allocate shared memory the size of the kernel
cuda_shared_Filter_2D << < grid, block, sizeof(float) * 5 * 5 >> > (pcuSrc, pcuDst, w, h, cuGkernel, kernel_size, kernel_size);
else if (c == 2) {
cudaMemcpyToSymbol(constKernel, cuGkernel, sizeof(float)*kernel_size*kernel_size);
cuda_constant_Filter_2D << < grid, block >> > (pcuSrc, pcuDst, w, h, kernel_size, kernel_size);
}
    // synchronize with the device before returning
cudaThreadSynchronize();
} |
69835c8f05261b03a33a37f258b32092475c1dd0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "MatMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 69835c8f05261b03a33a37f258b32092475c1dd0.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation and
* any modifications thereto. Any use, reproduction, disclosure, or distribution
* of this software and related documentation without an express license
* agreement from NVIDIA Corporation is strictly prohibited.
*
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "MatMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void
matrixMul( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
e15e45b8af8e98208d42c8cfb93f5aa9bd08b488.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
#include "func.h"
#include <stdio.h>
#include <time.h>
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
int main( void ) {
FILE *myfile;
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, tottime;
int numseg;
printf("%s", "What is your I max? ");
scanf("%lf", &imax);
printf("%s", "What is the length of your rod? ");
scanf("%lf", &rlength);
printf("%s", "What is eta? ");
scanf("%lf", &eta);
printf("%s", "How many segments would you like? ");
scanf("%d", &numseg);
ldr = rlength/(numseg+1);
double bound = 0.5*ldr*ldr*mu0/eta;
printf("%s%lf%s", "What time step would you like? (must be less than ", bound, " ) ");
scanf("%lf", &tstep);
printf("%s", "How long would you like to run? \n");
scanf("%lf", &tottime);
//initialize
double *rod_new = new double[numseg+2];
double *rold, *rnew;
bool *ready;
int *timestep;
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&rold, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&rnew, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( hipMalloc( (void**)&ready, (numseg+2) * sizeof(bool) ) );
HANDLE_ERROR( hipMalloc( (void**)&timestep, (numseg+2) * sizeof(int) ) );
// fill the array 'new'
hipLaunchKernelGGL(( init), dim3(numseg+2),dim3(1), 0, 0, rnew, rold, timestep, ready, imax, ldr, rlength, numseg+2);
// copy data on device from 'rnew' to 'rod_new'
HANDLE_ERROR( hipMemcpy( rod_new, rnew, (numseg+2) * sizeof(double), hipMemcpyDeviceToHost ) );
int out;
// output r values
for (out = 0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
double aug = eta*tstep/(mu0*ldr*ldr);
//output initial conditions
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
int steps = tottime / tstep;
clock_t begin, end;
double time_spent;
begin = clock();
//run
hipLaunchKernelGGL(( run), dim3(numseg),dim3(1), 0, 0, rnew, rold, numseg+1, aug, steps, ready, timestep);
HANDLE_ERROR( hipMemcpy( rod_new, rnew, (numseg+2) * sizeof(double), hipMemcpyDeviceToHost ) );
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// free the memory allocated on the GPU
HANDLE_ERROR( hipFree( rold ) );
HANDLE_ERROR( hipFree( rnew ) );
HANDLE_ERROR( hipFree( ready ) );
HANDLE_ERROR( hipFree( timestep ) );
//output final values
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
printf("\n------------------------------------\n");
printf("Execution took: %lf sec\n", time_spent);
return 0;
}
| e15e45b8af8e98208d42c8cfb93f5aa9bd08b488.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "book.h"
#include "func.h"
#include <stdio.h>
#include <time.h>
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
int main( void ) {
FILE *myfile;
myfile = fopen("results.txt", "w");
double imax, rlength, eta, tstep, ldr, tottime;
int numseg;
printf("%s", "What is your I max? ");
scanf("%lf", &imax);
printf("%s", "What is the length of your rod? ");
scanf("%lf", &rlength);
printf("%s", "What is eta? ");
scanf("%lf", &eta);
printf("%s", "How many segments would you like? ");
scanf("%d", &numseg);
ldr = rlength/(numseg+1);
double bound = 0.5*ldr*ldr*mu0/eta;
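// Note (an interpretation, not from the original comments): this matches the
// explicit finite-difference stability limit dt <= 0.5 * dx^2 * mu0 / eta for
// a diffusion update with diffusivity eta/mu0, hence the prompt below asking
// for a time step smaller than 'bound'.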
printf("%s%lf%s", "What time step would you like? (must be less than ", bound, " ) ");
scanf("%lf", &tstep);
printf("%s", "How long would you like to run? \n");
scanf("%lf", &tottime);
//initialize
double *rod_new = new double[numseg+2];
double *rold, *rnew;
bool *ready;
int *timestep;
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&rold, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&rnew, (numseg+2) * sizeof(double) ) );
HANDLE_ERROR( cudaMalloc( (void**)&ready, (numseg+2) * sizeof(bool) ) );
HANDLE_ERROR( cudaMalloc( (void**)&timestep, (numseg+2) * sizeof(int) ) );
// fill the array 'new'
init<<<numseg+2,1>>>(rnew, rold, timestep, ready, imax, ldr, rlength, numseg+2);
// copy data on device from 'rnew' to 'rod_new'
HANDLE_ERROR( cudaMemcpy( rod_new, rnew, (numseg+2) * sizeof(double), cudaMemcpyDeviceToHost ) );
int out;
// output r values
for (out = 0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
double aug = eta*tstep/(mu0*ldr*ldr);
//output initial conditions
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
int steps = tottime / tstep;
clock_t begin, end;
double time_spent;
begin = clock();
//run
run<<<numseg,1>>>(rnew, rold, numseg+1, aug, steps, ready, timestep);
HANDLE_ERROR( cudaMemcpy( rod_new, rnew, (numseg+2) * sizeof(double), cudaMemcpyDeviceToHost ) );
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// free the memory allocated on the GPU
HANDLE_ERROR( cudaFree( rold ) );
HANDLE_ERROR( cudaFree( rnew ) );
HANDLE_ERROR( cudaFree( ready ) );
HANDLE_ERROR( cudaFree( timestep ) );
//output final values
for (out=0; out<numseg+1; out++) {
fprintf( myfile, "%lf ", *(rod_new+out) );
}
fprintf( myfile, "%lf\n", *(rod_new+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
printf("\n------------------------------------\n");
printf("Execution took: %lf sec\n", time_spent);
return 0;
}
|
4b9b1f9b69e7e7e66001de13407e444b6f7c9231.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "repeat_interleave.h"
#include <ATen/hip/HIPContext.h>
__global__ static void compute_cuda_kernel(const int64_t * __restrict__ repeat_ptr, const int64_t * __restrict__ cumsum_ptr,
int64_t * __restrict__ result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
int64_t repeat = repeat_ptr[i];
int64_t start = end - repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
__global__ static void compute_cuda_kernel_scope(const int64_t * __restrict__ scope_ptr, int64_t * __restrict__ result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t start = scope_ptr[2 * i];
int64_t repeat = scope_ptr[2 * i + 1];
int64_t end = start + repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
static void compute_cuda(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
hipLaunchKernelGGL(( compute_cuda_kernel), dim3(grid), dim3(block), 0, stream, repeat_ptr, cumsum_ptr, result_ptr, size);
}
static void compute_cuda_scope(int64_t *scope_ptr, int64_t *result_ptr, int64_t size) {
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
hipLaunchKernelGGL(( compute_cuda_kernel_scope), dim3(grid), dim3(block), 0, stream, scope_ptr, result_ptr, size);
}
at::Tensor &genric::repeat_interleave_gpu_out(const at::Tensor& repeats, at::Tensor &out) {
AT_CHECK(out.is_contiguous(), "Output array must be contiguous.");
auto repeats_ = repeats.contiguous();
auto cumsum = repeats.cumsum(0);
compute_cuda(repeats_.data<int64_t>(), cumsum.data<int64_t>(), out.data<int64_t>(),
repeats.size(0));
return out;
}
at::Tensor &genric::repeat_interleave_gpu_out_scope(const at::Tensor& scope, at::Tensor &out) {
AT_CHECK(out.is_contiguous(), "Output array must be contiguous");
auto scope_ = scope.contiguous();
compute_cuda_scope(scope_.data<int64_t>(), out.data<int64_t>(), scope_.size(0));
return out;
}
| 4b9b1f9b69e7e7e66001de13407e444b6f7c9231.cu | #include "repeat_interleave.h"
#include <ATen/cuda/CUDAContext.h>
__global__ static void compute_cuda_kernel(const int64_t * __restrict__ repeat_ptr, const int64_t * __restrict__ cumsum_ptr,
int64_t * __restrict__ result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
int64_t repeat = repeat_ptr[i];
int64_t start = end - repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
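// Worked example (illustrative, not from the original source): for
// repeats = [2, 1, 3] the cumulative sums are [2, 3, 6], so i = 0 fills
// result[0..1] with 0, i = 1 fills result[2] with 1, and i = 2 fills
// result[3..5] with 2, giving result = [0, 0, 1, 2, 2, 2].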
__global__ static void compute_cuda_kernel_scope(const int64_t * __restrict__ scope_ptr, int64_t * __restrict__ result_ptr, int64_t size) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = blockDim.x * gridDim.x;
for (int64_t i = idx; i < size; i += stride) {
int64_t start = scope_ptr[2 * i];
int64_t repeat = scope_ptr[2 * i + 1];
int64_t end = start + repeat;
for(int64_t j = start; j < end; j++) {
result_ptr[j] = i;
}
}
}
static void compute_cuda(int64_t *repeat_ptr, int64_t *cumsum_ptr, int64_t *result_ptr, int64_t size) {
auto stream = at::cuda::getCurrentCUDAStream();
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
compute_cuda_kernel<<<grid, block, 0, stream>>>(repeat_ptr, cumsum_ptr, result_ptr, size);
}
static void compute_cuda_scope(int64_t *scope_ptr, int64_t *result_ptr, int64_t size) {
auto stream = at::cuda::getCurrentCUDAStream();
int64_t block = 512;
int64_t grid = std::min<int64_t>((size + block - 1) / block, 2048L);
compute_cuda_kernel_scope<<<grid, block, 0, stream>>>(scope_ptr, result_ptr, size);
}
at::Tensor &genric::repeat_interleave_gpu_out(const at::Tensor& repeats, at::Tensor &out) {
AT_CHECK(out.is_contiguous(), "Output array must be contiguous.");
auto repeats_ = repeats.contiguous();
auto cumsum = repeats.cumsum(0);
compute_cuda(repeats_.data<int64_t>(), cumsum.data<int64_t>(), out.data<int64_t>(),
repeats.size(0));
return out;
}
at::Tensor &genric::repeat_interleave_gpu_out_scope(const at::Tensor& scope, at::Tensor &out) {
AT_CHECK(out.is_contiguous(), "Output array must be contiguous");
auto scope_ = scope.contiguous();
compute_cuda_scope(scope_.data<int64_t>(), out.data<int64_t>(), scope_.size(0));
return out;
}
|
296fd9fb6f7d57bc574b5f696aba39a3216d9748.hip | // !!! This is a file automatically generated by hipify!!!
#include "BufferedWindow.cuh"
#include "../Window/WindowsWindow.cuh"
#include <new>
BufferedWindow::BufferedWindow(OptionFlags optionFlags, Window *target, const wchar_t *windowName, FrameBufferManager *frameBufferManager, int renderingDeviceId, int refreshIntervalMilliseconds) {
options = optionFlags;
bufferManager = frameBufferManager;
deviceId = renderingDeviceId;
state = 0;
numFramesDisplayed = 0;
if (target != NULL) {
targetWindow = target;
if (windowName != NULL)
targetWindow->setName(windowName);
}
else {
targetWindow = new WindowsWindow(windowName);
state |= WINDOW_ALLOCATED_INTERNALLY;
}
windowThread = std::thread(bufferedWindowThread, this);
autoRefreshInterval = refreshIntervalMilliseconds;
if (autoRefreshInterval > 0)
refreshThread = std::thread(autoRefreshThread, this);
}
BufferedWindow::~BufferedWindow() {
closeWindow();
if ((state & WINDOW_ALLOCATED_INTERNALLY) != 0)
delete targetWindow;
}
void BufferedWindow::setBuffer(FrameBufferManager *frameBufferManager) {
std::lock_guard<std::mutex> guard(bufferLock);
bufferManager = frameBufferManager;
}
bool BufferedWindow::trySetBuffer(FrameBufferManager *frameBufferManager) {
if (bufferLock.try_lock()) {
bufferManager = frameBufferManager;
bufferLock.unlock();
return true;
}
return false;
}
void BufferedWindow::notifyChange() { bufferCond.notify_one(); }
bool BufferedWindow::getWindowResolution(int &width, int &height)const { return targetWindow->getResolution(width, height); }
bool BufferedWindow::windowClosed()const { return targetWindow->closed(); }
void BufferedWindow::closeWindow() {
while (true) {
std::lock_guard<std::mutex> guard(bufferLock);
if ((state & BUFFERED_WINDOW_THREAD_FINISHED) != 0) break;
state |= BUFFERED_WINDOW_SHOULD_EXIT;
bufferCond.notify_one();
}
{
std::lock_guard<std::mutex> guard(bufferLock);
if ((state & WINDOW_DESTROYED) != 0) return;
state |= WINDOW_DESTROYED;
targetWindow->close();
}
windowThread.join();
if (autoRefreshInterval > 0) refreshThread.join();
}
size_t BufferedWindow::framesDisplayed()const { return numFramesDisplayed; }
void BufferedWindow::bufferedWindowThread(BufferedWindow *bufferedWindow) {
bool deviceSynchNeeded = ((bufferedWindow->options & SYNCH_FRAME_BUFFER_FROM_DEVICE) != 0);
if (deviceSynchNeeded) if (hipSetDevice(bufferedWindow->deviceId) != hipSuccess) {
bufferedWindow->state |= BUFFERED_WINDOW_THREAD_FINISHED;
return;
}
while (true) {
std::unique_lock<std::mutex> uniqueLock(bufferedWindow->bufferLock);
bufferedWindow->bufferCond.wait(uniqueLock);
if (bufferedWindow->windowClosed() || ((bufferedWindow->state & BUFFERED_WINDOW_SHOULD_EXIT) != 0)) {
bufferedWindow->state |= BUFFERED_WINDOW_THREAD_FINISHED;
break;
}
if (bufferedWindow->bufferManager == NULL) continue;
FrameBuffer *cpuHandle = ((FrameBufferManager*)bufferedWindow->bufferManager)->cpuHandle();
if (cpuHandle == NULL) continue;
if (deviceSynchNeeded)
if (!cpuHandle->updateHostBlocks(
((FrameBufferManager*)bufferedWindow->bufferManager)->gpuHandle(bufferedWindow->deviceId), 0, cpuHandle->getBlockCount())) continue;
bufferedWindow->targetWindow->startUpdate();
int handleWidth, handleHeight;
cpuHandle->getSize(&handleWidth, &handleHeight);
bufferedWindow->targetWindow->setImageResolution(handleWidth, handleHeight);
for (int j = 0; j < handleHeight; j++)
for (int i = 0; i < handleWidth; i++) {
Color color = cpuHandle->getColor(i, j);
bufferedWindow->targetWindow->setPixel(i, j, color.r, color.g, color.b, color.a);
}
bufferedWindow->targetWindow->endUpdate();
bufferedWindow->numFramesDisplayed++;
}
}
void BufferedWindow::autoRefreshThread(BufferedWindow *bufferedWindow) {
while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(bufferedWindow->autoRefreshInterval));
{
std::lock_guard<std::mutex> guard(bufferedWindow->bufferLock);
if (bufferedWindow->windowClosed() || ((bufferedWindow->state & BUFFERED_WINDOW_SHOULD_EXIT) != 0)) break;
if (bufferedWindow->bufferManager == NULL) continue;
FrameBuffer *cpuHandle = ((FrameBufferManager*)bufferedWindow->bufferManager)->cpuHandle();
if (cpuHandle == NULL) continue;
bufferedWindow->targetWindow->startUpdate();
int handleWidth, handleHeight;
cpuHandle->getSize(&handleWidth, &handleHeight);
bufferedWindow->targetWindow->setImageResolution(handleWidth, handleHeight);
for (int j = 0; j < handleHeight; j++)
for (int i = 0; i < handleWidth; i++) {
Color color = cpuHandle->getColor(i, j);
bufferedWindow->targetWindow->setPixel(i, j, color.r, color.g, color.b, color.a);
}
bufferedWindow->targetWindow->endUpdate();
}
}
}
| 296fd9fb6f7d57bc574b5f696aba39a3216d9748.cu | #include "BufferedWindow.cuh"
#include "../Window/WindowsWindow.cuh"
#include <new>
BufferedWindow::BufferedWindow(OptionFlags optionFlags, Window *target, const wchar_t *windowName, FrameBufferManager *frameBufferManager, int renderingDeviceId, int refreshIntervalMilliseconds) {
options = optionFlags;
bufferManager = frameBufferManager;
deviceId = renderingDeviceId;
state = 0;
numFramesDisplayed = 0;
if (target != NULL) {
targetWindow = target;
if (windowName != NULL)
targetWindow->setName(windowName);
}
else {
targetWindow = new WindowsWindow(windowName);
state |= WINDOW_ALLOCATED_INTERNALLY;
}
windowThread = std::thread(bufferedWindowThread, this);
autoRefreshInterval = refreshIntervalMilliseconds;
if (autoRefreshInterval > 0)
refreshThread = std::thread(autoRefreshThread, this);
}
BufferedWindow::~BufferedWindow() {
closeWindow();
if ((state & WINDOW_ALLOCATED_INTERNALLY) != 0)
delete targetWindow;
}
void BufferedWindow::setBuffer(FrameBufferManager *frameBufferManager) {
std::lock_guard<std::mutex> guard(bufferLock);
bufferManager = frameBufferManager;
}
bool BufferedWindow::trySetBuffer(FrameBufferManager *frameBufferManager) {
if (bufferLock.try_lock()) {
bufferManager = frameBufferManager;
bufferLock.unlock();
return true;
}
return false;
}
void BufferedWindow::notifyChange() { bufferCond.notify_one(); }
bool BufferedWindow::getWindowResolution(int &width, int &height)const { return targetWindow->getResolution(width, height); }
bool BufferedWindow::windowClosed()const { return targetWindow->closed(); }
void BufferedWindow::closeWindow() {
while (true) {
std::lock_guard<std::mutex> guard(bufferLock);
if ((state & BUFFERED_WINDOW_THREAD_FINISHED) != 0) break;
state |= BUFFERED_WINDOW_SHOULD_EXIT;
bufferCond.notify_one();
}
{
std::lock_guard<std::mutex> guard(bufferLock);
if ((state & WINDOW_DESTROYED) != 0) return;
state |= WINDOW_DESTROYED;
targetWindow->close();
}
windowThread.join();
if (autoRefreshInterval > 0) refreshThread.join();
}
size_t BufferedWindow::framesDisplayed()const { return numFramesDisplayed; }
void BufferedWindow::bufferedWindowThread(BufferedWindow *bufferedWindow) {
bool deviceSynchNeeded = ((bufferedWindow->options & SYNCH_FRAME_BUFFER_FROM_DEVICE) != 0);
if (deviceSynchNeeded) if (cudaSetDevice(bufferedWindow->deviceId) != cudaSuccess) {
bufferedWindow->state |= BUFFERED_WINDOW_THREAD_FINISHED;
return;
}
while (true) {
std::unique_lock<std::mutex> uniqueLock(bufferedWindow->bufferLock);
bufferedWindow->bufferCond.wait(uniqueLock);
if (bufferedWindow->windowClosed() || ((bufferedWindow->state & BUFFERED_WINDOW_SHOULD_EXIT) != 0)) {
bufferedWindow->state |= BUFFERED_WINDOW_THREAD_FINISHED;
break;
}
if (bufferedWindow->bufferManager == NULL) continue;
FrameBuffer *cpuHandle = ((FrameBufferManager*)bufferedWindow->bufferManager)->cpuHandle();
if (cpuHandle == NULL) continue;
if (deviceSynchNeeded)
if (!cpuHandle->updateHostBlocks(
((FrameBufferManager*)bufferedWindow->bufferManager)->gpuHandle(bufferedWindow->deviceId), 0, cpuHandle->getBlockCount())) continue;
bufferedWindow->targetWindow->startUpdate();
int handleWidth, handleHeight;
cpuHandle->getSize(&handleWidth, &handleHeight);
bufferedWindow->targetWindow->setImageResolution(handleWidth, handleHeight);
for (int j = 0; j < handleHeight; j++)
for (int i = 0; i < handleWidth; i++) {
Color color = cpuHandle->getColor(i, j);
bufferedWindow->targetWindow->setPixel(i, j, color.r, color.g, color.b, color.a);
}
bufferedWindow->targetWindow->endUpdate();
bufferedWindow->numFramesDisplayed++;
}
}
void BufferedWindow::autoRefreshThread(BufferedWindow *bufferedWindow) {
while (true) {
std::this_thread::sleep_for(std::chrono::milliseconds(bufferedWindow->autoRefreshInterval));
{
std::lock_guard<std::mutex> guard(bufferedWindow->bufferLock);
if (bufferedWindow->windowClosed() || ((bufferedWindow->state & BUFFERED_WINDOW_SHOULD_EXIT) != 0)) break;
if (bufferedWindow->bufferManager == NULL) continue;
FrameBuffer *cpuHandle = ((FrameBufferManager*)bufferedWindow->bufferManager)->cpuHandle();
if (cpuHandle == NULL) continue;
bufferedWindow->targetWindow->startUpdate();
int handleWidth, handleHeight;
cpuHandle->getSize(&handleWidth, &handleHeight);
bufferedWindow->targetWindow->setImageResolution(handleWidth, handleHeight);
for (int j = 0; j < handleHeight; j++)
for (int i = 0; i < handleWidth; i++) {
Color color = cpuHandle->getColor(i, j);
bufferedWindow->targetWindow->setPixel(i, j, color.r, color.g, color.b, color.a);
}
bufferedWindow->targetWindow->endUpdate();
}
}
}
|
af3cb8561891f85b203a6a25dacbecd90de9f19b.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaHelpers.h"
#include "GatherShuffle.h"
#include "ScatterShuffle.h"
#include "shuffle/FeistelBijectiveShuffle.h"
#include "shuffle/FisherYatesShuffle.h"
#include "shuffle/GPUSwapShuffle.h"
#include "shuffle/LCGBijectiveShuffle.h"
#include "shuffle/LubyRackoffBijectiveShuffle.h"
#include "shuffle/MergeShuffle.h"
#include "shuffle/NoOpBijectiveShuffle.h"
#include "shuffle/RaoSandeliusShuffle.h"
#include "shuffle/SPNetworkBijectiveShuffle.h"
#include "shuffle/SortShuffle.h"
#include "shuffle/StdShuffle.h"
#include <benchmark/benchmark.h>
#include <cmath>
#include <sstream>
#include <thrust/device_vector.h>
#include <vector>
using DataType = uint64_t;
template <class ShuffleFunction>
static void benchmarkScatterGather( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + state.range( 0 );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
using HostVector = typename std::vector<typename ContainerType::value_type>;
HostVector tmp( in_container.size() );
StdShuffle<HostVector> temp_shuffler;
thrust::sequence( tmp.begin(), tmp.end() );
int seed = 0;
for( auto _ : state )
{
state.PauseTiming();
temp_shuffler( tmp, tmp, seed );
thrust::copy( tmp.begin(), tmp.end(), in_container.begin() );
checkCudaError( hipDeviceSynchronize() );
state.ResumeTiming();
// Benchmarks raw gather speed of a random permutation
shuffler( in_container, out_container, seed );
checkCudaError( hipDeviceSynchronize() );
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
template <class ShuffleFunction>
static void benchmarkFunction( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + state.range( 0 );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
int seed = 0;
for( auto _ : state )
{
shuffler( in_container, out_container, seed );
checkCudaError( hipDeviceSynchronize() );
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
static void argsGenerator( benchmark::internal::Benchmark* b )
{
b->Ranges( { { 1 << 8, 1 << 29 }, { 0, 1 } } );
}
BENCHMARK_TEMPLATE( benchmarkFunction, MergeShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, RaoSandeliusShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// FeistelBijectiveShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// FeistelBijectiveSortShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, FeistelBijectiveScanShuffle<thrust::device_vector<DataType>> )
->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveSortShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveScanShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LCGBijectiveShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator
// );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LCGBijectiveSortShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator
// );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveSortShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkScatterGather, ScatterShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, StdShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, FisherYatesShuffle<std::vector<DataType>> )->Apply( argsGenerator );
// Too slow
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveSortShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveScanShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE(benchmarkFunction,
// GPUSwapShuffle<uint64_t>)->Apply(argsGenerator);
BENCHMARK_MAIN(); | af3cb8561891f85b203a6a25dacbecd90de9f19b.cu | #include "CudaHelpers.h"
#include "GatherShuffle.h"
#include "ScatterShuffle.h"
#include "shuffle/FeistelBijectiveShuffle.h"
#include "shuffle/FisherYatesShuffle.h"
#include "shuffle/GPUSwapShuffle.h"
#include "shuffle/LCGBijectiveShuffle.h"
#include "shuffle/LubyRackoffBijectiveShuffle.h"
#include "shuffle/MergeShuffle.h"
#include "shuffle/NoOpBijectiveShuffle.h"
#include "shuffle/RaoSandeliusShuffle.h"
#include "shuffle/SPNetworkBijectiveShuffle.h"
#include "shuffle/SortShuffle.h"
#include "shuffle/StdShuffle.h"
#include <benchmark/benchmark.h>
#include <cmath>
#include <sstream>
#include <thrust/device_vector.h>
#include <vector>
using DataType = uint64_t;
template <class ShuffleFunction>
static void benchmarkScatterGather( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + state.range( 0 );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
using HostVector = typename std::vector<typename ContainerType::value_type>;
HostVector tmp( in_container.size() );
StdShuffle<HostVector> temp_shuffler;
thrust::sequence( tmp.begin(), tmp.end() );
int seed = 0;
for( auto _ : state )
{
state.PauseTiming();
temp_shuffler( tmp, tmp, seed );
thrust::copy( tmp.begin(), tmp.end(), in_container.begin() );
checkCudaError( cudaDeviceSynchronize() );
state.ResumeTiming();
// Benchmarks raw gather speed of a random permutation
shuffler( in_container, out_container, seed );
checkCudaError( cudaDeviceSynchronize() );
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
template <class ShuffleFunction>
static void benchmarkFunction( benchmark::State& state )
{
ShuffleFunction shuffler;
using ContainerType = typename ShuffleFunction::container_type;
// Shuffle second param adds 0 or 1 to compare power of two (best case) vs.
// one above power of two (worst case)
const uint64_t num_to_shuffle = (uint64_t)state.range( 1 ) + state.range( 0 );
ContainerType in_container( num_to_shuffle );
ContainerType out_container( num_to_shuffle );
int seed = 0;
for( auto _ : state )
{
shuffler( in_container, out_container, seed );
checkCudaError( cudaDeviceSynchronize() );
seed++;
}
state.SetItemsProcessed( state.iterations() * num_to_shuffle );
uint64_t log = std::log2( num_to_shuffle );
std::stringstream s;
s << "Shuffle 2^" << log;
if( state.range( 1 ) )
{
s << " + 1";
}
state.SetLabel( s.str() );
}
static void argsGenerator( benchmark::internal::Benchmark* b )
{
b->Ranges( { { 1 << 8, 1 << 29 }, { 0, 1 } } );
}
BENCHMARK_TEMPLATE( benchmarkFunction, MergeShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, RaoSandeliusShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// FeistelBijectiveShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// FeistelBijectiveSortShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, FeistelBijectiveScanShuffle<thrust::device_vector<DataType>> )
->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveSortShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SPNetworkBijectiveScanShuffle<thrust::device_vector<DataType>> )
// ->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LCGBijectiveShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator
// );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LCGBijectiveSortShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, LCGBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator
// );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveSortShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// NoOpBijectiveScanShuffle<thrust::device_vector<DataType>> )->Apply(
// argsGenerator );
BENCHMARK_TEMPLATE( benchmarkScatterGather, GatherShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkScatterGather, ScatterShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// SortShuffle<thrust::device_vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, StdShuffle<std::vector<DataType>> )->Apply( argsGenerator );
BENCHMARK_TEMPLATE( benchmarkFunction, FisherYatesShuffle<std::vector<DataType>> )->Apply( argsGenerator );
// Too slow
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveSortShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE( benchmarkFunction,
// LubyRackoffBijectiveScanShuffle<thrust::device_vector<uint64_t>> )->Apply(
// argsGenerator );
// BENCHMARK_TEMPLATE(benchmarkFunction,
// GPUSwapShuffle<uint64_t>)->Apply(argsGenerator);
BENCHMARK_MAIN(); |
e2c1db860388c151774a985efe7f774c125acbaf.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <fstream>
#include <iostream>
#include <cmath>
#include <ctime>
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define DEBUG
#define CHECK_GPU_RESULTS
#define PRINT_TIME
#define HEAD_LENGTH 5
// helper function that waits for the kernel call to finish and checks for errors
void CudaSyncAndCheckErrors(void){
// synchronize the CUDA execution stream
hipStreamSynchronize(0);
// catch errors and exit if any occurred
hipError_t x = hipGetLastError();
if ((x) != hipSuccess) {
printf("Error: %s\n", hipGetErrorString(x)); fclose(stdout); exit(1);
}
}
// Define two data types:
// a time series on the CPU and on the GPU, respectively
typedef thrust::host_vector<double> Timeseries;
typedef thrust::device_vector<double> Timeseries_GPU;
// Function that reads a time series from a custom binary file format
void ReadTimeseries(Timeseries &ts, const char* filename){
std::ifstream fin(filename, std::ios::binary);
// length of the time series
int length;
fin.read((char*)&length, sizeof(int));
// the time series itself
ts.resize(length);
fin.read((char*)&ts[0], sizeof(double) * length);
fin.close();
}
// Two structures for the rolling (window) statistics
// on the CPU
struct RollingStat{
Timeseries mean;
Timeseries var;
};
// on the GPU
struct RollingStat_GPU{
Timeseries_GPU mean;
Timeseries_GPU var;
};
// Function that computes the rolling statistics
void ComputeRollingStat_CPU(int window_size, Timeseries &ts, RollingStat &rolling){
int rolling_length = ts.size() - (window_size - 1);
// allocate memory
rolling.mean.resize(rolling_length);
rolling.var.resize(rolling_length);
for(int i = 0; i < rolling_length; ++i){
// first compute the mean
double loc = 0.0;
for(int k = 0; k < window_size; ++k)
loc += ts[i+k];
rolling.mean[i] = loc / window_size;
// now compute the variance
loc = 0;
for(int k = 0; k < window_size; ++k)
loc += pow(ts[i+k] - rolling.mean[i], 2);
rolling.var[i] = loc / (window_size - 1);
}
return;
}
// CUDA kernel that computes the rolling statistics
__global__ void __kernel_ComputeRollingStat_GPU(int window_size, int N, double *ts, double *mean, double *var){
// get the thread index within the grid
int idx = blockDim.x*blockIdx.x + threadIdx.x;
// if we have not gone out of bounds, then
if(idx < N){
// compute the mean
double loc_mean = 0.0;
for(int k = 0; k < window_size; ++k)
loc_mean += ts[idx + k];
mean[idx] = loc_mean / window_size;
// compute the variance
double loc = 0.0;
for(int k = 0; k < window_size; ++k)
loc += pow(ts[idx + k] - mean[idx], 2);
var[idx] = loc / (window_size - 1);
}
}
// Function that computes the rolling statistics on the GPU
void ComputeRollingStat_GPU(int window_size, Timeseries_GPU &ts, RollingStat_GPU &rolling){
int rolling_length = ts.size() - (window_size - 1);
rolling.mean.resize(rolling_length);
rolling.var.resize(rolling_length);
// set the kernel launch configuration
// a block size of 256 is the customary choice
dim3 block_spec(256);
dim3 grid_spec(rolling_length / 256 + (rolling_length % 256? 1 : 0));
// launch the CUDA kernel
hipLaunchKernelGGL(( __kernel_ComputeRollingStat_GPU), dim3(grid_spec), dim3(block_spec) , 0, 0, window_size, rolling_length,
thrust::raw_pointer_cast(&ts[0]),
thrust::raw_pointer_cast(&rolling.mean[0]),
thrust::raw_pointer_cast(&rolling.var[0]));
// wait for the kernel to finish
CudaSyncAndCheckErrors();
return;
}
// The Thrust library is used to compute the quantile
// function that computes the quantile on the CPU
double ComputeQuantile_CPU(double alpha, Timeseries &ts){
Timeseries loc(ts);
// index of the required order statistic
int k_alpha = (int)(alpha * ts.size());
// sort to obtain the order statistics
thrust::sort(thrust::host, loc.begin(), loc.end());
return loc[k_alpha];
}
// function that computes the quantile on the GPU
double ComputeQuantile_GPU(double alpha, Timeseries_GPU &ts){
Timeseries_GPU loc(ts);
// index of the required order statistic
int k_alpha = (int)(alpha * ts.size());
// sort to obtain the order statistics
thrust::sort(thrust::device, loc.begin(), loc.end());
return loc[k_alpha];
}
// helper that computes the mean of a single window
inline double ComputeMean_CPU(double *ts, int window_size){
double loc = 0;
for(int i = 0; i < window_size; ++i)
loc += ts[i];
return loc / window_size;
}
// function that detects the movement onset points
void DetectMovement_CPU(int window_size, int min_dist, double q_alpha,
Timeseries &ts, Timeseries &movement, std::vector<double> &points
){
// initialize the parameters
double prev_mean = ComputeMean_CPU(&ts[0], window_size);
bool prev_movement = false;
int prev_point = -2 * min_dist;
// start from the second window and compare it with the first
for(int left = window_size; left < ts.size(); left += window_size){
double cur_mean = ComputeMean_CPU(&ts[left], window_size);
// check whether the means of adjacent windows differ significantly
bool cur_movement = (fabs(cur_mean - prev_mean) > q_alpha);
// if there was no movement before, there is movement now, and the distance
// from the previous onset point is larger than the chosen
// filtering threshold, then add the point to the list
if(cur_movement && (!prev_movement) && (left - prev_point > min_dist)){
points.push_back(left);
prev_point = left;
}
// move on to the next step
prev_movement = cur_movement;
prev_mean = cur_mean;
}
}
// main program
int main(int argc, char *argv[]){
// start the global timer
clock_t global_start, global_stop;
global_start = clock();
// greeting message
std::cout << "Hello, CUDA and Thrust!" << std::endl;
// parameter initialization
// window size for computing the rolling variance
int window_size = 32;
// window size for the mean-comparison method
int window_size_mm = 64;
// minimum distance between movement onset points
int min_dist = window_size_mm * 4;
// for timing individual parts of the program
clock_t start, stop;
// Read the myogram time series
// the path to the data file can be passed as a command-line argument
Timeseries ts;
start = clock();
ReadTimeseries(ts,(argc > 1? argv[1] : "data/miog.bin"));
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ReadTimeseries time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// copy the loaded series to the GPU
start = clock();
Timeseries_GPU ts_gpu(ts);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "HtoD time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// Compute the rolling statistics on the GPU
RollingStat_GPU rolling_gpu;
start = clock();
ComputeRollingStat_GPU(window_size, ts_gpu, rolling_gpu);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ComputeRollingStat_GPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// Compute the quantile on the GPU
start = clock();
double q_gpu = ComputeQuantile_GPU(0.7, rolling_gpu.var);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ComputeQuantile_GPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// copy the results back to the CPU
start = clock();
Timeseries var(rolling_gpu.var);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "DtoH time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// find the movement onset points
Timeseries movement;
std::vector<double> points;
start = clock();
DetectMovement_CPU(window_size_mm, min_dist, q_gpu, var, movement, points);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "DetectMovement_CPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// save the detected points to a text file
// the file name can be passed as the second command-line argument
start = clock();
std::ofstream fout((argc > 2? argv[2] : "result.txt"));
for(int i = 0; i < points.size(); ++i)
fout << points[i] << " ";
fout.close();
stop = clock();
#ifdef PRINT_TIME
std::cerr << "SavingResults time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// print the total program run time
global_stop = clock();
std::cerr << "Total time: " << ((double)(global_stop - global_start))/CLOCKS_PER_SEC << std::endl;
return 0;
} | e2c1db860388c151774a985efe7f774c125acbaf.cu | #include <vector>
#include <fstream>
#include <iostream>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <cuda_device_runtime_api.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#define DEBUG
#define CHECK_GPU_RESULTS
#define PRINT_TIME
#define HEAD_LENGTH 5
// helper function that waits for the kernel call to finish and checks for errors
void CudaSyncAndCheckErrors(void){
// synchronize the CUDA execution stream
cudaStreamSynchronize(0);
// catch errors and exit if any occurred
cudaError_t x = cudaGetLastError();
if ((x) != cudaSuccess) {
printf("Error: %s\n", cudaGetErrorString(x)); fclose(stdout); exit(1);
}
}
// Define two data types:
// a time series on the CPU and on the GPU, respectively
typedef thrust::host_vector<double> Timeseries;
typedef thrust::device_vector<double> Timeseries_GPU;
// Function that reads a time series from a custom binary file format
void ReadTimeseries(Timeseries &ts, const char* filename){
std::ifstream fin(filename, std::ios::binary);
// length of the time series
int length;
fin.read((char*)&length, sizeof(int));
// the time series itself
ts.resize(length);
fin.read((char*)&ts[0], sizeof(double) * length);
fin.close();
}
// Two structures for the rolling (window) statistics
// on the CPU
struct RollingStat{
Timeseries mean;
Timeseries var;
};
// on the GPU
struct RollingStat_GPU{
Timeseries_GPU mean;
Timeseries_GPU var;
};
// Function that computes the rolling statistics
void ComputeRollingStat_CPU(int window_size, Timeseries &ts, RollingStat &rolling){
int rolling_length = ts.size() - (window_size - 1);
// allocate memory
rolling.mean.resize(rolling_length);
rolling.var.resize(rolling_length);
for(int i = 0; i < rolling_length; ++i){
// first compute the mean
double loc = 0.0;
for(int k = 0; k < window_size; ++k)
loc += ts[i+k];
rolling.mean[i] = loc / window_size;
// now compute the variance
loc = 0;
for(int k = 0; k < window_size; ++k)
loc += pow(ts[i+k] - rolling.mean[i], 2);
rolling.var[i] = loc / (window_size - 1);
}
return;
}
// CUDA kernel that computes the rolling statistics
__global__ void __kernel_ComputeRollingStat_GPU(int window_size, int N, double *ts, double *mean, double *var){
// get the thread index within the grid
int idx = blockDim.x*blockIdx.x + threadIdx.x;
// if we have not gone out of bounds, then
if(idx < N){
// compute the mean
double loc_mean = 0.0;
for(int k = 0; k < window_size; ++k)
loc_mean += ts[idx + k];
mean[idx] = loc_mean / window_size;
// compute the variance
double loc = 0.0;
for(int k = 0; k < window_size; ++k)
loc += pow(ts[idx + k] - mean[idx], 2);
var[idx] = loc / (window_size - 1);
}
}
// Function that computes the rolling statistics on the GPU
void ComputeRollingStat_GPU(int window_size, Timeseries_GPU &ts, RollingStat_GPU &rolling){
int rolling_length = ts.size() - (window_size - 1);
rolling.mean.resize(rolling_length);
rolling.var.resize(rolling_length);
// set the kernel launch configuration
// a block size of 256 is the customary choice
dim3 block_spec(256);
dim3 grid_spec(rolling_length / 256 + (rolling_length % 256? 1 : 0));
// launch the CUDA kernel
__kernel_ComputeRollingStat_GPU<<< grid_spec, block_spec >>> (window_size, rolling_length,
thrust::raw_pointer_cast(&ts[0]),
thrust::raw_pointer_cast(&rolling.mean[0]),
thrust::raw_pointer_cast(&rolling.var[0]));
// wait for the kernel to finish
CudaSyncAndCheckErrors();
return;
}
// The Thrust library is used to compute the quantile
// function that computes the quantile on the CPU
double ComputeQuantile_CPU(double alpha, Timeseries &ts){
Timeseries loc(ts);
// index of the required order statistic
int k_alpha = (int)(alpha * ts.size());
// sort to obtain the order statistics
thrust::sort(thrust::host, loc.begin(), loc.end());
return loc[k_alpha];
}
// function that computes the quantile on the GPU
double ComputeQuantile_GPU(double alpha, Timeseries_GPU &ts){
Timeseries_GPU loc(ts);
// index of the required order statistic
int k_alpha = (int)(alpha * ts.size());
// sort to obtain the order statistics
thrust::sort(thrust::device, loc.begin(), loc.end());
return loc[k_alpha];
}
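// Worked example (illustrative): with alpha = 0.7 and ts.size() = 1000,
// k_alpha = 700, so the function returns the element at index 700 of the
// sorted copy, i.e. an empirical 70% quantile of the series.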
// helper that computes the mean of a single window
inline double ComputeMean_CPU(double *ts, int window_size){
double loc = 0;
for(int i = 0; i < window_size; ++i)
loc += ts[i];
return loc / window_size;
}
// function that detects the movement onset points
void DetectMovement_CPU(int window_size, int min_dist, double q_alpha,
Timeseries &ts, Timeseries &movement, std::vector<double> &points
){
// initialize the parameters
double prev_mean = ComputeMean_CPU(&ts[0], window_size);
bool prev_movement = false;
int prev_point = -2 * min_dist;
// start from the second window and compare it with the first
for(int left = window_size; left < ts.size(); left += window_size){
double cur_mean = ComputeMean_CPU(&ts[left], window_size);
// check whether the means of adjacent windows differ significantly
bool cur_movement = (fabs(cur_mean - prev_mean) > q_alpha);
// if there was no movement before, there is movement now, and the distance
// from the previous onset point is larger than the chosen
// filtering threshold, then add the point to the list
if(cur_movement && (!prev_movement) && (left - prev_point > min_dist)){
points.push_back(left);
prev_point = left;
}
// move on to the next step
prev_movement = cur_movement;
prev_mean = cur_mean;
}
}
// main program
int main(int argc, char *argv[]){
// start the global timer
clock_t global_start, global_stop;
global_start = clock();
// greeting message
std::cout << "Hello, CUDA and Thrust!" << std::endl;
// parameter initialization
// window size for computing the rolling variance
int window_size = 32;
// window size for the mean-comparison method
int window_size_mm = 64;
// minimum distance between movement onset points
int min_dist = window_size_mm * 4;
// for timing individual parts of the program
clock_t start, stop;
// Read the myogram time series
// the path to the data file can be passed as a command-line argument
Timeseries ts;
start = clock();
ReadTimeseries(ts,(argc > 1? argv[1] : "data/miog.bin"));
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ReadTimeseries time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// copy the loaded series to the GPU
start = clock();
Timeseries_GPU ts_gpu(ts);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "HtoD time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// Compute the rolling statistics on the GPU
RollingStat_GPU rolling_gpu;
start = clock();
ComputeRollingStat_GPU(window_size, ts_gpu, rolling_gpu);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ComputeRollingStat_GPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// Compute the quantile on the GPU
start = clock();
double q_gpu = ComputeQuantile_GPU(0.7, rolling_gpu.var);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "ComputeQuantile_GPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// copy the results back to the CPU
start = clock();
Timeseries var(rolling_gpu.var);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "DtoH time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// find the movement onset points
Timeseries movement;
std::vector<double> points;
start = clock();
DetectMovement_CPU(window_size_mm, min_dist, q_gpu, var, movement, points);
stop = clock();
#ifdef PRINT_TIME
std::cerr << "DetectMovement_CPU time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// save the detected points to a text file
// the file name can be passed as the second command-line argument
start = clock();
std::ofstream fout((argc > 2? argv[2] : "result.txt"));
for(int i = 0; i < points.size(); ++i)
fout << points[i] << " ";
fout.close();
stop = clock();
#ifdef PRINT_TIME
std::cerr << "SavingResults time: " << ((double)(stop - start))/CLOCKS_PER_SEC << std::endl;
#endif
// print the total program run time
global_stop = clock();
std::cerr << "Total time: " << ((double)(global_stop - global_start))/CLOCKS_PER_SEC << std::endl;
return 0;
} |